| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | weaviate__weaviate-python-client | weaviate/collections/data/sync.py | {
"start": 362,
"end": 822
} | class ____(Generic[Properties], _DataCollectionExecutor[ConnectionSync]):
def with_data_model(self, data_model: Type[TProperties]) -> "_DataCollection[TProperties]":
_check_properties_generic(data_model)
return _DataCollection[TProperties](
self._connection,
self.name,
self._consistency_level,
self._tenant,
self._validate_arguments,
data_model,
)
| _DataCollection |
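
The `with_data_model` method above re-parameterizes a runtime-generic collection to a caller-supplied properties type. A minimal, self-contained sketch of that pattern (all names here are illustrative, not the real weaviate client API):

```python
from typing import Generic, Optional, Type, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class TypedCollection(Generic[T]):
    """Illustrative stand-in for a client collection object."""

    def __init__(self, name: str, data_model: Optional[Type[T]] = None) -> None:
        self.name = name
        self.data_model = data_model

    def with_data_model(self, data_model: Type[U]) -> "TypedCollection[U]":
        # Re-parameterize: return a copy typed to the caller-supplied model.
        return TypedCollection(self.name, data_model)


class Article:
    title: str


typed = TypedCollection("articles").with_data_model(Article)
print(typed.data_model)  # <class '__main__.Article'>
```

Returning a fresh instance parameterized on a second TypeVar is what lets a static checker narrow the element type without any runtime cast.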
python | docker__docker-py | docker/api/volume.py | {
"start": 31,
"end": 5119
} | class ____:
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(dict): Dictionary with list of volume objects as value of the
``Volumes`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.api.volumes()
{u'Volumes': [{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'},
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
u'Name': u'baz'}]}
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/volumes')
return self._result(self._get(url, params=params), True)
def create_volume(self, name=None, driver=None, driver_opts=None,
labels=None):
"""
Create and register a named volume
Args:
name (str): Name of the volume
driver (str): Name of the driver used to create the volume
driver_opts (dict): Driver options as a key-value dictionary
labels (dict): Labels to set on the volume
Returns:
(dict): The created volume reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> volume = client.api.create_volume(
... name='foobar',
... driver='local',
... driver_opts={'foo': 'bar', 'baz': 'false'},
... labels={"key": "value"},
... )
... print(volume)
{u'Driver': u'local',
u'Labels': {u'key': u'value'},
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar',
u'Scope': u'local'}
"""
url = self._url('/volumes/create')
if driver_opts is not None and not isinstance(driver_opts, dict):
raise TypeError('driver_opts must be a dictionary')
data = {
'Name': name,
'Driver': driver,
'DriverOpts': driver_opts,
}
if labels is not None:
if utils.compare_version('1.23', self._version) < 0:
raise errors.InvalidVersion(
'volume labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
return self._result(self._post_json(url, data=data), True)
def inspect_volume(self, name):
"""
Retrieve volume info by name.
Args:
name (str): volume name
Returns:
(dict): Volume information dictionary
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.api.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}
"""
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def prune_volumes(self, filters=None):
"""
Delete unused volumes
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted volume names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/volumes/prune')
return self._result(self._post(url, params=params), True)
def remove_volume(self, name, force=False):
"""
Remove a volume. Similar to the ``docker volume rm`` command.
Args:
name (str): The volume's name
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
"""
params = {}
if force:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'force removal was introduced in API 1.25'
)
params = {'force': force}
url = self._url('/volumes/{0}', name, params=params)
resp = self._delete(url)
self._raise_for_status(resp)
| VolumeApiMixin |
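
The mixin above covers the full volume lifecycle; `prune_volumes` and `remove_volume` are the two methods whose docstrings lack a usage example. A short sketch, assuming docker-py is installed and a Docker daemon (API version >= 1.25 for `force=True`) is reachable:

```python
import docker

client = docker.from_env()

# Create a throwaway volume via the low-level API shown above.
client.api.create_volume(name="scratch", driver="local")

# Remove it by name; force=True requires API version >= 1.25.
client.api.remove_volume("scratch", force=True)

# Reclaim space from any remaining unused volumes.
report = client.api.prune_volumes()
print(report.get("VolumesDeleted"), report.get("SpaceReclaimed"))
```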
python | huggingface__transformers | src/transformers/models/instructblip/modeling_instructblip.py | {
"start": 20905,
"end": 21633
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerAttention with Blip2->InstructBlip
| InstructBlipQFormerSelfOutput |
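
This is the standard post-attention block: linear projection, dropout, then a residual add folded into LayerNorm. A quick standalone shape check of the same pattern, assuming PyTorch is installed:

```python
import torch
from torch import nn


class SelfOutput(nn.Module):
    """Minimal re-creation of the block above for a standalone check."""

    def __init__(self, hidden_size: int, eps: float = 1e-12, dropout: float = 0.1) -> None:
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = nn.LayerNorm(hidden_size, eps=eps)
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # The residual connection happens inside the LayerNorm argument.
        return self.LayerNorm(hidden_states + input_tensor)


x = torch.randn(2, 5, 64)          # (batch, seq, hidden)
block = SelfOutput(hidden_size=64)
assert block(x, x).shape == x.shape
```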
python | django__django | tests/i18n/forms.py | {
"start": 396,
"end": 496
} | class ____(forms.Form):
date_field = forms.DateField(widget=forms.SelectDateWidget)
| SelectDateForm |
python | openai__openai-python | src/openai/cli/_api/chat/completions.py | {
"start": 2541,
"end": 2619
} | class ____(NamedTuple):
role: ChatCompletionRole
content: str
| CLIMessage |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/prefetching_ops.py | {
"start": 3567,
"end": 9750
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that copies elements to another device."""
def __init__(self, input_dataset, target_device, source_device="/cpu:0"):
"""Constructs a _CopyToDeviceDataset.
Args:
input_dataset: `Dataset` to be copied
target_device: The name of the device to which elements would be copied.
source_device: Device where input_dataset would be placed.
"""
self._input_dataset = input_dataset._apply_debug_options() # pylint: disable=protected-access
self._target_device = target_device
spec = framework_device.DeviceSpec().from_string(self._target_device)
self._is_gpu_target = (spec.device_type == "GPU")
self._source_device_string = source_device
self._source_device = ops.convert_to_tensor(source_device)
wrap_ds_variant = gen_dataset_ops.wrap_dataset_variant(
self._input_dataset._variant_tensor) # pylint: disable=protected-access
@def_function.function()
def _init_func():
"""Creates an iterator for the input dataset.
Returns:
A `string` tensor that encapsulates the iterator created.
"""
ds_variant = gen_dataset_ops.unwrap_dataset_variant(wrap_ds_variant)
resource = gen_dataset_ops.anonymous_iterator(
**self._input_dataset._flat_structure) # pylint: disable=protected-access
with ops.control_dependencies(
[gen_dataset_ops.make_iterator(ds_variant, resource)]):
return gen_dataset_ops.iterator_to_string_handle(resource)
init_func_concrete = _init_func.get_concrete_function() # pylint: disable=protected-access
@def_function.function()
def _remote_init_func():
return functional_ops.remote_call(
target=self._source_device,
args=init_func_concrete.captured_inputs,
Tout=[dtypes.string],
f=init_func_concrete)
self._init_func = _remote_init_func.get_concrete_function() # pylint: disable=protected-access
self._init_captured_args = self._init_func.captured_inputs
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _next_func(string_handle):
"""Calls get_next for created iterator.
Args:
string_handle: An iterator string handle created by _init_func
Returns:
The elements generated from `input_dataset`
"""
with ops.device(self._source_device_string):
iterator = iterator_ops.Iterator.from_string_handle(
string_handle,
dataset_ops.get_legacy_output_types(self),
dataset_ops.get_legacy_output_shapes(self),
dataset_ops.get_legacy_output_classes(self))
return structure.to_tensor_list(self.element_spec, iterator.get_next())
next_func_concrete = _next_func.get_concrete_function() # pylint: disable=protected-access
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
experimental_attributes={"experimental_ints_on_device": True})
def _remote_next_func(string_handle):
return functional_ops.remote_call(
target=self._source_device,
args=[string_handle] + next_func_concrete.captured_inputs,
Tout=self._input_dataset._flat_types, # pylint: disable=protected-access
f=next_func_concrete)
self._next_func = _remote_next_func.get_concrete_function()
self._next_captured_args = self._next_func.captured_inputs
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _finalize_func(string_handle):
"""Destroys the iterator resource created.
Args:
string_handle: An iterator string handle created by _init_func
Returns:
Tensor constant 0
"""
iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(
string_handle,
**self._input_dataset._flat_structure) # pylint: disable=protected-access
with ops.control_dependencies([
resource_variable_ops.destroy_resource_op(
iterator_resource, ignore_lookup_error=True)]):
return array_ops.constant(0, dtypes.int64)
finalize_func_concrete = _finalize_func.get_concrete_function() # pylint: disable=protected-access
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def _remote_finalize_func(string_handle):
return functional_ops.remote_call(
target=self._source_device,
args=[string_handle] + finalize_func_concrete.captured_inputs,
Tout=[dtypes.int64],
f=finalize_func_concrete)
self._finalize_func = _remote_finalize_func.get_concrete_function( # pylint: disable=protected-access
)
self._finalize_captured_args = self._finalize_func.captured_inputs
g = ops.get_default_graph()
self._init_func.add_to_graph(g)
self._next_func.add_to_graph(g)
self._finalize_func.add_to_graph(g)
    # pylint: enable=protected-access
with ops.device(self._target_device):
variant_tensor = gen_dataset_ops.generator_dataset(
self._init_captured_args,
self._next_captured_args,
self._finalize_captured_args,
init_func=self._init_func,
next_func=self._next_func,
finalize_func=self._finalize_func,
**self._input_dataset._flat_structure) # pylint: disable=protected-access
super(_CopyToDeviceDataset, self).__init__(input_dataset, variant_tensor)
# The one_shot_iterator implementation needs a 0 arg _make_dataset function
# that thereby captures all the inputs required to create the dataset. Since
# there are strings that are inputs to the GeneratorDataset which can't be
# placed on a GPU, this fails for the GPU case. Therefore, disabling it for
# GPU
def make_one_shot_iterator(self):
if self._is_gpu_target:
raise ValueError(
"`make_one_shot_iterator` is not compatible with GPU execution. "
"Please use `Dataset.make_initializable_iterator()` instead."
)
else:
return super(_CopyToDeviceDataset, self).make_one_shot_iterator()
| _CopyToDeviceDataset |
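
`_CopyToDeviceDataset` is internal; the supported entry point is the `copy_to_device` transformation from the same module, applied via `Dataset.apply`. A minimal sketch, assuming a visible GPU (any valid device string works as the target):

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(8)

# copy_to_device returns a transformation; elements are produced on the
# source device and copied to the target device, as implemented above.
dataset = dataset.apply(
    tf.data.experimental.copy_to_device(target_device="/gpu:0",
                                        source_device="/cpu:0")
)

for element in dataset.take(2):
    print(element)
```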
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/glue.py | {
"start": 21194,
"end": 26139
} | class ____(AwsBaseHook):
"""
Interact with AWS Glue Data Quality.
    Provide a thick wrapper around :external+boto3:py:class:`boto3.client("glue") <Glue.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(
self,
*args,
**kwargs,
):
kwargs["client_type"] = "glue"
super().__init__(*args, **kwargs)
def has_data_quality_ruleset(self, name: str) -> bool:
try:
self.conn.get_data_quality_ruleset(Name=name)
return True
except self.conn.exceptions.EntityNotFoundException:
return False
def _log_results(self, result: dict[str, Any]) -> None:
"""
        Print the outcome of an evaluation run. An evaluation run can involve multiple rulesets evaluated against a data source (Glue table).
Name Description Result EvaluatedMetrics EvaluationMessage
Rule_1 RowCount between 150000 and 600000 PASS {'Dataset.*.RowCount': 300000.0} NaN
Rule_2 IsComplete "marketplace" PASS {'Column.marketplace.Completeness': 1.0} NaN
Rule_3 ColumnLength "marketplace" between 1 and 2 FAIL {'Column.marketplace.MaximumLength': 9.0, 'Column.marketplace.MinimumLength': 3.0} Value: 9.0 does not meet the constraint requirement!
"""
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
self.log.info(
"AWS Glue data quality ruleset evaluation result for RulesetName: %s RulesetEvaluationRunId: %s Score: %s",
result.get("RulesetName"),
result.get("RulesetEvaluationRunId"),
result.get("Score"),
)
rule_results = result["RuleResults"]
rule_results_df = pd.DataFrame(rule_results)
self.log.info(rule_results_df)
def get_evaluation_run_results(self, run_id: str) -> dict[str, Any]:
response = self.conn.get_data_quality_ruleset_evaluation_run(RunId=run_id)
return self.conn.batch_get_data_quality_result(ResultIds=response["ResultIds"])
def validate_evaluation_run_results(
self, evaluation_run_id: str, show_results: bool = True, verify_result_status: bool = True
) -> None:
results = self.get_evaluation_run_results(evaluation_run_id)
total_failed_rules = 0
if results.get("ResultsNotFound"):
self.log.info(
"AWS Glue data quality ruleset evaluation run, results not found for %s",
results["ResultsNotFound"],
)
for result in results["Results"]:
rule_results = result["RuleResults"]
total_failed_rules += len(
list(
filter(
lambda result: result.get("Result") == "FAIL" or result.get("Result") == "ERROR",
rule_results,
)
)
)
if show_results:
self._log_results(result)
self.log.info(
"AWS Glue data quality ruleset evaluation run, total number of rules failed: %s",
total_failed_rules,
)
if verify_result_status and total_failed_rules > 0:
raise AirflowException(
"AWS Glue data quality ruleset evaluation run failed for one or more rules"
)
def log_recommendation_results(self, run_id: str) -> None:
"""
        Print the outcome of a recommendation run. A recommendation run generates multiple rules against a data source (Glue table) in Data Quality Definition Language (DQDL) format.
Rules = [
IsComplete "NAME",
ColumnLength "EMP_ID" between 1 and 12,
IsUnique "EMP_ID",
ColumnValues "INCOME" > 50000
]
"""
result = self.conn.get_data_quality_rule_recommendation_run(RunId=run_id)
if result.get("RecommendedRuleset"):
self.log.info(
"AWS Glue data quality recommended rules for DatabaseName: %s TableName: %s",
result["DataSource"]["GlueTable"]["DatabaseName"],
result["DataSource"]["GlueTable"]["TableName"],
)
self.log.info(result["RecommendedRuleset"])
else:
self.log.info("AWS Glue data quality, no recommended rules available for RunId: %s", run_id)
| GlueDataQualityHook |
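
A hedged usage sketch of the hook above, assuming valid AWS credentials, an existing ruleset, and a completed evaluation run (the `run_id` below is a hypothetical placeholder):

```python
from airflow.providers.amazon.aws.hooks.glue import GlueDataQualityHook

hook = GlueDataQualityHook(aws_conn_id="aws_default")

# Assumed to come from an earlier start_data_quality_ruleset_evaluation_run call.
run_id = "dqrun-0123456789abcdef"

if hook.has_data_quality_ruleset("my_ruleset"):
    # Logs per-rule results and raises AirflowException if any rule FAILed or ERRORed.
    hook.validate_evaluation_run_results(evaluation_run_id=run_id)
```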
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride4.py | {
"start": 561,
"end": 746
} | class ____(BaseA[_TSource]):
def method1(
self, mapper: Callable[[_TSource, _T2], _TResult], other: BaseA[_T2]
) -> BaseA[_TResult]:
return SubclassA2()
| SubclassA1 |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 24242,
"end": 24369
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Cumulus-linux'
strategy_class = FileStrategy
| CumulusHostname |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/indexer/tags_validator.py | {
"start": 810,
"end": 965
} | class ____(TagsValidator):
"""
The release health pipeline has the same limits as the default tags limit enforcer.
"""
| ReleaseHealthTagsValidator |
python | coleifer__peewee | tests/regressions.py | {
"start": 48799,
"end": 50326
} | class ____(ModelTestCase):
requires = [FKF_A, FKF_B]
def test_query_with_model_instance_param(self):
a1 = FKF_A.create(key='k1')
a2 = FKF_A.create(key='k2')
b1 = FKF_B.create(fk_a_1=a1, fk_a_2=a1)
b2 = FKF_B.create(fk_a_1=a2, fk_a_2=a2)
# Ensure that UPDATE works as expected as well.
b1.save()
# See also keys.TestFKtoNonPKField test, which replicates much of this.
args = (b1.fk_a_1, b1.fk_a_1_id, a1, a1.key)
for arg in args:
query = FKF_B.select().where(FKF_B.fk_a_1 == arg)
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."fk_a_1_id", "t1"."fk_a_2" '
'FROM "fkf_b" AS "t1" '
'WHERE ("t1"."fk_a_1_id" = ?)'), ['k1'])
b1_db = query.get()
self.assertEqual(b1_db.id, b1.id)
        # When we are handed a model instance and a conversion (an IntegerField
        # in this case) and the attempted conversion fails, we fall back to
        # using the given model's primary key.
args = (b1.fk_a_2, a1, a1.id)
for arg in args:
query = FKF_B.select().where(FKF_B.fk_a_2 == arg)
self.assertSQL(query, (
'SELECT "t1"."id", "t1"."fk_a_1_id", "t1"."fk_a_2" '
'FROM "fkf_b" AS "t1" '
'WHERE ("t1"."fk_a_2" = ?)'), [a1.id])
b1_db = query.get()
self.assertEqual(b1_db.id, b1.id)
@skip_if(IS_SQLITE_OLD or IS_MYSQL)
| TestQueryWithModelInstanceParam |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-ibm/tests/test_ibm.py | {
"start": 73,
"end": 1405
} | class ____:
TEST_URL = "https://us-south.ml.cloud.ibm.com"
TEST_APIKEY = "test_apikey"
TEST_PROJECT_ID = "test_project_id"
TEST_MODEL = "test_rerank_model"
def test_initialization(self) -> None:
with pytest.raises(ValueError, match=r"^Did not find"):
_ = WatsonxRerank(model_id=self.TEST_MODEL, project_id=self.TEST_PROJECT_ID)
# Cloud scenario
with pytest.raises(ValueError, match=r"^Did not find 'apikey' or 'token',"):
_ = WatsonxRerank(
model_id=self.TEST_MODEL,
url=self.TEST_URL,
project_id=self.TEST_PROJECT_ID,
)
# CPD scenario with password and missing username
with pytest.raises(ValueError, match=r"^Did not find username"):
_ = WatsonxRerank(
model_id=self.TEST_MODEL,
password="123",
url="cpd-instance",
project_id=self.TEST_PROJECT_ID,
)
# CPD scenario with apikey and missing username
with pytest.raises(ValueError, match=r"^Did not find username"):
_ = WatsonxRerank(
model_id=self.TEST_MODEL,
apikey="123",
url="cpd-instance",
project_id=self.TEST_PROJECT_ID,
)
| TestWasonxRerank |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/exc.py | {
"start": 5520,
"end": 5641
} | class ____(sa_exc.InvalidRequestError):
"""Mapping operation was requested on an unknown column."""
| UnmappedColumnError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/special_math_test.py | {
"start": 12735,
"end": 13374
} | class ____(test.TestCase):
def testErfInvValues(self):
with self.cached_session():
if not special:
return
x = np.linspace(0., 1.0, 50).astype(np.float64)
expected_x = special.erfinv(x)
x = special_math.erfinv(x)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def testErfInvIntegerInput(self):
with self.cached_session():
with self.assertRaises(TypeError):
x = np.array([1, 2, 3]).astype(np.int32)
special_math.erfinv(x)
with self.assertRaises(TypeError):
x = np.array([1, 2, 3]).astype(np.int64)
special_math.erfinv(x)
| ErfInvTest |
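
The test compares TF's `erfinv` against SciPy's reference implementation. For orientation, the reference side alone (assumes SciPy is installed):

```python
import numpy as np
from scipy import special

x = np.linspace(0.0, 1.0, 5)
print(special.erfinv(x))
# erfinv is the inverse of erf on (-1, 1): erf(erfinv(x)) == x there.
print(special.erf(special.erfinv(x[:-1])))  # the last point (x=1.0) maps to inf
```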
python | python-attrs__attrs | typing-examples/mypy.py | {
"start": 7293,
"end": 7599
} | class ____:
a: list[int] = attr.ib(default=attr.Factory(list))
b: list[Any] = attr.ib(default=attr.Factory(list, False))
c: list[int] = attr.ib(default=attr.Factory((lambda s: s.a), True))
attr.asdict(FactoryTest(), tuple_keys=True)
# Check match_args stub
@attr.s(match_args=False)
| FactoryTest |
python | tensorflow__tensorflow | tensorflow/tools/graph_transforms/python/transform_graph_test.py | {
"start": 1025,
"end": 3153
} | class ____(test.TestCase):
# This test constructs a graph with a relu op that's not used by the normal
# inference path, and then tests that the strip_unused transform removes it as
# expected.
def testTransformGraph(self):
input_graph_def = graph_pb2.GraphDef()
const_op1 = input_graph_def.node.add()
const_op1.op = "Const"
const_op1.name = "const_op1"
const_op1.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op1.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[1, 2], dtypes.float32, [1, 2])))
const_op2 = input_graph_def.node.add()
const_op2.op = "Const"
const_op2.name = "const_op2"
const_op2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
const_op2.attr["value"].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
[3, 4], dtypes.float32, [1, 2])))
# Create an add that has two constants as inputs.
add_op = input_graph_def.node.add()
add_op.op = "Add"
add_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
add_op.name = "add_op"
add_op.input.extend(["const_op1", "const_op2"])
# Create a relu that reads from the add.
relu_op = input_graph_def.node.add()
relu_op.op = "Relu"
relu_op.attr["T"].CopyFrom(attr_value_pb2.AttrValue(
type=dtypes.float32.as_datatype_enum))
relu_op.name = "relu_op"
relu_op.input.extend(["add_op"])
# We're specifying that add_op is the final output, and so the relu isn't
# needed.
input_names = []
output_names = ["add_op"]
transforms = ["strip_unused_nodes"]
transformed_graph_def = graph_transforms.TransformGraph(
input_graph_def, input_names, output_names, transforms)
# We expect that the relu is no longer present after running the transform.
for node in transformed_graph_def.node:
self.assertNotEqual("Relu", node.op)
if __name__ == "__main__":
test.main()
| TransformGraphTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramInference2.py | {
"start": 608,
"end": 743
} | class ____:
def method1(self, fn: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> R:
return fn(*args, **kwargs)
| Parent2 |
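
This pyright sample exercises `ParamSpec` forwarding: `method1` accepts a callable plus exactly that callable's own arguments. A self-contained version of the same idiom, assuming Python 3.10+ for `typing.ParamSpec`:

```python
from typing import Callable, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")


class Dispatcher:
    def call(self, fn: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> R:
        # Arguments are type-checked against fn's own signature.
        return fn(*args, **kwargs)


def greet(name: str, punctuation: str = "!") -> str:
    return f"hello {name}{punctuation}"


print(Dispatcher().call(greet, "pyright"))      # OK
# Dispatcher().call(greet, 42)  # a type checker flags this: int is not str
```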
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 198363,
"end": 198619
} | class ____(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
| UnicodeReadFileObjectClassTestCase |
python | pypa__warehouse | warehouse/sessions.py | {
"start": 2002,
"end": 6914
} | class ____(dict):
_csrf_token_key = "_csrf_token"
_flash_key = "_flash_messages"
_totp_secret_key = "_totp_secret"
_webauthn_challenge_key = "_webauthn_challenge"
_reauth_timestamp_key = "_reauth_timestamp"
_password_timestamp_key = "_password_timestamp"
# A number of our methods need to be decorated so that they also call
# self.changed()
__delitem__ = _changed_method(dict.__delitem__)
__setitem__ = _changed_method(dict.__setitem__)
clear = _changed_method(dict.clear)
pop = _changed_method(dict.pop)
popitem = _changed_method(dict.popitem)
setdefault = _changed_method(dict.setdefault)
update = _changed_method(dict.update)
def __init__(self, data=None, session_id=None, new=True):
# Brand new sessions don't have any data, so we'll just create an empty
# dictionary for them.
if data is None:
data = {}
# Initialize our actual dictionary here.
super().__init__(data)
# We need to track the state of our Session.
self._sid = session_id
self._changed = False
self.new = new
self.created = int(time.time())
# We'll track all of the IDs that have been invalidated here
self.invalidated = set()
@property
def sid(self):
if self._sid is None:
self._sid = crypto.random_token()
return self._sid
def changed(self):
self._changed = True
def invalidate(self):
self.clear()
self.new = True
self.created = int(time.time())
self._changed = False
# If the current session id isn't None we'll want to record it as one
# of the ones that have been invalidated.
if self._sid is not None:
self.invalidated.add(self._sid)
self._sid = None
def should_save(self):
return self._changed
def record_auth_timestamp(self):
self[self._reauth_timestamp_key] = datetime.datetime.now().timestamp()
self.changed()
def record_password_timestamp(self, timestamp):
self[self._password_timestamp_key] = timestamp
self.changed()
def password_outdated(self, current_password_timestamp):
stored_password_timestamp = self.get(self._password_timestamp_key)
if stored_password_timestamp is None:
# This session predates invalidation by password reset... since
# we cannot say for sure, let it live its life.
return False
return current_password_timestamp != stored_password_timestamp
def needs_reauthentication(self, time_to_reauth):
reauth_timestamp = self.get(self._reauth_timestamp_key, 0)
current_time = datetime.datetime.now().timestamp()
return current_time - reauth_timestamp >= time_to_reauth
# Flash Messages Methods
def _get_flash_queue_key(self, queue):
return ".".join(filter(None, [self._flash_key, queue]))
def flash(self, msg, queue="", allow_duplicate=True, safe=False):
queue_key = self._get_flash_queue_key(queue)
# If we're not allowing duplicates check if this message is already
# in the queue, and if it is just return immediately.
if not allow_duplicate and {"msg": msg, "safe": safe} in self.get(
queue_key, []
):
return
self.setdefault(queue_key, []).append({"msg": msg, "safe": safe})
def peek_flash(self, queue=""):
return self.get(self._get_flash_queue_key(queue), [])
def pop_flash(self, queue=""):
queue_key = self._get_flash_queue_key(queue)
messages = [
markupsafe.Markup(m["msg"]) if m["safe"] else m["msg"]
for m in self.get(queue_key, [])
]
self.pop(queue_key, None)
return messages
# CSRF Methods
def new_csrf_token(self):
self[self._csrf_token_key] = crypto.random_token()
return self[self._csrf_token_key]
def get_csrf_token(self):
token = self.get(self._csrf_token_key)
if token is None:
token = self.new_csrf_token()
return token
def get_totp_secret(self):
totp_secret = self.get(self._totp_secret_key)
if totp_secret is None:
totp_secret = self[self._totp_secret_key] = otp.generate_totp_secret()
return totp_secret
def clear_totp_secret(self):
self[self._totp_secret_key] = None
def get_webauthn_challenge(self):
webauthn_challenge = self.get(self._webauthn_challenge_key)
if webauthn_challenge is None:
self[self._webauthn_challenge_key] = webauthn.generate_webauthn_challenge()
webauthn_challenge = self[self._webauthn_challenge_key]
return webauthn_challenge
def clear_webauthn_challenge(self):
self[self._webauthn_challenge_key] = None
@implementer(ISessionFactory)
| Session |
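
The flash queue is plain dict state under keys of the form `_flash_messages[.queue]`, with mutation tracked by the `_changed_method`-wrapped dict operations. A sketch of the intended behavior, assuming the warehouse package and its runtime dependencies are importable:

```python
from warehouse.sessions import Session

session = Session()                       # new, empty session
session.flash("Saved!", queue="success")  # stored under "_flash_messages.success"
session.flash("Saved!", queue="success", allow_duplicate=False)  # dropped: duplicate

assert session.peek_flash("success") == [{"msg": "Saved!", "safe": False}]
assert session.should_save()              # the wrapped setdefault marked it changed

messages = session.pop_flash("success")   # drains the queue
assert messages == ["Saved!"] and session.peek_flash("success") == []
```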
python | huggingface__transformers | src/transformers/models/canine/configuration_canine.py | {
"start": 792,
"end": 6584
} | class ____(PreTrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`CanineModel`]. It is used to instantiate a
CANINE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the CANINE
[google/canine-s](https://huggingface.co/google/canine-s) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the deep Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoders.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoders.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoders, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with.
type_vocab_size (`int`, *optional*, defaults to 16):
The vocabulary size of the `token_type_ids` passed when calling [`CanineModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 57344):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 57345):
End of stream token id.
downsampling_rate (`int`, *optional*, defaults to 4):
The rate at which to downsample the original character sequence length before applying the deep Transformer
encoder.
upsampling_kernel_size (`int`, *optional*, defaults to 4):
The kernel size (i.e. the number of characters in each window) of the convolutional projection layer when
projecting back from `hidden_size`*2 to `hidden_size`.
num_hash_functions (`int`, *optional*, defaults to 8):
The number of hash functions to use. Each hash function has its own embedding matrix.
num_hash_buckets (`int`, *optional*, defaults to 16384):
The number of hash buckets to use.
local_transformer_stride (`int`, *optional*, defaults to 128):
The stride of the local attention of the first shallow Transformer encoder. Defaults to 128 for good
TPU/XLA memory alignment.
Example:
```python
>>> from transformers import CanineConfig, CanineModel
>>> # Initializing a CANINE google/canine-s style configuration
>>> configuration = CanineConfig()
>>> # Initializing a model (with random weights) from the google/canine-s style configuration
>>> model = CanineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "canine"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=16384,
type_vocab_size=16,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
bos_token_id=0xE000,
eos_token_id=0xE001,
downsampling_rate=4,
upsampling_kernel_size=4,
num_hash_functions=8,
num_hash_buckets=16384,
local_transformer_stride=128, # Good TPU/XLA memory alignment.
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
# Character config:
self.downsampling_rate = downsampling_rate
self.upsampling_kernel_size = upsampling_kernel_size
self.num_hash_functions = num_hash_functions
self.num_hash_buckets = num_hash_buckets
self.local_transformer_stride = local_transformer_stride
__all__ = ["CanineConfig"]
| CanineConfig |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/definitions_class.py | {
"start": 5151,
"end": 12165
} | class ____(NamedTuple):
jobs: Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]
schedules: Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]
sensors: Iterable[SensorDefinition]
def _io_manager_needs_replacement(job: JobDefinition, resource_defs: Mapping[str, Any]) -> bool:
"""Explicitly replace the default IO manager in jobs that don't specify one, if a top-level
I/O manager is provided to Definitions.
"""
return (
job.resource_defs.get("io_manager") == default_job_io_manager
and "io_manager" in resource_defs
)
def _attach_resources_to_jobs_and_instigator_jobs(
jobs: Optional[Iterable[Union[JobDefinition, UnresolvedAssetJobDefinition]]],
schedules: Optional[
Iterable[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]
],
sensors: Optional[Iterable[SensorDefinition]],
resource_defs: Mapping[str, Any],
) -> _AttachedObjects:
"""Given a list of jobs, schedules, and sensors along with top-level resource definitions,
attach the resource definitions to the jobs, schedules, and sensors which require them.
"""
jobs = jobs or []
schedules = schedules or []
sensors = sensors or []
# Add jobs in schedules and sensors as well
jobs = [
*jobs,
*[
schedule.job
for schedule in schedules
if isinstance(schedule, ScheduleDefinition)
and schedule.target.has_job_def
and isinstance(schedule.job, (JobDefinition, UnresolvedAssetJobDefinition))
],
*[
target.job_def
for sensor in sensors
for target in sensor.targets
if target.has_job_def
and isinstance(target.job_def, (JobDefinition, UnresolvedAssetJobDefinition))
],
]
# Dedupe
jobs = list({id(job): job for job in jobs}.values())
# Find unsatisfied jobs
unsatisfied_jobs = [
job
for job in jobs
if isinstance(job, JobDefinition)
and (
job.is_missing_required_resources() or _io_manager_needs_replacement(job, resource_defs)
)
]
# Create a mapping of job id to a version of the job with the resource defs bound
unsatisfied_job_to_resource_bound_job = {
id(job): job.with_top_level_resources(
{
**resource_defs,
**job.resource_defs,
# special case for IO manager - the job-level IO manager does not take precedence
# if it is the default and a top-level IO manager is provided
**(
{"io_manager": resource_defs["io_manager"]}
if _io_manager_needs_replacement(job, resource_defs)
else {}
),
}
)
for job in jobs
if job in unsatisfied_jobs
}
# Update all jobs to use the resource bound version
jobs_with_resources = [
unsatisfied_job_to_resource_bound_job[id(job)] if job in unsatisfied_jobs else job
for job in jobs
]
# Update all schedules and sensors to use the resource bound version
updated_schedules = [
(
schedule.with_updated_job(unsatisfied_job_to_resource_bound_job[id(schedule.job)])
if (
isinstance(schedule, ScheduleDefinition)
and schedule.target.has_job_def
and schedule.job in unsatisfied_jobs
)
else schedule
)
for schedule in schedules
]
updated_sensors = [
(
sensor.with_updated_jobs(
[
(
unsatisfied_job_to_resource_bound_job[id(job)]
if job in unsatisfied_jobs
else job
)
for job in sensor.jobs
]
)
if any(target.has_job_def for target in sensor.targets)
and any(job in unsatisfied_jobs for job in sensor.jobs)
else sensor
)
for sensor in sensors
]
return _AttachedObjects(jobs_with_resources, updated_schedules, updated_sensors)
def _create_repository_using_definitions_args(
name: str,
assets: TAssets = None,
schedules: TSchedules = None,
sensors: TSensors = None,
jobs: TJobs = None,
resources: Optional[Mapping[str, Any]] = None,
executor: Optional[Union[ExecutorDefinition, Executor]] = None,
loggers: Optional[Mapping[str, LoggerDefinition]] = None,
asset_checks: TAssetChecks = None,
metadata: Optional[RawMetadataMapping] = None,
component_tree: Optional["ComponentTree"] = None,
) -> RepositoryDefinition:
# First, dedupe all definition types.
sensors = dedupe_object_refs(sensors)
jobs = dedupe_object_refs(jobs)
assets = _canonicalize_specs_to_assets_defs(dedupe_object_refs(assets))
schedules = dedupe_object_refs(schedules)
asset_checks = dedupe_object_refs(asset_checks)
executor_def = (
executor
if isinstance(executor, ExecutorDefinition) or executor is None
else ExecutorDefinition.hardcoded_executor(executor)
)
resource_defs = wrap_resources_for_execution(resources)
# Binds top-level resources to jobs and any jobs attached to schedules or sensors
(
jobs_with_resources,
schedules_with_resources,
sensors_with_resources,
) = _attach_resources_to_jobs_and_instigator_jobs(jobs, schedules, sensors, resource_defs)
@repository(
name=name,
default_executor_def=executor_def,
default_logger_defs=loggers,
metadata=metadata,
_top_level_resources=resource_defs,
_component_tree=component_tree,
)
def created_repo():
return [
*with_resources(assets, resource_defs),
*with_resources(asset_checks or [], resource_defs),
*(schedules_with_resources),
*(sensors_with_resources),
*(jobs_with_resources),
]
return created_repo
def _canonicalize_specs_to_assets_defs(
assets: Iterable[Union[AssetsDefinition, AssetSpec, SourceAsset, CacheableAssetsDefinition]],
) -> Iterable[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition]]:
asset_specs_by_partitions_def = defaultdict(list)
for obj in assets:
if isinstance(obj, AssetSpec):
asset_specs_by_partitions_def[obj.partitions_def].append(obj)
result = [obj for obj in assets if not isinstance(obj, AssetSpec)]
for specs in asset_specs_by_partitions_def.values():
with disable_dagster_warnings():
result.append(AssetsDefinition(specs=specs))
return result
@deprecated(
breaking_version="2.0",
additional_warn_text=(
"Instantiations can be removed. Since it's behavior is now the default, this class is now a"
" no-op."
),
)
| _AttachedObjects |
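
The helpers above are what bind `Definitions`-level resources into jobs that do not bind their own. A hedged end-user sketch of that behavior using public dagster APIs:

```python
from dagster import Definitions, OpExecutionContext, job, op


@op(required_resource_keys={"warehouse"})
def load(context: OpExecutionContext) -> None:
    context.log.info(f"loading into {context.resources.warehouse}")


@job
def nightly_load():
    load()


# nightly_load does not bind "warehouse" itself; the top-level resource
# mapping here is attached to it by the machinery shown above.
defs = Definitions(jobs=[nightly_load], resources={"warehouse": "duckdb://local"})

if __name__ == "__main__":
    defs.get_job_def("nightly_load").execute_in_process()
```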
python | huggingface__transformers | src/transformers/models/rt_detr_v2/configuration_rt_detr_v2.py | {
"start": 1399,
"end": 18858
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`RTDetrV2Model`]. It is used to instantiate a
RT-DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the RT-DETR architecture.
e.g. [PekingU/rtdetr_r18vd](https://huggingface.co/PekingU/rtdetr_r18vd)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
initializer_range (`float`, *optional*, defaults to 0.01):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_bias_prior_prob (`float`, *optional*):
The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.
            If `None`, `prior_prob` is computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
backbone_config (`Dict`, *optional*, defaults to `RTDetrV2ResNetConfig()`):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`):
Whether to freeze the batch normalization layers in the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
encoder_hidden_dim (`int`, *optional*, defaults to 256):
Dimension of the layers in hybrid encoder.
encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`):
Multi level features input for encoder.
feat_strides (`list[int]`, *optional*, defaults to `[8, 16, 32]`):
Strides used in each feature map.
encoder_layers (`int`, *optional*, defaults to 1):
Total of layers to be used by the encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 1024):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.0):
The ratio for all dropout layers.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
encode_proj_layers (`list[int]`, *optional*, defaults to `[2]`):
Indexes of the projected layers to be used in the encoder.
positional_encoding_temperature (`int`, *optional*, defaults to 10000):
The temperature parameter used to create the positional encodings.
encoder_activation_function (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
activation_function (`str`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the general layer. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
eval_size (`tuple[int, int]`, *optional*):
Height and width used to compute the effective height and width of the position embeddings after taking
into account the stride.
normalize_before (`bool`, *optional*, defaults to `False`):
Determine whether to apply layer normalization in the transformer encoder layer before self-attention and
feed-forward modules.
hidden_expansion (`float`, *optional*, defaults to 1.0):
Expansion ratio to enlarge the dimension size of RepVGGBlock and CSPRepLayer.
d_model (`int`, *optional*, defaults to 256):
Dimension of the layers exclude hybrid encoder.
num_queries (`int`, *optional*, defaults to 300):
Number of object queries.
decoder_in_channels (`list`, *optional*, defaults to `[256, 256, 256]`):
Multi level features dimension for decoder
decoder_ffn_dim (`int`, *optional*, defaults to 1024):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of input feature levels.
decoder_n_points (`int`, *optional*, defaults to 4):
The number of sampled keys in each feature level for each attention head in the decoder.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_activation_function (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
num_denoising (`int`, *optional*, defaults to 100):
The total number of denoising tasks or queries to be used for contrastive denoising.
label_noise_ratio (`float`, *optional*, defaults to 0.5):
The fraction of denoising labels to which random noise should be added.
box_noise_scale (`float`, *optional*, defaults to 1.0):
Scale or magnitude of noise to be added to the bounding boxes.
learn_initial_query (`bool`, *optional*, defaults to `False`):
Indicates whether the initial query embeddings for the decoder should be learned during training
anchor_image_size (`tuple[int, int]`, *optional*):
Height and width of the input image used during evaluation to generate the bounding box anchors. If None, automatic generate anchor is applied.
with_box_refine (`bool`, *optional*, defaults to `True`):
Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
based on the predictions from the previous layer.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the architecture has an encoder decoder structure.
matcher_alpha (`float`, *optional*, defaults to 0.25):
Parameter alpha used by the Hungarian Matcher.
matcher_gamma (`float`, *optional*, defaults to 2.0):
Parameter gamma used by the Hungarian Matcher.
matcher_class_cost (`float`, *optional*, defaults to 2.0):
The relative weight of the class loss used by the Hungarian Matcher.
matcher_bbox_cost (`float`, *optional*, defaults to 5.0):
The relative weight of the bounding box loss used by the Hungarian Matcher.
matcher_giou_cost (`float`, *optional*, defaults to 2.0):
The relative weight of the giou loss of used by the Hungarian Matcher.
use_focal_loss (`bool`, *optional*, defaults to `True`):
Parameter informing if focal loss should be used.
auxiliary_loss (`bool`, *optional*, defaults to `True`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
focal_loss_alpha (`float`, *optional*, defaults to 0.75):
Parameter alpha used to compute the focal loss.
focal_loss_gamma (`float`, *optional*, defaults to 2.0):
Parameter gamma used to compute the focal loss.
weight_loss_vfl (`float`, *optional*, defaults to 1.0):
Relative weight of the varifocal loss in the object detection loss.
weight_loss_bbox (`float`, *optional*, defaults to 5.0):
Relative weight of the L1 bounding box loss in the object detection loss.
weight_loss_giou (`float`, *optional*, defaults to 2.0):
Relative weight of the generalized IoU loss in the object detection loss.
eos_coefficient (`float`, *optional*, defaults to 0.0001):
Relative classification weight of the 'no-object' class in the object detection loss.
decoder_n_levels (`int`, *optional*, defaults to 3):
The number of feature levels used by the decoder.
decoder_offset_scale (`float`, *optional*, defaults to 0.5):
Scaling factor applied to the attention offsets in the decoder.
decoder_method (`str`, *optional*, defaults to `"default"`):
The method to use for the decoder: `"default"` or `"discrete"`.
Examples:
```python
>>> from transformers import RTDetrV2Config, RTDetrV2Model
>>> # Initializing a RT-DETR configuration
>>> configuration = RTDetrV2Config()
>>> # Initializing a model (with random weights) from the configuration
>>> model = RTDetrV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "rt_detr_v2"
sub_configs = {"backbone_config": AutoConfig}
layer_types = ["basic", "bottleneck"]
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
initializer_range=0.01,
initializer_bias_prior_prob=None,
layer_norm_eps=1e-5,
batch_norm_eps=1e-5,
# backbone
backbone_config=None,
backbone=None,
use_pretrained_backbone=False,
use_timm_backbone=False,
freeze_backbone_batch_norms=True,
backbone_kwargs=None,
# encoder HybridEncoder
encoder_hidden_dim=256,
encoder_in_channels=[512, 1024, 2048],
feat_strides=[8, 16, 32],
encoder_layers=1,
encoder_ffn_dim=1024,
encoder_attention_heads=8,
dropout=0.0,
activation_dropout=0.0,
encode_proj_layers=[2],
positional_encoding_temperature=10000,
encoder_activation_function="gelu",
activation_function="silu",
eval_size=None,
normalize_before=False,
hidden_expansion=1.0,
# decoder RTDetrV2Transformer
d_model=256,
num_queries=300,
decoder_in_channels=[256, 256, 256],
decoder_ffn_dim=1024,
num_feature_levels=3,
decoder_n_points=4,
decoder_layers=6,
decoder_attention_heads=8,
decoder_activation_function="relu",
attention_dropout=0.0,
num_denoising=100,
label_noise_ratio=0.5,
box_noise_scale=1.0,
learn_initial_query=False,
anchor_image_size=None,
with_box_refine=True,
is_encoder_decoder=True,
# Loss
matcher_alpha=0.25,
matcher_gamma=2.0,
matcher_class_cost=2.0,
matcher_bbox_cost=5.0,
matcher_giou_cost=2.0,
use_focal_loss=True,
auxiliary_loss=True,
focal_loss_alpha=0.75,
focal_loss_gamma=2.0,
weight_loss_vfl=1.0,
weight_loss_bbox=5.0,
weight_loss_giou=2.0,
eos_coefficient=1e-4,
decoder_n_levels=3, # default value
decoder_offset_scale=0.5, # default value
decoder_method="default",
**kwargs,
):
self.initializer_range = initializer_range
self.initializer_bias_prior_prob = initializer_bias_prior_prob
self.layer_norm_eps = layer_norm_eps
self.batch_norm_eps = batch_norm_eps
# backbone
if backbone_config is None and backbone is None:
logger.info(
"`backbone_config` and `backbone` are `None`. Initializing the config with the default `RTDetrV2-ResNet` backbone."
)
backbone_model_type = "rt_detr_resnet"
config_class = CONFIG_MAPPING[backbone_model_type]
# this will map it to RTDetrResNetConfig
# note: we can instead create RTDetrV2ResNetConfig but it will be exactly the same as V1
# and we would need to create RTDetrV2ResNetModel
backbone_config = config_class(
num_channels=3,
embedding_size=64,
hidden_sizes=[256, 512, 1024, 2048],
depths=[3, 4, 6, 3],
layer_type="bottleneck",
hidden_act="relu",
downsample_in_first_stage=False,
downsample_in_bottleneck=False,
out_features=None,
out_indices=[2, 3, 4],
)
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.pop("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.freeze_backbone_batch_norms = freeze_backbone_batch_norms
self.backbone_kwargs = backbone_kwargs
# encoder
self.encoder_hidden_dim = encoder_hidden_dim
self.encoder_in_channels = encoder_in_channels
self.feat_strides = feat_strides
self.encoder_ffn_dim = encoder_ffn_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
self.encode_proj_layers = encode_proj_layers
self.encoder_layers = encoder_layers
self.positional_encoding_temperature = positional_encoding_temperature
self.eval_size = eval_size
self.normalize_before = normalize_before
self.encoder_activation_function = encoder_activation_function
self.activation_function = activation_function
self.hidden_expansion = hidden_expansion
self.num_queries = num_queries
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_in_channels = decoder_in_channels
self.num_feature_levels = num_feature_levels
self.decoder_n_points = decoder_n_points
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.decoder_activation_function = decoder_activation_function
self.attention_dropout = attention_dropout
self.num_denoising = num_denoising
self.label_noise_ratio = label_noise_ratio
self.box_noise_scale = box_noise_scale
self.learn_initial_query = learn_initial_query
self.anchor_image_size = anchor_image_size
self.auxiliary_loss = auxiliary_loss
self.with_box_refine = with_box_refine
# Loss
self.matcher_alpha = matcher_alpha
self.matcher_gamma = matcher_gamma
self.matcher_class_cost = matcher_class_cost
self.matcher_bbox_cost = matcher_bbox_cost
self.matcher_giou_cost = matcher_giou_cost
self.use_focal_loss = use_focal_loss
self.focal_loss_alpha = focal_loss_alpha
self.focal_loss_gamma = focal_loss_gamma
self.weight_loss_vfl = weight_loss_vfl
self.weight_loss_bbox = weight_loss_bbox
self.weight_loss_giou = weight_loss_giou
self.eos_coefficient = eos_coefficient
if not hasattr(self, "d_model"):
self.d_model = d_model
if not hasattr(self, "encoder_attention_heads"):
self.encoder_attention_heads = encoder_attention_heads
# add the new attributes with the given values or defaults
self.decoder_n_levels = decoder_n_levels
self.decoder_offset_scale = decoder_offset_scale
self.decoder_method = decoder_method
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
self.tie_encoder_decoder = True
__all__ = ["RTDetrV2Config"]
| RTDetrV2Config |
python | django__django | tests/db_functions/math/test_log.py | {
"start": 180,
"end": 1858
} | class ____(TestCase):
def test_null(self):
IntegerModel.objects.create(big=100)
obj = IntegerModel.objects.annotate(
null_log_small=Log("small", "normal"),
null_log_normal=Log("normal", "big"),
null_log_big=Log("big", "normal"),
).first()
self.assertIsNone(obj.null_log_small)
self.assertIsNone(obj.null_log_normal)
self.assertIsNone(obj.null_log_big)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("12.9"), n2=Decimal("3.6"))
obj = DecimalModel.objects.annotate(n_log=Log("n1", "n2")).first()
self.assertIsInstance(obj.n_log, Decimal)
self.assertAlmostEqual(obj.n_log, Decimal(math.log(obj.n2, obj.n1)))
def test_float(self):
FloatModel.objects.create(f1=2.0, f2=4.0)
obj = FloatModel.objects.annotate(f_log=Log("f1", "f2")).first()
self.assertIsInstance(obj.f_log, float)
self.assertAlmostEqual(obj.f_log, math.log(obj.f2, obj.f1))
def test_integer(self):
IntegerModel.objects.create(small=4, normal=8, big=2)
obj = IntegerModel.objects.annotate(
small_log=Log("small", "big"),
normal_log=Log("normal", "big"),
big_log=Log("big", "big"),
).first()
self.assertIsInstance(obj.small_log, float)
self.assertIsInstance(obj.normal_log, float)
self.assertIsInstance(obj.big_log, float)
self.assertAlmostEqual(obj.small_log, math.log(obj.big, obj.small))
self.assertAlmostEqual(obj.normal_log, math.log(obj.big, obj.normal))
self.assertAlmostEqual(obj.big_log, math.log(obj.big, obj.big))
| LogTests |
python | numba__numba | numba/tests/test_datamodel.py | {
"start": 757,
"end": 820
} | class ____(test_factory()):
fe_type = types.uint32
| TestUInt32 |
python | getsentry__sentry | src/sentry/snuba/query_subscriptions/consumer.py | {
"start": 7299,
"end": 7355
} | class ____(InvalidMessageError):
pass
| InvalidSchemaError |
python | pandas-dev__pandas | pandas/tests/api/test_api.py | {
"start": 937,
"end": 5879
} | class ____(Base):
# these are optionally imported based on testing
# & need to be ignored
ignored = ["tests", "locale", "conftest", "_version_meson"]
# top-level sub-packages
public_lib = [
"api",
"arrays",
"options",
"test",
"testing",
"errors",
"plotting",
"io",
"tseries",
]
private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"]
# misc
misc = ["IndexSlice", "NaT", "NA"]
# top-level classes
classes = [
"ArrowDtype",
"Categorical",
"CategoricalIndex",
"DataFrame",
"DateOffset",
"DatetimeIndex",
"ExcelFile",
"ExcelWriter",
"Flags",
"Grouper",
"HDFStore",
"Index",
"MultiIndex",
"Period",
"PeriodIndex",
"RangeIndex",
"Series",
"SparseDtype",
"StringDtype",
"Timedelta",
"TimedeltaIndex",
"Timestamp",
"Interval",
"IntervalIndex",
"CategoricalDtype",
"PeriodDtype",
"IntervalDtype",
"DatetimeTZDtype",
"BooleanDtype",
"Int8Dtype",
"Int16Dtype",
"Int32Dtype",
"Int64Dtype",
"UInt8Dtype",
"UInt16Dtype",
"UInt32Dtype",
"UInt64Dtype",
"Float32Dtype",
"Float64Dtype",
"NamedAgg",
]
# these are already deprecated; awaiting removal
deprecated_classes: list[str] = []
# external modules exposed in pandas namespace
modules: list[str] = []
# top-level functions
funcs = [
"array",
"bdate_range",
"col",
"concat",
"crosstab",
"cut",
"date_range",
"interval_range",
"eval",
"factorize",
"get_dummies",
"from_dummies",
"infer_freq",
"isna",
"isnull",
"lreshape",
"melt",
"notna",
"notnull",
"offsets",
"merge",
"merge_ordered",
"merge_asof",
"period_range",
"pivot",
"pivot_table",
"qcut",
"show_versions",
"timedelta_range",
"unique",
"wide_to_long",
]
# top-level option funcs
funcs_option = [
"reset_option",
"describe_option",
"get_option",
"option_context",
"set_option",
"set_eng_float_format",
]
# top-level read_* funcs
funcs_read = [
"read_clipboard",
"read_csv",
"read_excel",
"read_fwf",
"read_hdf",
"read_html",
"read_xml",
"read_json",
"read_pickle",
"read_sas",
"read_sql",
"read_sql_query",
"read_sql_table",
"read_stata",
"read_table",
"read_feather",
"read_parquet",
"read_orc",
"read_spss",
"read_iceberg",
]
# top-level json funcs
funcs_json = ["json_normalize"]
# top-level to_* funcs
funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"]
# top-level to deprecate in the future
deprecated_funcs_in_future: list[str] = []
# these are already deprecated; awaiting removal
deprecated_funcs: list[str] = []
# private modules in pandas namespace
private_modules = [
"_config",
"_libs",
"_is_numpy_dev",
"_pandas_datetime_CAPI",
"_pandas_parser_CAPI",
"_testing",
"_typing",
]
if not pd._built_with_meson:
private_modules.append("_version")
def test_api(self):
checkthese = (
self.public_lib
+ self.private_lib
+ self.misc
+ self.modules
+ self.classes
+ self.funcs
+ self.funcs_option
+ self.funcs_read
+ self.funcs_json
+ self.funcs_to
+ self.private_modules
)
self.check(namespace=pd, expected=checkthese, ignored=self.ignored)
def test_api_all(self):
expected = set(
self.public_lib
+ self.misc
+ self.modules
+ self.classes
+ self.funcs
+ self.funcs_option
+ self.funcs_read
+ self.funcs_json
+ self.funcs_to
) - set(self.deprecated_classes)
actual = set(pd.__all__)
extraneous = actual - expected
assert not extraneous
missing = expected - actual
assert not missing
def test_depr(self):
deprecated_list = (
self.deprecated_classes
+ self.deprecated_funcs
+ self.deprecated_funcs_in_future
)
for depr in deprecated_list:
with tm.assert_produces_warning(FutureWarning):
_ = getattr(pd, depr)
| TestPDApi |
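The test_api_all check above boils down to comparing a module's declared __all__ with the names actually present in its namespace, which catches both stale entries and missing exports. A minimal standalone sketch of the same check, assuming nothing beyond the standard library (json is used only as a convenient target module):

import json

declared = set(json.__all__)
present = {name for name in dir(json) if not name.startswith("_")}
# Stale entries: listed in __all__ but absent from the namespace.
assert not declared - present, f"declared but missing: {declared - present}"
# Public names that exist but are not exported via __all__ (e.g. submodules).
print(sorted(present - declared))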
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 3619,
"end": 5342
} | class ____(XYGlyph, LineGlyph, FillGlyph, HatchGlyph):
''' Base class for glyphs that are simple markers with line and
fill properties, located at an (x, y) location with a specified
size.
See :class:`~bokeh.models.glyphs.Scatter` for an overview
of all the builtin marker types.
.. note::
For simplicity, all markers have both line and fill properties
        declared; however, some marker types (`asterisk`, `cross`, `x`)
only draw lines. For these markers, the fill values are simply
ignored.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
_args = ('x', 'y', 'size', 'angle')
x = NumberSpec(default=field("x"), help="""
The x-axis coordinates for the center of the markers.
""")
y = NumberSpec(default=field("y"), help="""
The y-axis coordinates for the center of the markers.
""")
hit_dilation = Size(default=1.0, help="""
The factor by which to dilate the hit radius
which is responsible for defining the range in which a
marker responds to interactions with the Hover and Tap
tools.
""")
size = SizeSpec(default=4, help="""
The size (diameter) values for the markers in screen space units.
""")
angle = AngleSpec(default=0.0, help="""
The angles to rotate the markers.
""")
line_props = Include(LineProps, help="""
The {prop} values for the markers.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the markers.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the markers.
""")
@abstract
| Marker |
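Marker's x, y, size, and angle properties are specs: each accepts either a reference to a data-source column (the field(...) defaults above) or a literal value applied to every glyph. A toy, bokeh-free sketch of that resolution rule; every name below is illustrative, not bokeh's actual implementation:

from dataclasses import dataclass
from typing import Union

@dataclass(frozen=True)
class Field:
    name: str

Spec = Union[Field, float]

def resolve(spec: Spec, row: dict) -> float:
    # A Field indexes into the data row; a literal applies uniformly.
    return row[spec.name] if isinstance(spec, Field) else spec

row = {"x": 1.0, "y": 2.0}
print(resolve(Field("x"), row))  # 1.0 -> per-point value from the column
print(resolve(4.0, row))         # 4.0 -> the same size for every marker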
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_proportion_of_non_null_values_to_be_between.py | {
"start": 2807,
"end": 17884
} | class ____(ColumnAggregateExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
For example, in a column containing [1, 2, None, 3, None, None, 4, 4, 4, 4], there are \
7 non-null values and 10 total values for a proportion of 0.7.
ExpectColumnProportionOfNonNullValuesToBeBetween is a \
Column Aggregate Expectation.
Column Aggregate Expectations are one of the most common types of Expectation.
They are evaluated for a single column, and produce an aggregate Metric, such as a \
mean, standard deviation, number of unique values, column type, etc.
If that Metric meets the conditions you set, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
min_value (float or None): \
{MIN_VALUE_DESCRIPTION}
max_value (float or None): \
{MAX_VALUE_DESCRIPTION}
strict_min (boolean): \
{STRICT_MIN_DESCRIPTION} default=False
strict_max (boolean): \
{STRICT_MAX_DESCRIPTION} default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the \
output without modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to \
result_format, catch_exceptions, and meta.
Notes:
* min_value and max_value are both inclusive unless \
strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
* observed_value field in the result object is customized for this expectation to be \
a float representing the proportion of non-null values in the column
See Also:
[ExpectColumnProportionOfUniqueValuesToBeBetween](https://greatexpectations.io/expectations/expect_column_proportion_of_unique_values_to_be_between)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 "aaa" 1
1 "abb" None
2 "acc" 1
3 None 3
Code Examples:
Passing Case:
Input:
ExpectColumnProportionOfNonNullValuesToBeBetween(
column="test",
min_value=0,
max_value=0.8
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 0.75
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnProportionOfNonNullValuesToBeBetween(
column="test2",
min_value=0.3,
max_value=0.5,
strict_min=False,
strict_max=True
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 0.75
}},
"meta": {{}},
"success": false
}}
"""
min_value: Optional[Union[float, SuiteParameterDict]] = pydantic.Field(
default=None, description=MIN_VALUE_DESCRIPTION
)
max_value: Optional[Union[float, SuiteParameterDict]] = pydantic.Field(
default=None, description=MAX_VALUE_DESCRIPTION
)
strict_min: bool = pydantic.Field(default=False, description=STRICT_MIN_DESCRIPTION)
strict_max: bool = pydantic.Field(default=False, description=STRICT_MAX_DESCRIPTION)
library_metadata = {
"maturity": "production",
"tags": ["core expectation", "column aggregate expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
metric_dependencies = ("column.non_null_proportion",)
success_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
)
args_keys = (
"column",
"min_value",
"max_value",
"strict_min",
"strict_max",
)
class Config:
title = "Expect column proportion of non-null values to be between"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectColumnProportionOfNonNullValuesToBeBetween]
) -> None:
ColumnAggregateExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _get_min_max_string(cls, renderer_configuration: RendererConfiguration) -> str:
params = renderer_configuration.params
if not params.min_value and not params.max_value:
return "may have any proportion of non-null values."
else:
at_least_str = "greater than or equal to"
if params.strict_min:
at_least_str = cls._get_strict_min_string(
renderer_configuration=renderer_configuration
)
at_most_str = "less than or equal to"
if params.strict_max:
at_most_str = cls._get_strict_max_string(
renderer_configuration=renderer_configuration
)
if not params.min_value:
return f"proportion of non-null values must be {at_most_str} $max_value."
elif not params.max_value:
return f"proportion of non-null values must be {at_least_str} $min_value."
elif params.min_value.value != params.max_value.value:
return (
f"proportion of non-null values must be {at_least_str} $min_value "
f"and {at_most_str} $max_value."
)
else:
return "proportion of non-null values must be exactly $min_value."
@classmethod
@override
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("min_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("max_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
("strict_min", RendererValueType.BOOLEAN),
("strict_max", RendererValueType.BOOLEAN),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
template_str = cls._get_min_max_string(renderer_configuration)
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@override
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
) -> list[RenderedStringTemplateContent]:
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = (
substitute_none_for_missing(
configuration.kwargs,
[
"column",
"min_value",
"max_value",
"row_condition",
"condition_parser",
"strict_min",
"strict_max",
],
)
if configuration
else {}
)
if params["min_value"] is None and params["max_value"] is None:
template_str = "may have any proportion of non-null values."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is None:
template_str = f"proportion of non-null values must be {at_most_str} $max_value."
elif params["max_value"] is None:
template_str = f"proportion of non-null values must be {at_least_str} $min_value."
elif params["min_value"] != params["max_value"]:
template_str = (
f"proportion of non-null values must be {at_least_str} $min_value "
f"and {at_most_str} $max_value."
)
else:
template_str = "proportion of non-null values must be exactly $min_value."
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": template_str,
"params": params,
"styling": styling,
},
),
]
@classmethod
@renderer(
renderer_type=LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_DISTINCT_PERCENT_ROW
)
def _descriptive_column_properties_table_distinct_percent_row_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
) -> list[Union[RenderedStringTemplateContent, str]]:
assert result, "Must pass in result."
observed_value = result.result["observed_value"]
template_string_object = RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": "Non-null (%)",
"tooltip": {"content": "expect_column_proportion_of_non_null_values_to_be_between"},
},
)
if not observed_value:
return [template_string_object, "--"]
else:
return [template_string_object, f"{100 * observed_value:.1f}%"]
@override
def _validate(
self,
metrics: dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
return self._validate_metric_value_between(
metric_name="column.non_null_proportion",
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
| ExpectColumnProportionOfNonNullValuesToBeBetween |
python | celery__celery | t/unit/worker/test_control.py | {
"start": 3287,
"end": 28525
} | class ____:
def setup_method(self):
self.panel = self.create_panel(consumer=Consumer(self.app))
@self.app.task(name='c.unittest.mytask', rate_limit=200, shared=False)
def mytask():
pass
self.mytask = mytask
def create_state(self, **kwargs):
kwargs.setdefault('app', self.app)
kwargs.setdefault('hostname', hostname)
kwargs.setdefault('tset', set)
return AttributeDict(kwargs)
def create_panel(self, **kwargs):
return self.app.control.mailbox.Node(
hostname=hostname,
state=self.create_state(**kwargs),
handlers=control.Panel.data,
)
def test_enable_events(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
evd = consumer.event_dispatcher
evd.groups = set()
panel.handle('enable_events')
assert not evd.groups
evd.groups = {'worker'}
panel.handle('enable_events')
assert 'task' in evd.groups
evd.groups = {'task'}
assert 'already enabled' in panel.handle('enable_events')['ok']
def test_disable_events(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
evd = consumer.event_dispatcher
evd.enabled = True
evd.groups = {'task'}
panel.handle('disable_events')
assert 'task' not in evd.groups
assert 'already disabled' in panel.handle('disable_events')['ok']
def test_clock(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
panel.state.app.clock.value = 313
x = panel.handle('clock')
assert x['clock'] == 313
def test_hello(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
panel.state.app.clock.value = 313
panel.state.hostname = 'elaine@vandelay.com'
worker_state.revoked.add('revoked1')
try:
assert panel.handle('hello', {
'from_node': 'elaine@vandelay.com',
}) is None
x = panel.handle('hello', {
'from_node': 'george@vandelay.com',
})
assert x['clock'] == 314 # incremented
x = panel.handle('hello', {
'from_node': 'george@vandelay.com',
'revoked': {'1234', '4567', '891'}
})
assert 'revoked1' in x['revoked']
assert '1234' in x['revoked']
assert '4567' in x['revoked']
assert '891' in x['revoked']
assert x['clock'] == 315 # incremented
finally:
worker_state.revoked.discard('revoked1')
def test_hello_does_not_send_expired_revoked_items(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
panel.state.app.clock.value = 313
panel.state.hostname = 'elaine@vandelay.com'
# Add an expired revoked item to the revoked set.
worker_state.revoked.add(
'expired_in_past',
now=time.monotonic() - REVOKE_EXPIRES - 1
)
x = panel.handle('hello', {
'from_node': 'george@vandelay.com',
'revoked': {'1234', '4567', '891'}
})
assert 'expired_in_past' not in x['revoked']
def test_conf(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
panel.app = self.app
panel.app.finalize()
self.app.conf.some_key6 = 'hello world'
x = panel.handle('dump_conf')
assert 'some_key6' in x
def test_election(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
consumer.gossip = Mock()
panel.handle(
'election', {'id': 'id', 'topic': 'topic', 'action': 'action'},
)
consumer.gossip.election.assert_called_with('id', 'topic', 'action')
def test_election__no_gossip(self):
consumer = Mock(name='consumer')
consumer.gossip = None
panel = self.create_panel(consumer=consumer)
panel.handle(
'election', {'id': 'id', 'topic': 'topic', 'action': 'action'},
)
def test_heartbeat(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
event_dispatcher = consumer.event_dispatcher
event_dispatcher.enabled = True
panel.handle('heartbeat')
assert ('worker-heartbeat',) in event_dispatcher.send.call_args
def test_time_limit(self):
panel = self.create_panel(consumer=Mock())
r = panel.handle('time_limit', arguments={
'task_name': self.mytask.name, 'hard': 30, 'soft': 10})
assert self.mytask.time_limit == 30
assert self.mytask.soft_time_limit == 10
assert 'ok' in r
r = panel.handle('time_limit', arguments={
'task_name': self.mytask.name, 'hard': None, 'soft': None})
assert self.mytask.time_limit is None
assert self.mytask.soft_time_limit is None
assert 'ok' in r
r = panel.handle('time_limit', arguments={
'task_name': '248e8afya9s8dh921eh928', 'hard': 30})
assert 'error' in r
def test_active_queues(self):
import kombu
x = kombu.Consumer(self.app.connection_for_read(),
[kombu.Queue('foo', kombu.Exchange('foo'), 'foo'),
kombu.Queue('bar', kombu.Exchange('bar'), 'bar')],
auto_declare=False)
consumer = Mock()
consumer.task_consumer = x
panel = self.create_panel(consumer=consumer)
r = panel.handle('active_queues')
assert list(sorted(q['name'] for q in r)) == ['bar', 'foo']
def test_active_queues__empty(self):
consumer = Mock(name='consumer')
panel = self.create_panel(consumer=consumer)
consumer.task_consumer = None
assert not panel.handle('active_queues')
def test_dump_tasks(self):
info = '\n'.join(self.panel.handle('dump_tasks'))
assert 'mytask' in info
assert 'rate_limit=200' in info
def test_dump_tasks2(self):
prev, control.DEFAULT_TASK_INFO_ITEMS = (
control.DEFAULT_TASK_INFO_ITEMS, [])
try:
info = '\n'.join(self.panel.handle('dump_tasks'))
assert 'mytask' in info
assert 'rate_limit=200' not in info
finally:
control.DEFAULT_TASK_INFO_ITEMS = prev
def test_stats(self):
prev_count, worker_state.total_count = worker_state.total_count, 100
try:
assert self.panel.handle('stats')['total'] == 100
finally:
worker_state.total_count = prev_count
def test_report(self):
self.panel.handle('report')
def test_active(self):
r = Request(
self.TaskMessage(self.mytask.name, 'do re mi'),
app=self.app,
)
worker_state.active_requests.add(r)
try:
assert self.panel.handle('dump_active')
finally:
worker_state.active_requests.discard(r)
def test_active_safe(self):
kwargsrepr = '<anything>'
r = Request(
self.TaskMessage(self.mytask.name, id='do re mi',
kwargsrepr=kwargsrepr),
app=self.app,
)
worker_state.active_requests.add(r)
try:
active_resp = self.panel.handle('dump_active', {'safe': True})
assert active_resp[0]['kwargs'] == kwargsrepr
finally:
worker_state.active_requests.discard(r)
def test_pool_grow(self):
class MockPool:
def __init__(self, size=1):
self.size = size
def grow(self, n=1):
self.size += n
def shrink(self, n=1):
self.size -= n
@property
def num_processes(self):
return self.size
consumer = Consumer(self.app)
consumer.prefetch_multiplier = 8
consumer.qos = Mock(name='qos')
consumer.pool = MockPool(1)
panel = self.create_panel(consumer=consumer)
panel.handle('pool_grow')
assert consumer.pool.size == 2
consumer.qos.increment_eventually.assert_called_with(8)
assert consumer.initial_prefetch_count == 16
panel.handle('pool_shrink')
assert consumer.pool.size == 1
consumer.qos.decrement_eventually.assert_called_with(8)
assert consumer.initial_prefetch_count == 8
panel.state.consumer = Mock()
panel.state.consumer.controller = Mock()
r = panel.handle('pool_grow')
assert 'error' in r
r = panel.handle('pool_shrink')
assert 'error' in r
def test_add__cancel_consumer(self):
class MockConsumer:
queues = []
canceled = []
consuming = False
hub = Mock(name='hub')
def add_queue(self, queue):
self.queues.append(queue.name)
def consume(self):
self.consuming = True
def cancel_by_queue(self, queue):
self.canceled.append(queue)
def consuming_from(self, queue):
return queue in self.queues
consumer = Consumer(self.app)
consumer.task_consumer = MockConsumer()
panel = self.create_panel(consumer=consumer)
panel.handle('add_consumer', {'queue': 'MyQueue'})
assert 'MyQueue' in consumer.task_consumer.queues
assert consumer.task_consumer.consuming
panel.handle('add_consumer', {'queue': 'MyQueue'})
panel.handle('cancel_consumer', {'queue': 'MyQueue'})
assert 'MyQueue' in consumer.task_consumer.canceled
def test_revoked(self):
worker_state.revoked.clear()
worker_state.revoked.add('a1')
worker_state.revoked.add('a2')
try:
assert sorted(self.panel.handle('dump_revoked')) == ['a1', 'a2']
finally:
worker_state.revoked.clear()
def test_dump_schedule(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
assert not panel.handle('dump_schedule')
r = Request(
self.TaskMessage(self.mytask.name, 'CAFEBABE'),
app=self.app,
)
consumer.timer.schedule.enter_at(
consumer.timer.Entry(lambda x: x, (r,)),
datetime.now() + timedelta(seconds=10))
consumer.timer.schedule.enter_at(
consumer.timer.Entry(lambda x: x, (object(),)),
datetime.now() + timedelta(seconds=10))
assert panel.handle('dump_schedule')
def test_dump_reserved(self):
consumer = Consumer(self.app)
req = Request(
self.TaskMessage(self.mytask.name, args=(2, 2)), app=self.app,
) # ^ need to keep reference for reserved_tasks WeakSet.
worker_state.task_reserved(req)
try:
panel = self.create_panel(consumer=consumer)
response = panel.handle('dump_reserved', {'safe': True})
assert response[0]['name'] == self.mytask.name
assert response[0]['hostname'] == socket.gethostname()
worker_state.reserved_requests.clear()
assert not panel.handle('dump_reserved')
finally:
worker_state.reserved_requests.clear()
def test_rate_limit_invalid_rate_limit_string(self):
e = self.panel.handle('rate_limit', arguments={
'task_name': 'tasks.add', 'rate_limit': 'x1240301#%!'})
assert 'Invalid rate limit string' in e.get('error')
def test_rate_limit(self):
class xConsumer:
reset = False
def reset_rate_limits(self):
self.reset = True
consumer = xConsumer()
panel = self.create_panel(app=self.app, consumer=consumer)
task = self.app.tasks[self.mytask.name]
panel.handle('rate_limit', arguments={'task_name': task.name,
'rate_limit': '100/m'})
assert task.rate_limit == '100/m'
assert consumer.reset
consumer.reset = False
panel.handle('rate_limit', arguments={
'task_name': task.name,
'rate_limit': 0,
})
assert task.rate_limit == 0
assert consumer.reset
def test_rate_limit_nonexistant_task(self):
self.panel.handle('rate_limit', arguments={
'task_name': 'xxxx.does.not.exist',
'rate_limit': '1000/s'})
def test_unexposed_command(self):
with pytest.raises(KeyError):
self.panel.handle('foo', arguments={})
def test_revoke_with_name(self):
tid = uuid()
m = {
'method': 'revoke',
'destination': hostname,
'arguments': {
'task_id': tid,
'task_name': self.mytask.name,
},
}
self.panel.handle_message(m, None)
assert tid in revoked
def test_revoke_with_name_not_in_registry(self):
tid = uuid()
m = {
'method': 'revoke',
'destination': hostname,
'arguments': {
'task_id': tid,
'task_name': 'xxxxxxxxx33333333388888',
},
}
self.panel.handle_message(m, None)
assert tid in revoked
def test_revoke(self):
tid = uuid()
m = {
'method': 'revoke',
'destination': hostname,
'arguments': {
'task_id': tid,
},
}
self.panel.handle_message(m, None)
assert tid in revoked
m = {
'method': 'revoke',
'destination': 'does.not.exist',
'arguments': {
'task_id': tid + 'xxx',
},
}
self.panel.handle_message(m, None)
assert tid + 'xxx' not in revoked
def test_revoke_terminate(self):
request = Mock()
request.id = tid = uuid()
state = self.create_state()
state.consumer = Mock()
worker_state.task_reserved(request)
try:
r = control.revoke(state, tid, terminate=True)
assert tid in revoked
assert request.terminate.call_count
assert 'terminate:' in r['ok']
# unknown task id only revokes
r = control.revoke(state, uuid(), terminate=True)
assert 'tasks unknown' in r['ok']
finally:
worker_state.task_ready(request)
@pytest.mark.parametrize(
"terminate", [True, False],
)
def test_revoke_by_stamped_headers_terminate(self, terminate):
request = Mock()
request.id = uuid()
request.options = stamped_header = {'stamp': 'foo'}
request.options['stamped_headers'] = ['stamp']
state = self.create_state()
state.consumer = Mock()
worker_state.task_reserved(request)
try:
worker_state.revoked_stamps.clear()
assert stamped_header.keys() != revoked_stamps.keys()
control.revoke_by_stamped_headers(state, stamped_header, terminate=terminate)
assert stamped_header.keys() == revoked_stamps.keys()
for key in stamped_header.keys():
assert maybe_list(stamped_header[key]) == revoked_stamps[key]
finally:
worker_state.task_ready(request)
@pytest.mark.parametrize(
"header_to_revoke",
[
{'header_A': 'value_1'},
{'header_B': ['value_2', 'value_3']},
{'header_C': ('value_2', 'value_3')},
{'header_D': {'value_2', 'value_3'}},
{'header_E': [1, '2', 3.0]},
],
)
def test_revoke_by_stamped_headers(self, header_to_revoke):
ids = []
        # Create more than one request with the same stamped header
for _ in range(2):
headers = {
"id": uuid(),
"task": self.mytask.name,
"stamped_headers": header_to_revoke.keys(),
"stamps": header_to_revoke,
}
ids.append(headers["id"])
message = self.TaskMessage(
self.mytask.name,
"do re mi",
)
message.headers.update(headers)
request = Request(
message,
app=self.app,
)
# Add the request to the active_requests so the request is found
# when the revoke_by_stamped_headers is called
worker_state.active_requests.add(request)
worker_state.task_reserved(request)
state = self.create_state()
state.consumer = Mock()
# Revoke by header
revoked_stamps.clear()
r = control.revoke_by_stamped_headers(state, header_to_revoke, terminate=True)
# Check all of the requests were revoked by a single header
for header, stamp in header_to_revoke.items():
assert header in r['ok']
for s in maybe_list(stamp):
assert str(s) in r['ok']
assert header_to_revoke.keys() == revoked_stamps.keys()
for key in header_to_revoke.keys():
assert list(maybe_list(header_to_revoke[key])) == revoked_stamps[key]
revoked_stamps.clear()
def test_revoke_return_value_terminate_true(self):
header_to_revoke = {'foo': 'bar'}
headers = {
"id": uuid(),
"task": self.mytask.name,
"stamped_headers": header_to_revoke.keys(),
"stamps": header_to_revoke,
}
message = self.TaskMessage(
self.mytask.name,
"do re mi",
)
message.headers.update(headers)
request = Request(
message,
app=self.app,
)
worker_state.active_requests.add(request)
worker_state.task_reserved(request)
state = self.create_state()
state.consumer = Mock()
r_headers = control.revoke_by_stamped_headers(state, header_to_revoke, terminate=True)
# revoke & revoke_by_stamped_headers are not aligned anymore in their return values
assert "{'foo': {'bar'}}" in r_headers["ok"]
def test_autoscale(self):
self.panel.state.consumer = Mock()
self.panel.state.consumer.controller = Mock()
sc = self.panel.state.consumer.controller.autoscaler = Mock()
sc.update.return_value = 10, 2
m = {'method': 'autoscale',
'destination': hostname,
'arguments': {'max': '10', 'min': '2'}}
r = self.panel.handle_message(m, None)
assert 'ok' in r
self.panel.state.consumer.controller.autoscaler = None
r = self.panel.handle_message(m, None)
assert 'error' in r
def test_ping(self):
m = {'method': 'ping',
'destination': hostname}
r = self.panel.handle_message(m, None)
assert r == {'ok': 'pong'}
def test_shutdown(self):
m = {'method': 'shutdown',
'destination': hostname}
with pytest.raises(SystemExit) as excinfo:
self.panel.handle_message(m, None)
assert excinfo.value.code == 0
def test_panel_reply(self):
replies = []
class _Node(pidbox.Node):
def reply(self, data, exchange, routing_key, **kwargs):
replies.append(data)
panel = _Node(
hostname=hostname,
state=self.create_state(consumer=Consumer(self.app)),
handlers=control.Panel.data,
mailbox=self.app.control.mailbox,
)
r = panel.dispatch('ping', reply_to={
'exchange': 'x',
'routing_key': 'x',
})
assert r == {'ok': 'pong'}
assert replies[0] == {panel.hostname: {'ok': 'pong'}}
def test_pool_restart(self):
consumer = Consumer(self.app)
consumer.controller = _WC(app=self.app)
consumer.controller.consumer = consumer
consumer.controller.pool.restart = Mock()
consumer.reset_rate_limits = Mock(name='reset_rate_limits()')
consumer.update_strategies = Mock(name='update_strategies()')
consumer.event_dispatcher = Mock(name='evd')
panel = self.create_panel(consumer=consumer)
assert panel.state.consumer.controller.consumer is consumer
panel.app = self.app
_import = panel.app.loader.import_from_cwd = Mock()
_reload = Mock()
with pytest.raises(ValueError):
panel.handle('pool_restart', {'reloader': _reload})
self.app.conf.worker_pool_restarts = True
panel.handle('pool_restart', {'reloader': _reload})
consumer.controller.pool.restart.assert_called()
consumer.reset_rate_limits.assert_called_with()
consumer.update_strategies.assert_called_with()
_reload.assert_not_called()
_import.assert_not_called()
consumer.controller.pool.restart.side_effect = NotImplementedError()
panel.handle('pool_restart', {'reloader': _reload})
consumer.controller.consumer = None
panel.handle('pool_restart', {'reloader': _reload})
@pytest.mark.skipif(IS_PYPY, reason="Patch for sys.modules doesn't work on PyPy correctly")
@patch('celery.worker.worker.logger.debug')
def test_pool_restart_import_modules(self, _debug):
consumer = Consumer(self.app)
consumer.controller = _WC(app=self.app)
consumer.controller.consumer = consumer
consumer.controller.pool.restart = Mock()
consumer.reset_rate_limits = Mock(name='reset_rate_limits()')
consumer.update_strategies = Mock(name='update_strategies()')
panel = self.create_panel(consumer=consumer)
panel.app = self.app
assert panel.state.consumer.controller.consumer is consumer
_import = consumer.controller.app.loader.import_from_cwd = Mock()
_reload = Mock()
self.app.conf.worker_pool_restarts = True
with patch('sys.modules'):
panel.handle('pool_restart', {
'modules': ['foo', 'bar'],
'reloader': _reload,
})
consumer.controller.pool.restart.assert_called()
consumer.reset_rate_limits.assert_called_with()
consumer.update_strategies.assert_called_with()
_reload.assert_not_called()
_import.assert_has_calls([call('bar'), call('foo')], any_order=True)
assert _import.call_count == 2
def test_pool_restart_reload_modules(self):
consumer = Consumer(self.app)
consumer.controller = _WC(app=self.app)
consumer.controller.consumer = consumer
consumer.controller.pool.restart = Mock()
consumer.reset_rate_limits = Mock(name='reset_rate_limits()')
consumer.update_strategies = Mock(name='update_strategies()')
panel = self.create_panel(consumer=consumer)
panel.app = self.app
_import = panel.app.loader.import_from_cwd = Mock()
_reload = Mock()
self.app.conf.worker_pool_restarts = True
with patch.dict(sys.modules, {'foo': None}):
panel.handle('pool_restart', {
'modules': ['foo'],
'reload': False,
'reloader': _reload,
})
consumer.controller.pool.restart.assert_called()
_reload.assert_not_called()
_import.assert_not_called()
_import.reset_mock()
_reload.reset_mock()
consumer.controller.pool.restart.reset_mock()
panel.handle('pool_restart', {
'modules': ['foo'],
'reload': True,
'reloader': _reload,
})
consumer.controller.pool.restart.assert_called()
_reload.assert_called()
_import.assert_not_called()
def test_query_task(self):
consumer = Consumer(self.app)
consumer.controller = _WC(app=self.app)
consumer.controller.consumer = consumer
panel = self.create_panel(consumer=consumer)
panel.app = self.app
req1 = Request(
self.TaskMessage(self.mytask.name, args=(2, 2)),
app=self.app,
)
worker_state.task_reserved(req1)
try:
assert not panel.handle('query_task', {'ids': {'1daa'}})
ret = panel.handle('query_task', {'ids': {req1.id}})
assert req1.id in ret
assert ret[req1.id][0] == 'reserved'
worker_state.active_requests.add(req1)
try:
ret = panel.handle('query_task', {'ids': {req1.id}})
assert ret[req1.id][0] == 'active'
finally:
worker_state.active_requests.clear()
ret = panel.handle('query_task', {'ids': {req1.id}})
assert ret[req1.id][0] == 'reserved'
finally:
worker_state.reserved_requests.clear()
| test_ControlPanel |
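Nearly every test above funnels through Panel.handle, a dispatch from a command name to a registered handler. A toy reduction of that pattern, where the registry and command below are illustrative rather than celery's actual implementation:

handlers = {}

def register(fn):
    handlers[fn.__name__] = fn
    return fn

@register
def ping(state, **arguments):
    return {"ok": "pong"}

def handle(method, arguments=None, state=None):
    # An unknown method raises KeyError, mirroring test_unexposed_command.
    return handlers[method](state, **(arguments or {}))

print(handle("ping"))  # {'ok': 'pong'}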
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 67216,
"end": 70340
} | class ____:
def test_test_interning(self):
a0 = np.bool(0)
b0 = np.bool(False)
assert_(a0 is b0)
a1 = np.bool(1)
b1 = np.bool(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o + 1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o + 1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.str_)
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
| TestBool |
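check_count_nonzero enumerates every True/False pattern of a given length by reading the binary digits of a counter: bit k of i decides entry k of the boolean array. One iteration worked out by hand:

i, length = 0b1011, 4
powers = [2 ** k for k in range(length)]      # [1, 2, 4, 8]
pattern = [(i & p) != 0 for p in powers]      # [True, True, False, True]
assert sum(pattern) == bin(i).count("1") == 3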
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1056322,
"end": 1059541
} | class ____(sgqlc.types.Type, Node):
"""A GitHub App."""
__schema__ = github_schema
__field_names__ = (
"created_at",
"database_id",
"description",
"ip_allow_list_entries",
"logo_background_color",
"logo_url",
"name",
"slug",
"updated_at",
"url",
)
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
description = sgqlc.types.Field(String, graphql_name="description")
"""The description of the app."""
ip_allow_list_entries = sgqlc.types.Field(
sgqlc.types.non_null(IpAllowListEntryConnection),
graphql_name="ipAllowListEntries",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
IpAllowListEntryOrder, graphql_name="orderBy", default={"field": "ALLOW_LIST_VALUE", "direction": "ASC"}
),
),
)
),
)
"""The IP addresses of the app.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`IpAllowListEntryOrder`): Ordering options for IP
allow list entries returned. (default: `{field:
ALLOW_LIST_VALUE, direction: ASC}`)
"""
logo_background_color = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="logoBackgroundColor")
"""The hex color code, without the leading '#', for the logo
background.
"""
logo_url = sgqlc.types.Field(
sgqlc.types.non_null(URI),
graphql_name="logoUrl",
args=sgqlc.types.ArgDict((("size", sgqlc.types.Arg(Int, graphql_name="size", default=None)),)),
)
"""A URL pointing to the app's logo.
Arguments:
* `size` (`Int`): The size of the resulting image.
"""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The name of the app."""
slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug")
"""A slug based on the name of the app for use in URLs."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The URL to the app's homepage."""
| App |
python | PyCQA__pylint | tests/functional/c/ctor_arguments.py | {
"start": 269,
"end": 399
} | class ____:
def __init__(self, first_argument, second_argument, third_argument):
"""three arguments function"""
| Class3Arg |
python | django__django | tests/queries/tests.py | {
"start": 178170,
"end": 178537
} | class ____(SimpleTestCase):
def test_invalid_values(self):
msg = "Field 'id' expected a number but got 'abc'."
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag="abc")
with self.assertRaisesMessage(ValueError, msg):
Annotation.objects.filter(tag__in=[123, "abc"])
| TestInvalidValuesRelation |
python | fastai__fastai | fastai/text/models/core.py | {
"start": 2380,
"end": 3897
} | class ____(nn.Sequential):
"A sequential module that passes the reset call to its children."
def reset(self):
for c in self.children(): getcallable(c, 'reset')()
# %% ../../../nbs/33_text.models.core.ipynb 12
def get_language_model(
arch, # Function or class that can generate a language model architecture
vocab_sz:int, # Size of the vocabulary
config:dict=None, # Model configuration dictionary
drop_mult:float=1. # Multiplicative factor to scale all dropout probabilities in `config`
) -> SequentialRNN: # Language model with `arch` encoder and linear decoder
"Create a language model from `arch` and its `config`."
meta = _model_meta[arch]
config = ifnone(config, meta['config_lm']).copy()
for k in config.keys():
if k.endswith('_p'): config[k] *= drop_mult
tie_weights,output_p,out_bias = map(config.pop, ['tie_weights', 'output_p', 'out_bias'])
init = config.pop('init') if 'init' in config else None
encoder = arch(vocab_sz, **config)
enc = encoder.encoder if tie_weights else None
decoder = LinearDecoder(vocab_sz, config[meta['hid_name']], output_p, tie_encoder=enc, bias=out_bias)
model = SequentialRNN(encoder, decoder)
return model if init is None else model.apply(init)
# %% ../../../nbs/33_text.models.core.ipynb 17
def _pad_tensor(t:Tensor, bs:int) -> Tensor:
if t.size(0) < bs: return torch.cat([t, t.new_zeros(bs-t.size(0), *t.shape[1:])])
return t
# %% ../../../nbs/33_text.models.core.ipynb 18
| SequentialRNN |
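The drop_mult handling in get_language_model is a single uniform scaling: every config key ending in "_p" is a dropout probability and gets multiplied by the one factor. The same step in isolation, on illustrative values rather than a real model config:

config = {"output_p": 0.4, "hidden_p": 0.3, "emb_sz": 400}
drop_mult = 0.5
for k in config:
    if k.endswith("_p"):
        config[k] *= drop_mult
assert config == {"output_p": 0.2, "hidden_p": 0.15, "emb_sz": 400}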
python | rq__rq | rq/scheduler.py | {
"start": 854,
"end": 8360
} | class ____:
# STARTED: scheduler has been started but sleeping
# WORKING: scheduler is in the midst of scheduling jobs
# STOPPED: scheduler is in stopped condition
Status = SchedulerStatus
def __init__(
self,
queues,
connection: Redis,
interval=1,
logging_level: Union[str, int] = logging.INFO,
date_format=DEFAULT_LOGGING_DATE_FORMAT,
log_format=DEFAULT_LOGGING_FORMAT,
serializer=None,
):
self._queue_names = set(parse_names(queues))
self._acquired_locks: set[str] = set()
self._scheduled_job_registries: list[ScheduledJobRegistry] = []
self.lock_acquisition_time = None
self._connection_class, self._pool_class, self._pool_kwargs = parse_connection(connection)
self.serializer = resolve_serializer(serializer)
self._connection = None
self.interval = interval
self._stop_requested = False
self._status = self.Status.STOPPED
self._process = None
self.log = logging.getLogger(__name__)
setup_loghandlers(
level=logging_level,
name=__name__,
log_format=log_format,
date_format=date_format,
)
@property
def connection(self):
if self._connection:
return self._connection
self._connection = self._connection_class(
connection_pool=ConnectionPool(connection_class=self._pool_class, **self._pool_kwargs)
)
return self._connection
@property
def acquired_locks(self):
return self._acquired_locks
@property
def status(self):
return self._status
@property
def should_reacquire_locks(self):
"""Returns True if lock_acquisition_time is longer than 10 minutes ago"""
if self._queue_names == self.acquired_locks:
return False
if not self.lock_acquisition_time:
return True
return (datetime.now() - self.lock_acquisition_time).total_seconds() > DEFAULT_SCHEDULER_FALLBACK_PERIOD
def acquire_locks(self, auto_start=False):
"""Returns names of queue it successfully acquires lock on"""
successful_locks = set()
pid = os.getpid()
self.log.debug('Acquiring scheduler lock for %s', ', '.join(self._queue_names))
for name in self._queue_names:
if self.connection.set(self.get_locking_key(name), pid, nx=True, ex=self.interval + 60):
self.log.info('Acquired scheduler lock for %s', name)
successful_locks.add(name)
# Always reset _scheduled_job_registries when acquiring locks
self._scheduled_job_registries = []
self._acquired_locks = self._acquired_locks.union(successful_locks)
self.lock_acquisition_time = datetime.now()
# If auto_start is requested and scheduler is not started,
# run self.start()
if self._acquired_locks and auto_start:
if not self._process or not self._process.is_alive():
self.start()
return successful_locks
def prepare_registries(self, queue_names: Optional[Iterable[str]] = None):
"""Prepare scheduled job registries for use"""
self._scheduled_job_registries = []
if not queue_names:
queue_names = self._acquired_locks
for name in queue_names:
self._scheduled_job_registries.append(
ScheduledJobRegistry(name, connection=self.connection, serializer=self.serializer)
)
@classmethod
def get_locking_key(cls, name: str):
"""Returns scheduler key for a given queue name"""
return SCHEDULER_LOCKING_KEY_TEMPLATE % name
def enqueue_scheduled_jobs(self):
"""Enqueue jobs whose timestamp is in the past"""
self._status = self.Status.WORKING
if not self._scheduled_job_registries and self._acquired_locks:
self.prepare_registries()
for registry in self._scheduled_job_registries:
timestamp = current_timestamp()
# TODO: try to use Lua script to make get_jobs_to_schedule()
# and remove_jobs() atomic
job_ids = registry.get_jobs_to_schedule(timestamp)
if not job_ids:
continue
queue = Queue(registry.name, connection=self.connection, serializer=self.serializer)
with self.connection.pipeline() as pipeline:
jobs = Job.fetch_many(job_ids, connection=self.connection, serializer=self.serializer)
for job in jobs:
if job is not None:
queue._enqueue_job(job, pipeline=pipeline, at_front=bool(job.enqueue_at_front))
for job_id in job_ids:
registry.remove(job_id, pipeline=pipeline)
pipeline.execute()
self._status = self.Status.STARTED
def _install_signal_handlers(self):
"""Installs signal handlers for handling SIGINT and SIGTERM
gracefully.
"""
signal.signal(signal.SIGINT, self.request_stop)
signal.signal(signal.SIGTERM, self.request_stop)
def request_stop(self, signum=None, frame=None):
"""Toggle self._stop_requested that's checked on every loop"""
self._stop_requested = True
def heartbeat(self):
"""Updates the TTL on scheduler keys and the locks"""
self.log.debug('Scheduler sending heartbeat to %s', ', '.join(self.acquired_locks))
if len(self._acquired_locks) > 1:
with self.connection.pipeline() as pipeline:
for name in self._acquired_locks:
key = self.get_locking_key(name)
pipeline.expire(key, self.interval + 60)
pipeline.execute()
elif self._acquired_locks:
key = self.get_locking_key(next(iter(self._acquired_locks)))
self.connection.expire(key, self.interval + 60)
def stop(self):
self.log.info('Scheduler stopping, releasing locks for %s...', ', '.join(self._acquired_locks))
self.release_locks()
self._status = self.Status.STOPPED
def release_locks(self):
"""Release acquired locks"""
keys = [self.get_locking_key(name) for name in self._acquired_locks]
self.connection.delete(*keys)
self._acquired_locks = set()
def start(self):
self._status = self.Status.STARTED
# Redis instance can't be pickled across processes so we need to
# clean this up before forking
self._connection = None
self._process = Process(target=run, args=(self,), name='Scheduler')
self._process.start()
return self._process
def work(self):
self._install_signal_handlers()
while True:
if self._stop_requested:
self.stop()
break
if self.should_reacquire_locks:
self.acquire_locks()
self.enqueue_scheduled_jobs()
self.heartbeat()
time.sleep(self.interval)
def run(scheduler):
scheduler.log.info('Scheduler for %s started with PID %s', ', '.join(scheduler._queue_names), os.getpid())
try:
scheduler.work()
except: # noqa
scheduler.log.error('Scheduler [PID %s] raised an exception.\n%s', os.getpid(), traceback.format_exc())
raise
scheduler.log.info('Scheduler with PID %d has stopped', os.getpid())
| RQScheduler |
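acquire_locks and heartbeat together implement a TTL-based distributed lock: SET with nx=True succeeds only when no other scheduler holds the key, and the ex= expiry guarantees a crashed scheduler cannot hold it forever, provided the live one keeps extending it. A minimal sketch assuming a running local Redis; the connection details and key name are illustrative:

import os
from redis import Redis

conn = Redis(host="localhost", port=6379)
key = "rq:scheduler-lock:default"
if conn.set(key, os.getpid(), nx=True, ex=60):
    # We own the lock; the heartbeat loop must keep refreshing the TTL.
    conn.expire(key, 60)
else:
    print("another scheduler holds the lock")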
python | django-import-export__django-import-export | import_export/formats/base_formats.py | {
"start": 373,
"end": 1436
} | class ____:
def get_title(self):
return type(self)
def create_dataset(self, in_stream):
"""
Create dataset from given string.
"""
raise NotImplementedError()
def export_data(self, dataset, **kwargs):
"""
Returns format representation for given dataset.
"""
raise NotImplementedError()
def is_binary(self):
"""
Returns if this format is binary.
"""
return True
def get_read_mode(self):
"""
Returns mode for opening files.
"""
return "rb"
def get_extension(self):
"""
Returns extension for this format files.
"""
return ""
def get_content_type(self):
# For content types see
# https://www.iana.org/assignments/media-types/media-types.xhtml
return "application/octet-stream"
@classmethod
def is_available(cls):
return True
def can_import(self):
return False
def can_export(self):
return False
| Format |
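Concrete formats subclass Format and override these hooks. A hypothetical tab-separated format, sketched only to show which methods a text-based subclass typically fills in; it is not a format the library ships:

import tablib
from import_export.formats.base_formats import Format

class TSVFormat(Format):
    def get_title(self):
        return "tsv"

    def create_dataset(self, in_stream):
        dataset = tablib.Dataset()
        for line in in_stream.splitlines():
            dataset.append(line.split("\t"))
        return dataset

    def export_data(self, dataset, **kwargs):
        return "\n".join("\t".join(map(str, row)) for row in dataset)

    def is_binary(self):
        return False  # text format, so open files in "r" mode

    def get_read_mode(self):
        return "r"

    def get_extension(self):
        return "tsv"

    def can_import(self):
        return True

    def can_export(self):
        return True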
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 197795,
"end": 204933
} | class ____(
DialectKWArgs, ColumnCollectionMixin, HasConditionalDDL, SchemaItem
):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX.
E.g.::
sometable = Table(
"sometable",
metadata,
Column("name", String(50)),
Column("address", String(100)),
)
Index("some_index", sometable.c.name)
For a no-frills, single column index, adding
:class:`_schema.Column` also supports ``index=True``::
sometable = Table(
"sometable", metadata, Column("name", String(50), index=True)
)
For a composite index, multiple columns can be specified::
Index("some_index", sometable.c.name, sometable.c.address)
Functional indexes are supported as well, typically by using the
:data:`.func` construct in conjunction with table-bound
:class:`_schema.Column` objects::
Index("some_index", func.lower(sometable.c.name))
An :class:`.Index` can also be manually associated with a
:class:`_schema.Table`,
either through inline declaration or using
:meth:`_schema.Table.append_constraint`. When this approach is used,
the names
of the indexed columns can be specified as strings::
Table(
"sometable",
metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address"),
)
To support functional or expression-based indexes in this form, the
:func:`_expression.text` construct may be used::
from sqlalchemy import text
Table(
"sometable",
metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)")),
)
.. seealso::
:ref:`schema_indexes` - General information on :class:`.Index`.
:ref:`postgresql_indexes` - PostgreSQL-specific options available for
the :class:`.Index` construct.
:ref:`mysql_indexes` - MySQL-specific options available for the
:class:`.Index` construct.
:ref:`mssql_indexes` - MSSQL-specific options available for the
:class:`.Index` construct.
"""
__visit_name__ = "index"
table: Optional[Table]
expressions: _typing_Sequence[Union[str, ColumnElement[Any]]]
_table_bound_expressions: _typing_Sequence[ColumnElement[Any]]
def __init__(
self,
name: Optional[str],
*expressions: _DDLColumnArgument,
unique: bool = False,
quote: Optional[bool] = None,
info: Optional[_InfoType] = None,
_table: Optional[Table] = None,
_column_flag: bool = False,
**dialect_kw: Any,
) -> None:
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`_schema.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`_schema.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`_schema.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
:param \**dialect_kw: Additional keyword arguments not mentioned above
are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
self.name = quoted_name.construct(name, quote)
self.unique = unique
if info is not None:
self.info = info
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if _table is not None:
table = _table
self._validate_dialect_kwargs(dialect_kw)
self.expressions = []
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(
self,
*expressions,
_column_flag=_column_flag,
_gather_expressions=self.expressions,
)
if table is not None:
self._set_parent(table)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
table = parent
assert isinstance(table, Table)
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
f"Index '{self.name}' is against table "
f"'{self.table.description}', and "
f"cannot be associated with table '{table.description}'."
)
self.table = table
table.indexes.add(self)
expressions = self.expressions
col_expressions = self._col_expressions(table)
assert len(expressions) == len(col_expressions)
exprs = []
for expr, colexpr in zip(expressions, col_expressions):
if isinstance(expr, ClauseElement):
exprs.append(expr)
elif colexpr is not None:
exprs.append(colexpr)
else:
assert False
self.expressions = self._table_bound_expressions = exprs
def create(
self,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.NONE,
) -> None:
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given
        :class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(
self,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.NONE,
) -> None:
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given
:class:`.Connection` or :class:`.Engine` for connectivity.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def __repr__(self) -> str:
exprs: _typing_Sequence[Any] # noqa: F842
return "Index(%s)" % (
", ".join(
[repr(self.name)]
+ [repr(e) for e in self.expressions]
+ (self.unique and ["unique=True"] or [])
)
)
_NamingSchemaCallable = Callable[[Constraint, Table], str]
_NamingSchemaDirective = Union[str, _NamingSchemaCallable]
| Index |
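A runnable sketch of the create() and drop() methods documented above, assuming SQLAlchemy 2.x and an in-memory SQLite engine (table and index names are illustrative):

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, Index

engine = create_engine("sqlite://")
metadata = MetaData()
sometable = Table(
    "sometable",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(50)),
)
metadata.create_all(engine)          # creates the table, not the index below
ix = Index("ix_sometable_name", sometable.c.name)
ix.create(engine)                    # emits CREATE INDEX
ix.drop(engine)                      # emits DROP INDEX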
python | ray-project__ray | python/ray/dashboard/modules/aggregator/publisher/ray_event_publisher.py | {
"start": 1441,
"end": 9919
} | class ____(RayEventPublisherInterface):
"""RayEvents publisher that publishes batches of events to a destination by running a worker loop.
The worker loop continuously pulls batches from the event buffer and publishes them to the destination.
"""
def __init__(
self,
name: str,
publish_client: PublisherClientInterface,
event_buffer: MultiConsumerEventBuffer,
common_metric_tags: Optional[Dict[str, str]] = None,
max_retries: int = PUBLISHER_MAX_RETRIES,
initial_backoff: float = PUBLISHER_INITIAL_BACKOFF_SECONDS,
max_backoff: float = PUBLISHER_MAX_BACKOFF_SECONDS,
jitter_ratio: float = PUBLISHER_JITTER_RATIO,
) -> None:
"""Initialize a RayEventsPublisher.
Args:
name: Name identifier for this publisher instance
publish_client: Client for publishing events to the destination
event_buffer: Buffer for reading batches of events
common_metric_tags: Common labels for all prometheus metrics
max_retries: Maximum number of retries for failed publishes
initial_backoff: Initial backoff time between retries in seconds
max_backoff: Maximum backoff time between retries in seconds
jitter_ratio: Random jitter ratio to add to backoff times
"""
self._name = name
self._common_metric_tags = dict(common_metric_tags or {})
self._common_metric_tags[CONSUMER_TAG_KEY] = name
self._max_retries = int(max_retries)
self._initial_backoff = float(initial_backoff)
self._max_backoff = float(max_backoff)
self._jitter_ratio = float(jitter_ratio)
self._publish_client = publish_client
self._event_buffer = event_buffer
# Event set once the publisher has registered as a consumer and is ready to publish events
self._started_event: asyncio.Event = asyncio.Event()
async def run_forever(self) -> None:
"""Run the publisher forever until cancellation or process death.
Registers as a consumer, starts the worker loop, and handles cleanup on cancellation.
"""
await self._event_buffer.register_consumer(self._name)
# Signal that the publisher is ready to publish events
self._started_event.set()
try:
logger.info(f"Starting publisher {self._name}")
while True:
batch = await self._event_buffer.wait_for_batch(
self._name,
PUBLISHER_MAX_BUFFER_SEND_INTERVAL_SECONDS,
)
publish_batch = PublishBatch(events=batch)
await self._async_publish_with_retries(publish_batch)
except asyncio.CancelledError:
logger.info(f"Publisher {self._name} cancelled, shutting down gracefully")
raise
except Exception as e:
logger.error(f"Publisher {self._name} encountered error: {e}")
raise
finally:
self._started_event.clear()
await self._publish_client.close()
async def wait_until_running(self, timeout: Optional[float] = None) -> bool:
"""Wait until the publisher has started.
Args:
timeout: Maximum time to wait in seconds. If None, waits indefinitely.
Returns:
            True if the publisher started before the timeout, False otherwise;
            when timeout is None the wait is unbounded and the result is True.
"""
if timeout is None:
await self._started_event.wait()
return True
try:
await asyncio.wait_for(self._started_event.wait(), timeout)
return True
except asyncio.TimeoutError:
return False
async def _async_publish_with_retries(self, batch) -> None:
"""Attempts to publish a batch with retries.
Will retry failed publishes up to max_retries times with increasing delays.
"""
num_events_in_batch = self._publish_client.count_num_events_in_batch(batch)
failed_attempts_since_last_success = 0
while True:
start = asyncio.get_running_loop().time()
result = await self._publish_client.publish(batch)
duration = asyncio.get_running_loop().time() - start
if result.is_publish_successful:
await self._record_success(
num_published=int(result.num_events_published),
num_filtered=int(result.num_events_filtered_out),
duration=float(duration),
)
failed_attempts_since_last_success = 0
return
# Failed attempt
# case 1: max retries exhausted -> record the final failure and stop; if max_retries < 0, retry indefinitely
if (
self._max_retries >= 0
and failed_attempts_since_last_success >= self._max_retries
):
await self._record_final_failure(
num_failed_events=int(num_events_in_batch),
duration=float(duration),
)
return
# case 2: retries remain -> count the failed attempt, record retry metrics, and retry the batch after a backoff
failed_attempts_since_last_success += 1
await self._record_retry_failure(
duration=float(duration),
failed_attempts=int(failed_attempts_since_last_success),
)
await self._async_sleep_with_backoff(failed_attempts_since_last_success)
async def _async_sleep_with_backoff(self, attempt: int) -> None:
"""Sleep with exponential backoff and optional jitter.
Args:
attempt: Number of failed attempts so far (the first retry passes 1)
"""
delay = min(
self._max_backoff,
self._initial_backoff * (2**attempt),
)
if self._jitter_ratio > 0:
jitter = delay * self._jitter_ratio
delay = max(0.0, random.uniform(delay - jitter, delay + jitter))
await asyncio.sleep(delay)
async def _record_success(
self, num_published: int, num_filtered: int, duration: float
) -> None:
"""Update in-memory stats and Prometheus metrics for a successful publish."""
if num_published > 0:
metric_recorder.set_metric_value(
published_counter_name,
self._common_metric_tags,
int(num_published),
)
if num_filtered > 0:
metric_recorder.set_metric_value(
filtered_counter_name, self._common_metric_tags, int(num_filtered)
)
metric_recorder.set_metric_value(
consecutive_failures_gauge_name, self._common_metric_tags, 0
)
metric_recorder.set_metric_value(
time_since_last_success_gauge_name, self._common_metric_tags, 0
)
metric_recorder.set_metric_value(
publish_latency_hist_name,
{**self._common_metric_tags, "Outcome": "success"},
float(duration),
)
async def _record_retry_failure(
self, duration: float, failed_attempts: int
) -> None:
"""Update Prometheus metrics for a retryable failure attempt."""
metric_recorder.set_metric_value(
consecutive_failures_gauge_name,
self._common_metric_tags,
int(failed_attempts),
)
metric_recorder.set_metric_value(
publish_latency_hist_name,
{**self._common_metric_tags, "Outcome": "failure"},
float(duration),
)
async def _record_final_failure(
self, num_failed_events: int, duration: float
) -> None:
"""Update in-memory stats and Prometheus metrics for a final (non-retryable) failure."""
if num_failed_events > 0:
metric_recorder.set_metric_value(
failed_counter_name,
self._common_metric_tags,
int(num_failed_events),
)
metric_recorder.set_metric_value(
consecutive_failures_gauge_name, self._common_metric_tags, 0
)
metric_recorder.set_metric_value(
publish_latency_hist_name,
{**self._common_metric_tags, "Outcome": "failure"},
float(duration),
)
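# A minimal standalone sketch of the backoff policy implemented by
# _async_sleep_with_backoff above. The function name and default values here
# are illustrative only, not part of the publisher's API.
import random

def example_backoff_delay(attempt: int, initial: float = 1.0, cap: float = 30.0, jitter_ratio: float = 0.1) -> float:
    # Exponential growth capped at `cap`, with symmetric +/- jitter.
    delay = min(cap, initial * (2 ** attempt))
    if jitter_ratio > 0:
        spread = delay * jitter_ratio
        delay = max(0.0, random.uniform(delay - spread, delay + spread))
    return delay

# e.g. attempts 1..4 with the defaults give roughly 2s, 4s, 8s, 16s (+/-10%).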
| RayEventPublisher |
python | django__django | tests/syndication_tests/feeds.py | {
"start": 2429,
"end": 2609
} | class ____(TestRss2Feed):
@common_decorator
def item_description(self, item):
return f"Overridden item description: {item.title}"
| TestRss2FeedWithWrongDecoratedMethod |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/github_client.py | {
"start": 4735,
"end": 18913
} | class ____:
"""
An asynchronous client for interacting with the Github API.
This client is used for making API requests to Github.
It provides methods for accessing the Github API endpoints.
The client supports two authentication methods:
1. Personal Access Token (PAT) - passed as github_token or via GITHUB_TOKEN env var
2. GitHub App - passed as the github_app_auth parameter
Examples:
>>> # Using Personal Access Token
>>> client = GithubClient("my_github_token")
>>> branch_info = client.get_branch("owner", "repo", "branch")
>>>
>>> # Using GitHub App
>>> from llama_index.readers.github.github_app_auth import GitHubAppAuth
>>> with open("private-key.pem", "r") as f:
... private_key = f.read()
>>> app_auth = GitHubAppAuth(
... app_id="123456",
... private_key=private_key,
... installation_id="789012"
... )
>>> client = GithubClient(github_app_auth=app_auth)
"""
DEFAULT_BASE_URL = "https://api.github.com"
DEFAULT_API_VERSION = "2022-11-28"
def __init__(
self,
github_token: Optional[str] = None,
github_app_auth: Optional[Union["GitHubAppAuth", Any]] = None,
base_url: str = DEFAULT_BASE_URL,
api_version: str = DEFAULT_API_VERSION,
verbose: bool = False,
fail_on_http_error: bool = True,
) -> None:
"""
Initialize the GithubClient.
Args:
- github_token (str, optional): Github token for authentication.
If not provided, the client will try to get it from
the GITHUB_TOKEN environment variable. Mutually exclusive with github_app_auth.
- github_app_auth (GitHubAppAuth, optional): GitHub App authentication handler.
Mutually exclusive with github_token.
- base_url (str): Base URL for the Github API
(defaults to "https://api.github.com").
- api_version (str): Github API version (defaults to "2022-11-28").
- verbose (bool): Whether to print verbose output (defaults to False).
- fail_on_http_error (bool): Whether to raise an exception on HTTP errors (defaults to True).
Raises:
ValueError: If neither github_token nor github_app_auth is provided,
or if both are provided.
"""
# Validate authentication parameters
if github_token is not None and github_app_auth is not None:
raise ValueError(
"Cannot provide both github_token and github_app_auth. "
"Please use only one authentication method."
)
self._base_url = base_url
self._api_version = api_version
self._verbose = verbose
self._fail_on_http_error = fail_on_http_error
self._github_app_auth = github_app_auth
self._github_token = None
# Set up authentication
if github_app_auth is not None:
# Using GitHub App authentication
self._use_github_app = True
else:
# Using PAT authentication
self._use_github_app = False
if github_token is None:
github_token = os.getenv("GITHUB_TOKEN")
if github_token is None:
raise ValueError(
"Please provide a Github token or GitHub App authentication. "
+ "You can pass github_token as an argument, "
+ "set the GITHUB_TOKEN environment variable, "
+ "or pass github_app_auth for GitHub App authentication."
)
self._github_token = github_token
self._endpoints = {
"getTree": "/repos/{owner}/{repo}/git/trees/{tree_sha}",
"getBranch": "/repos/{owner}/{repo}/branches/{branch}",
"getBlob": "/repos/{owner}/{repo}/git/blobs/{file_sha}",
"getCommit": "/repos/{owner}/{repo}/commits/{commit_sha}",
}
# Base headers (Authorization header will be added per-request)
self._base_headers = {
"Accept": "application/vnd.github+json",
"X-GitHub-Api-Version": f"{self._api_version}",
}
# For backward compatibility, keep _headers with PAT token
if not self._use_github_app:
self._headers = {
**self._base_headers,
"Authorization": f"Bearer {self._github_token}",
}
else:
# Headers will be generated per-request for GitHub App
self._headers = self._base_headers.copy()
def get_all_endpoints(self) -> Dict[str, str]:
"""Get all available endpoints."""
return {**self._endpoints}
async def _get_auth_headers(self) -> Dict[str, str]:
"""
Get authentication headers.
For PAT authentication, returns cached headers.
For GitHub App authentication, fetches a fresh installation token if needed.
Returns:
Dictionary containing authentication headers.
"""
if self._use_github_app:
# Get fresh token from GitHub App auth
token = await self._github_app_auth.get_installation_token()
return {
**self._base_headers,
"Authorization": f"Bearer {token}",
}
else:
# Return cached headers with PAT
return self._headers
async def request(
self,
endpoint: str,
method: str,
headers: Optional[Dict[str, Any]] = None,
timeout: Optional[int] = 5,
retries: int = 0,
**kwargs: Any,
) -> Any:
"""
Make an API request to the Github API.
This method is used for making API requests to the Github API.
It is used internally by the other methods in the client.
Args:
- `endpoint (str)`: Name of the endpoint to make the request to.
- `method (str)`: HTTP method to use for the request.
- `headers (dict, optional)`: Extra HTTP headers to merge into the request.
- `timeout (int or None)`: Timeout for the request in seconds. Default is 5.
- `retries (int)`: Number of retries for the request. Default is 0.
- `**kwargs`: Keyword arguments to pass to the endpoint URL.
Returns:
- `response (httpx.Response)`: Response from the API request.
Raises:
- ImportError: If the `httpx` library is not installed.
- httpx.HTTPError: If the API request fails and fail_on_http_error is True.
Examples:
>>> response = client.request("getTree", "GET",
owner="owner", repo="repo",
tree_sha="tree_sha", timeout=5, retries=0)
"""
try:
import httpx
except ImportError:
raise ImportError(
"Please install httpx to use the GithubRepositoryReader. "
"You can do so by running `pip install httpx`."
)
# Get authentication headers (may fetch fresh token for GitHub App)
auth_headers = await self._get_auth_headers()
_headers = {**auth_headers, **(headers or {})}
_client: httpx.AsyncClient
async with httpx.AsyncClient(
headers=_headers,
base_url=self._base_url,
timeout=timeout,
transport=httpx.AsyncHTTPTransport(retries=retries),
) as _client:
try:
response = await _client.request(
method, url=self._endpoints[endpoint].format(**kwargs)
)
except httpx.HTTPError as excp:
print(f"HTTP Exception for {excp.request.url} - {excp}")
raise excp # noqa: TRY201
return response
async def get_branch(
self,
owner: str,
repo: str,
branch: Optional[str] = None,
branch_name: Optional[str] = None,
timeout: Optional[int] = 5,
retries: int = 0,
) -> GitBranchResponseModel:
"""
Get information about a branch. (Github API endpoint: getBranch).
Args:
- `owner (str)`: Owner of the repository.
- `repo (str)`: Name of the repository.
- `branch (str)`: Name of the branch.
- `branch_name (str)`: Name of the branch (alternative to `branch`).
- `timeout (int or None)`: Timeout for the request in seconds. Default is 5.
- `retries (int)`: Number of retries for the request. Default is 0.
Returns:
- `branch_info (GitBranchResponseModel)`: Information about the branch.
Examples:
>>> branch_info = client.get_branch("owner", "repo", "branch")
"""
if branch is None:
if branch_name is None:
raise ValueError("Either branch or branch_name must be provided.")
branch = branch_name
return GitBranchResponseModel.from_json(
(
await self.request(
"getBranch",
"GET",
owner=owner,
repo=repo,
branch=branch,
timeout=timeout,
retries=retries,
)
).text
)
async def get_tree(
self,
owner: str,
repo: str,
tree_sha: str,
timeout: Optional[int] = 5,
retries: int = 0,
) -> GitTreeResponseModel:
"""
Get information about a tree. (Github API endpoint: getTree).
Args:
- `owner (str)`: Owner of the repository.
- `repo (str)`: Name of the repository.
- `tree_sha (str)`: SHA of the tree.
- `timeout (int or None)`: Timeout for the request in seconds. Default is 5.
- `retries (int)`: Number of retries for the request. Default is 0.
Returns:
- `tree_info (GitTreeResponseModel)`: Information about the tree.
Examples:
>>> tree_info = client.get_tree("owner", "repo", "tree_sha")
"""
return GitTreeResponseModel.from_json(
(
await self.request(
"getTree",
"GET",
owner=owner,
repo=repo,
tree_sha=tree_sha,
timeout=timeout,
retries=retries,
)
).text
)
async def get_blob(
self,
owner: str,
repo: str,
file_sha: str,
timeout: Optional[int] = 5,
retries: int = 0,
) -> Optional[GitBlobResponseModel]:
"""
Get information about a blob. (Github API endpoint: getBlob).
Args:
- `owner (str)`: Owner of the repository.
- `repo (str)`: Name of the repository.
- `file_sha (str)`: SHA of the file.
- `timeout (int or None)`: Timeout for the request in seconds. Default is 5.
- `retries (int)`: Number of retries for the request. Default is 0.
Returns:
- `blob_info (GitBlobResponseModel)`: Information about the blob.
Examples:
>>> blob_info = client.get_blob("owner", "repo", "file_sha")
"""
try:
return GitBlobResponseModel.from_json(
(
await self.request(
"getBlob",
"GET",
owner=owner,
repo=repo,
file_sha=file_sha,
timeout=timeout,
retries=retries,
)
).text
)
except KeyError:
print(f"Failed to get blob for {owner}/{repo}/{file_sha}")
return None
except HTTPError as excp:
print(f"HTTP Exception for {excp.request.url} - {excp}")
if self._fail_on_http_error:
raise
else:
return None
async def get_commit(
self,
owner: str,
repo: str,
commit_sha: str,
timeout: Optional[int] = 5,
retries: int = 0,
) -> GitCommitResponseModel:
"""
Get information about a commit. (Github API endpoint: getCommit).
Args:
- `owner (str)`: Owner of the repository.
- `repo (str)`: Name of the repository.
- `commit_sha (str)`: SHA of the commit.
- `timeout (int or None)`: Timeout for the request in seconds. Default is 5.
- `retries (int)`: Number of retries for the request. Default is 0.
Returns:
- `commit_info (GitCommitResponseModel)`: Information about the commit.
Examples:
>>> commit_info = client.get_commit("owner", "repo", "commit_sha")
"""
return GitCommitResponseModel.from_json(
(
await self.request(
"getCommit",
"GET",
owner=owner,
repo=repo,
commit_sha=commit_sha,
timeout=timeout,
retries=retries,
)
).text
)
if __name__ == "__main__":
import asyncio
async def main() -> None:
"""Test the GithubClient."""
client = GithubClient()
response = await client.get_tree(
owner="ahmetkca", repo="CommitAI", tree_sha="with-body"
)
for obj in response.tree:
if obj.type == "blob":
print(obj.path)
print(obj.sha)
blob_response = await client.get_blob(
owner="ahmetkca", repo="CommitAI", file_sha=obj.sha
)
print(blob_response.content if blob_response else "None")
asyncio.run(main())
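# Illustration only: how the endpoint templates in _endpoints expand via
# str.format inside request(). The owner/repo/branch values are hypothetical.
example_path = "/repos/{owner}/{repo}/branches/{branch}".format(
    owner="octocat", repo="hello-world", branch="main"
)
assert example_path == "/repos/octocat/hello-world/branches/main"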
| GithubClient |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_backfills.py | {
"start": 29725,
"end": 32171
} | class ____(TestBackfillEndpoint):
def test_pause_backfill(self, session, test_client):
(dag,) = self._create_dag_models()
from_date = timezone.utcnow()
to_date = timezone.utcnow()
backfill = Backfill(dag_id=dag.dag_id, from_date=from_date, to_date=to_date)
session.add(backfill)
session.commit()
response = test_client.put(f"/backfills/{backfill.id}/pause")
assert response.status_code == 200
assert response.json() == {
"completed_at": mock.ANY,
"created_at": mock.ANY,
"dag_display_name": "TEST_DAG_1",
"dag_id": "TEST_DAG_1",
"dag_run_conf": {},
"from_date": to_iso(from_date),
"id": backfill.id,
"is_paused": True,
"reprocess_behavior": "none",
"max_active_runs": 10,
"to_date": to_iso(to_date),
"updated_at": mock.ANY,
}
check_last_log(session, dag_id=None, event="pause_backfill", logical_date=None)
def test_pause_backfill_401(self, session, unauthenticated_test_client):
(dag,) = self._create_dag_models()
from_date = timezone.utcnow()
to_date = timezone.utcnow()
backfill = Backfill(dag_id=dag.dag_id, from_date=from_date, to_date=to_date)
session.add(backfill)
session.commit()
response = unauthenticated_test_client.put(f"/backfills/{backfill.id}/pause")
assert response.status_code == 401
def test_pause_backfill_403(self, session, unauthorized_test_client):
(dag,) = self._create_dag_models()
from_date = timezone.utcnow()
to_date = timezone.utcnow()
backfill = Backfill(dag_id=dag.dag_id, from_date=from_date, to_date=to_date)
session.add(backfill)
session.commit()
response = unauthorized_test_client.put(f"/backfills/{backfill.id}/pause")
assert response.status_code == 403
def test_invalid_id(self, test_client):
response = test_client.put("/backfills/invalid_id/pause")
assert response.status_code == 422
response_detail = response.json()["detail"][0]
assert response_detail["input"] == "invalid_id"
assert response_detail["loc"] == ["path", "backfill_id"]
assert (
response_detail["msg"] == "Input should be a valid integer, unable to parse string as an integer"
)
| TestPauseBackfill |
python | pytorch__pytorch | torch/_dynamo/polyfills/pytree.py | {
"start": 4127,
"end": 4370
} | class ____(str):
__slots__ = ()
def __new__(cls) -> Self:
return super().__new__(cls, "*")
def __repr__(self) -> str:
return "*" # no quotes
_asterisk = _Asterisk()
del _Asterisk
@dataclass(frozen=True)
| _Asterisk |
python | kamyu104__LeetCode-Solutions | Python/number-of-1-bits.py | {
"start": 197,
"end": 728
} | class ____(object):
# @param n, an integer
# @return an integer
def hammingWeight(self, n):
n = (n & 0x55555555) + ((n >> 1) & 0x55555555)
n = (n & 0x33333333) + ((n >> 2) & 0x33333333)
n = (n & 0x0F0F0F0F) + ((n >> 4) & 0x0F0F0F0F)
n = (n & 0x00FF00FF) + ((n >> 8) & 0x00FF00FF)
n = (n & 0x0000FFFF) + ((n >> 16) & 0x0000FFFF)
return n
# Time: O(logn/4) = O(32/4 + 8*4) = O(32)
# Space: O(1)
# https://github.com/gcc-mirror/gcc/blob/master/libgcc/libgcc2.c#L856
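# Quick sanity check of the divide-and-conquer popcount above (illustrative):
if __name__ == "__main__":
    assert Solution().hammingWeight(0b10110101) == 5   # five set bits
    assert Solution().hammingWeight(0xFFFFFFFF) == 32  # all 32 bits set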
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau_tests/test_component.py | {
"start": 3742,
"end": 5495
} | class ____(TestTranslation):
"""Test translation of asset attributes for Tableau components."""
def test_translation(
self,
workspace_data,
attributes: Mapping[str, Any],
assertion: Callable[[AssetSpec], bool],
key_modifier: Optional[Callable[[AssetKey], AssetKey]],
) -> None:
body = copy.deepcopy(BASIC_TABLEAU_COMPONENT_BODY)
body["attributes"]["translation"] = attributes
body["attributes"]["defs_state"] = {"management_type": "LOCAL_FILESYSTEM"}
with (
instance_for_test(),
create_defs_folder_sandbox() as sandbox,
):
defs_path = sandbox.scaffold_component(
component_cls=TableauComponent,
defs_yaml_contents=body,
)
# First load and populate state
with (
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=defs_path) as (component, defs),
):
assert isinstance(component, TableauComponent)
asyncio.run(component.refresh_state(sandbox.project_root))
# Second load with populated state
with (
scoped_definitions_load_context(),
sandbox.load_component_and_build_defs(defs_path=defs_path) as (component, defs),
):
# Use a sheet asset for testing (with workbook name prefix)
key = AssetKey(["test_workbook", "sheet", "sales"])
if key_modifier:
key = key_modifier(key)
assets_def = defs.resolve_assets_def(key)
assert assertion(assets_def.get_asset_spec(key))
| TestTableauTranslation |
python | getsentry__sentry | tests/acceptance/test_project_keys.py | {
"start": 221,
"end": 1364
} | class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
ProjectKey.objects.filter(project=self.project).delete()
ProjectKey.objects.create(
project=self.project,
label="Default",
public_key="5cc0482a13d248ff99f9717101dd6356",
secret_key="410fd998318844b8894775f36184ec28",
)
self.login_as(self.user)
self.path = f"/{self.org.slug}/{self.project.slug}/settings/keys/"
def test_simple(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_test_id("project-keys")
@no_silo_test
| ProjectKeysTest |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/jobs_from_graphs.py | {
"start": 70,
"end": 644
} | class ____(dg.ConfigurableResource):
def ping_server(self): ...
@dg.op
def interact_with_server(server: Server):
server.ping_server()
@dg.graph
def do_stuff():
interact_with_server()
# end_define_graph
# start_define_jobs
import dagster as dg
prod_server = dg.ResourceDefinition.mock_resource()
local_server = dg.ResourceDefinition.mock_resource()
prod_job = do_stuff.to_job(resource_defs={"server": prod_server}, name="do_stuff_prod")
local_job = do_stuff.to_job(
resource_defs={"server": local_server}, name="do_stuff_local"
)
# end_define_jobs
| Server |
python | langchain-ai__langchain | libs/langchain/langchain_classic/output_parsers/boolean.py | {
"start": 72,
"end": 1763
} | class ____(BaseOutputParser[bool]):
"""Parse the output of an LLM call to a boolean."""
true_val: str = "YES"
"""The string value that should be parsed as True."""
false_val: str = "NO"
"""The string value that should be parsed as False."""
def parse(self, text: str) -> bool:
"""Parse the output of an LLM call to a boolean.
Args:
text: output of a language model
Returns:
boolean
"""
regexp = rf"\b({self.true_val}|{self.false_val})\b"
truthy = {
val.upper()
for val in re.findall(regexp, text, flags=re.IGNORECASE | re.MULTILINE)
}
if self.true_val.upper() in truthy:
if self.false_val.upper() in truthy:
msg = (
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
raise ValueError(msg)
return True
if self.false_val.upper() in truthy:
if self.true_val.upper() in truthy:
msg = (
f"Ambiguous response. Both {self.true_val} and {self.false_val} "
f"in received: {text}."
)
raise ValueError(msg)
return False
msg = (
f"BooleanOutputParser expected output value to include either "
f"{self.true_val} or {self.false_val}. Received {text}."
)
raise ValueError(msg)
@property
def _type(self) -> str:
"""Snake-case string identifier for an output parser type."""
return "boolean_output_parser"
| BooleanOutputParser |
python | pytorch__pytorch | torch/_dynamo/output_graph.py | {
"start": 6444,
"end": 7342
} | class ____:
def __init__(self) -> None:
self.cache: dict[VariableTrackerCacheKey, VariableTracker] = {}
def lookup(self, value: Any, source: Source) -> Optional[VariableTracker]:
key = VariableTrackerCacheKey(id(value), source)
if key not in self.cache:
return None
return self.cache[key]
def add(self, value: Any, source: Source, vt: VariableTracker) -> None:
key = VariableTrackerCacheKey(id(value), source)
self.cache[key] = vt
def clone(self) -> "VariableTrackerCache":
# Needed for copy and restore graph state
new_cache = VariableTrackerCache()
new_cache.cache.update(self.cache)
return new_cache
def clear(self) -> None:
self.cache.clear()
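# Note: entries are keyed by the pair (id(value), source), so the same Python
# object reached through two different sources yields two independent entries.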
@functools.cache
def _step_logger() -> Any:
return torchdynamo_logging.get_step_logger(log)
@dataclass
| VariableTrackerCache |
python | walkccc__LeetCode | solutions/2144. Minimum Cost of Buying Candies With Discount/2144.py | {
"start": 0,
"end": 114
} | class ____:
def minimumCost(self, cost: list[int]) -> int:
return sum(cost) - sum(sorted(cost)[-3::-3])
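# Worked example (hypothetical input): cost = [6, 5, 7, 9, 2, 2] ->
# sorted(cost) = [2, 2, 5, 6, 7, 9]; the slice [-3::-3] takes indices 3 and 0,
# i.e. the cheapest candy of each descending group of three: 6 + 2 = 8 free,
# so the answer is 31 - 8 = 23.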
| Solution |
python | ansible__ansible | test/units/executor/module_common/test_module_common.py | {
"start": 2790,
"end": 4546
} | class ____:
"""Note: We may want to change the API of this function in the future. It isn't a great API"""
def test_no_interpreter_set(self, templar):
# normally this would return /usr/bin/python, but so long as we're defaulting to auto python discovery, we'll get
# an InterpreterDiscoveryRequiredError here instead
with pytest.raises(InterpreterDiscoveryRequiredError):
amc._get_shebang(u'/usr/bin/python', {}, templar)
def test_python_interpreter(self, templar):
assert amc._get_shebang(u'/usr/bin/python3.8', {}, templar) == ('#!/usr/bin/python3.8', u'/usr/bin/python3.8')
def test_non_python_interpreter(self, templar):
assert amc._get_shebang(u'/usr/bin/ruby', {}, templar) == ('#!/usr/bin/ruby', u'/usr/bin/ruby')
def test_interpreter_set_in_task_vars(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/pypy'}, templar) == \
(u'#!/usr/bin/pypy', u'/usr/bin/pypy')
def test_non_python_interpreter_in_task_vars(self, templar):
assert amc._get_shebang(u'/usr/bin/ruby', {u'ansible_ruby_interpreter': u'/usr/local/bin/ruby'}, templar) == \
(u'#!/usr/local/bin/ruby', u'/usr/local/bin/ruby')
def test_with_args(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/python3'}, templar, args=('-tt', '-OO')) == \
(u'#!/usr/bin/python3 -tt -OO', u'/usr/bin/python3')
def test_python_via_env(self, templar):
assert amc._get_shebang(u'/usr/bin/python', {u'ansible_python_interpreter': u'/usr/bin/env python'}, templar) == \
(u'#!/usr/bin/env python', u'/usr/bin/env python')
| TestGetShebang |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 44643,
"end": 51604
} | class ____(_TestNormBase):
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector_return_type(self):
a = np.array([1, 0, 1])
exact_types = "Bbhil" # np.typecodes["AllInteger"]
inexact_types = "efdFD" # np.typecodes["AllFloat"]
all_types = exact_types + inexact_types
for each_type in all_types:
at = a.astype(each_type)
if each_type == np.dtype("float16"):
# FIXME: move looping to parametrize, add decorators=[xfail]
# pytest.xfail("float16**float64 => float64 (?)")
raise SkipTest("float16**float64 => float64 (?)")
an = norm(at, -np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 0.0)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered")
an = norm(at, -1)
self.check_dtype(at, an)
assert_almost_equal(an, 0.0)
an = norm(at, 0)
self.check_dtype(at, an)
assert_almost_equal(an, 2)
an = norm(at, 1)
self.check_dtype(at, an)
assert_almost_equal(an, 2.0)
an = norm(at, 2)
self.check_dtype(at, an)
assert_almost_equal(an, an.dtype.type(2.0) ** an.dtype.type(1.0 / 2.0))
an = norm(at, 4)
self.check_dtype(at, an)
assert_almost_equal(an, an.dtype.type(2.0) ** an.dtype.type(1.0 / 4.0))
an = norm(at, np.inf)
self.check_dtype(at, an)
assert_almost_equal(an, 1.0)
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30**0.5, decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0, decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0, decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0, decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30**0.5, decimal=self.dec)
np.testing.assert_almost_equal(
norm(v, -2), ((205.0 / 144) ** -0.5), decimal=self.dec
)
np.testing.assert_almost_equal(norm(v, 0), 4, decimal=self.dec)
for v in (
a,
b,
c,
):
_test(v)
for v in (
array(a, dtype=self.dt),
array(b, dtype=self.dt),
array(c, dtype=self.dt),
):
_test(v)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
nd = B.ndim
for order in [None, -2, 2, -1, 1, np.inf, -np.inf, "fro"]:
for axis in itertools.combinations(range(-nd, nd), 2):
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if row_axis == col_axis:
assert_raises(
(RuntimeError, ValueError), norm, B, ord=order, axis=axis
)
else:
n = norm(B, ord=order, axis=axis)
# The logic using k_index only works for nd = 3.
# This has to be changed if nd is increased.
k_index = nd - (row_axis + col_axis)
if row_axis < col_axis:
expected = [
norm(B[:].take(k, axis=k_index), ord=order)
for k in range(B.shape[k_index])
]
else:
expected = [
norm(B[:].take(k, axis=k_index).T, ord=order)
for k in range(B.shape[k_index])
]
assert_almost_equal(n, expected)
def test_keepdims(self):
A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
allclose_err = "order {0}, axis = {1}"
shape_err = "Shape mismatch found {0}, expected {1}, order={2}, axis={3}"
# check the order=None, axis=None case
expected = norm(A, ord=None, axis=None)
found = norm(A, ord=None, axis=None, keepdims=True)
assert_allclose(
np.squeeze(found), expected, err_msg=allclose_err.format(None, None)
)
expected_shape = (1, 1, 1)
assert_(
found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, None, None),
)
# Vector norms.
for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]:
for k in range(A.ndim):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(
np.squeeze(found), expected, err_msg=allclose_err.format(order, k)
)
expected_shape = list(A.shape)
expected_shape[k] = 1
expected_shape = tuple(expected_shape)
assert_(
found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k),
)
# Matrix norms.
for order in [None, -2, 2, -1, 1, np.inf, -np.inf, "fro", "nuc"]:
for k in itertools.permutations(range(A.ndim), 2):
expected = norm(A, ord=order, axis=k)
found = norm(A, ord=order, axis=k, keepdims=True)
assert_allclose(
np.squeeze(found), expected, err_msg=allclose_err.format(order, k)
)
expected_shape = list(A.shape)
expected_shape[k[0]] = 1
expected_shape[k[1]] = 1
expected_shape = tuple(expected_shape)
assert_(
found.shape == expected_shape,
shape_err.format(found.shape, expected_shape, order, k),
)
| _TestNormGeneral |
python | pytorch__pytorch | test/export/test_sparse.py | {
"start": 2020,
"end": 9228
} | class ____(TestCase):
def setUp(self):
super().setUp()
def assertEqualMeta(self, x, y):
self.assertIsInstance(x, FakeTensor)
self.assertIsInstance(y, torch.Tensor)
# Convert expected value to meta for comparison.
y = y.to("meta")
self.assertEqual(x, y, exact_layout=True, exact_is_coalesced=True)
# When x or y is a meta tensor (say, `x.device == "meta"`), then
# assertEqual(x, y) compares only x and y attributes but skips
# comparing their values. In the case of sparse tensors, this means
# that comparing indices and values attributes are skipped as well,
# which is why we are doing that explicitly below.
if x.layout is torch.strided:
pass
elif x.layout is torch.sparse_coo:
self.assertEqual(x._indices(), y._indices(), exact_layout=True)
self.assertEqual(x._values(), y._values(), exact_layout=True)
else:
if x.layout in {torch.sparse_csr, torch.sparse_bsr}:
x_meta1, y_meta1 = (x.crow_indices(), y.crow_indices())
x_meta2, y_meta2 = (x.col_indices(), y.col_indices())
elif x.layout in {torch.sparse_csc, torch.sparse_bsc}:
x_meta1, y_meta1 = (x.ccol_indices(), y.ccol_indices())
x_meta2, y_meta2 = (x.row_indices(), y.row_indices())
else:
assert 0 # unreachable
self.assertEqual(x_meta1, y_meta1, exact_layout=True)
self.assertEqual(x_meta2, y_meta2, exact_layout=True)
self.assertEqual(x.values(), y.values(), exact_layout=True)
@parametrize("dtype", DTYPES)
@parametrize("itype", ITYPES)
@all_sparse_layouts("layout")
def test_idnet(self, dtype, itype, layout):
net = IdNet()
for sparse_input in self.generate_simple_inputs(
layout,
device="cpu",
dtype=dtype,
index_dtype=itype,
):
# Build the traced graph.
prog = torch.export.export(net, (sparse_input,), strict=True)
# Test arg/output.
for i, node in enumerate(prog.graph.nodes):
meta = node.meta.get("val", None)
if i == 0:
self.assertEqualMeta(meta, sparse_input)
else:
self.assertEqual(meta, None)
@parametrize("dtype", DTYPES)
@parametrize("itype", ITYPES)
@all_sparse_layouts("layout")
def test_sumnet(self, dtype, itype, layout):
net = SumNet()
for sparse_input in self.generate_simple_inputs(
layout,
device="cpu",
dtype=dtype,
index_dtype=itype,
):
result = net(sparse_input)
# Build the traced graph.
prog = torch.export.export(net, (sparse_input,), strict=True)
# Test arg/sum/output.
for i, node in enumerate(prog.graph.nodes):
meta = node.meta.get("val", None)
if i == 0:
self.assertEqualMeta(meta, sparse_input)
elif i == 1:
self.assertEqualMeta(meta, result)
else:
self.assertEqual(meta, None)
@parametrize("dtype", DTYPES)
@parametrize("itype", ITYPES)
@all_sparse_layouts("layout")
def test_eltwisenet(self, dtype, itype, layout):
net = EltwiseNet()
for sparse_input in self.generate_simple_inputs(
layout,
device="cpu",
dtype=dtype,
index_dtype=itype,
):
result = net(sparse_input)
# Build the traced graph.
prog = torch.export.export(net, (sparse_input,), strict=True)
# Test arg/neg/abs/mul/relu/output.
for i, node in enumerate(prog.graph.nodes):
meta = node.meta.get("val", None)
if i <= 4:
self.assertEqualMeta(meta, result)
else:
self.assertEqual(meta, None)
@parametrize("dtype", DTYPES)
@parametrize("itype", ITYPES)
@all_sparse_layouts("layout")
def test_todensenet(self, dtype, itype, layout):
net = ToDenseNet()
for sparse_input in self.generate_simple_inputs(
layout,
device="cpu",
dtype=dtype,
index_dtype=itype,
):
result = net(sparse_input)
# Build the traced graph.
prog = torch.export.export(net, (sparse_input,), strict=True)
# Test arg/todense/output.
for i, node in enumerate(prog.graph.nodes):
meta = node.meta.get("val", None)
if i == 0:
self.assertEqualMeta(meta, sparse_input)
elif i == 1:
self.assertEqualMeta(meta, result)
else:
self.assertEqual(meta, None)
def test_add(self):
net = AddNet()
Y = torch.arange(16, 32, dtype=torch.float32).view(4, 4)
A = torch.tensor(
[
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 2.0],
[0.0, 0.0, 1.0, 1.0],
[3.0, 0.0, 3.0, 0.0],
],
dtype=torch.float32,
)
S = A.to_sparse_csr()
result = net(S, Y)
# Build the traced graph.
prog = torch.export.export(net, (S, Y), strict=True)
# Test args/add/output.
for i, node in enumerate(prog.graph.nodes):
meta = node.meta.get("val", None)
if i == 0:
self.assertEqualMeta(meta, S)
elif i == 1:
self.assertEqualMeta(meta, Y)
elif i == 2:
self.assertEqualMeta(meta, result)
else:
self.assertEqual(meta, None)
def test_activation_coo(self):
net = SparseActivationCOO()
x = [torch.randn(3, 3) for _ in range(3)]
result = net(x)
# Build the traced graph.
prog = torch.export.export(net, args=(x,), strict=True)
# Test args/to_sparse/output.
for i, node in enumerate(prog.graph.nodes):
meta = node.meta.get("val", None)
if i <= 2:
self.assertEqualMeta(meta, x[i])
elif i <= 5:
self.assertEqualMeta(meta, result[i - 3])
else:
self.assertEqual(meta, None)
def test_activation_csr(self):
net = SparseActivationCSR()
x = [torch.randn(3, 3) for _ in range(3)]
result = net(x)
# Build the traced graph.
prog = torch.export.export(net, args=(x,), strict=True)
# Test args/to_sparse/output.
for i, node in enumerate(prog.graph.nodes):
meta = node.meta.get("val", None)
if i <= 2:
self.assertEqualMeta(meta, x[i])
elif i <= 5:
self.assertEqualMeta(meta, result[i - 3])
else:
self.assertEqual(meta, None)
instantiate_parametrized_tests(TestSparseProp)
if __name__ == "__main__":
run_tests()
| TestSparseProp |
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/pettingzoo_chess.py | {
"start": 232,
"end": 6917
} | class ____(MultiAgentEnv):
"""An interface to the PettingZoo MARL environment library.
See: https://github.com/Farama-Foundation/PettingZoo
Inherits from MultiAgentEnv and exposes a given AEC
(actor-environment-cycle) game from the PettingZoo project via the
MultiAgentEnv public API.
Note that the wrapper has some important limitations:
1. All agents have the same action_spaces and observation_spaces.
Note: If, within your aec game, agents do not have homogeneous action /
observation spaces, apply SuperSuit wrappers
to apply padding functionality: https://github.com/Farama-Foundation/
SuperSuit#built-in-multi-agent-only-functions
2. Environments are positive sum games (-> Agents are expected to cooperate
to maximize reward). This isn't a hard restriction, it just that
standard algorithms aren't expected to work well in highly competitive
games.
.. testcode::
:skipif: True
from pettingzoo.butterfly import prison_v3
from ray.rllib.env.wrappers.pettingzoo_env import PettingZooEnv
env = PettingZooEnv(prison_v3.env())
obs = env.reset()
print(obs)
# only returns the observation for the agent which should be stepping
.. testoutput::
{
'prisoner_0': array([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
...,
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], dtype=uint8)
}
.. testcode::
:skipif: True
obs, rewards, dones, infos = env.step({
"prisoner_0": 1
})
# only returns the observation, reward, info, etc, for
# the agent whose turn is next.
print(obs)
.. testoutput::
{
'prisoner_1': array([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
...,
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], dtype=uint8)
}
.. testcode::
:skipif: True
print(rewards)
.. testoutput::
{
'prisoner_1': 0
}
.. testcode::
:skipif: True
print(dones)
.. testoutput::
{
'prisoner_1': False, '__all__': False
}
.. testcode::
:skipif: True
print(infos)
.. testoutput::
{
'prisoner_1': {'map_tuple': (1, 0)}
}
"""
def __init__(
self,
config: Dict[Any, Any] = None,
env: AECEnv = None,
):
super().__init__()
if env is None:
self.env = chess_v5()
else:
self.env = env
self.env.reset()
self.config = config
if self.config is None:
self.config = {}
self.config.setdefault("random_start", 4)
# If these important attributes are not set, try to infer them.
if not self.agents:
self.agents = list(self._agent_ids)
if not self.possible_agents:
self.possible_agents = self.agents.copy()
# Get first observation space, assuming all agents have equal space
self.observation_space = self.env.observation_space(self.env.agents[0])
# Get first action space, assuming all agents have equal space
self.action_space = self.env.action_space(self.env.agents[0])
assert all(
self.env.observation_space(agent) == self.observation_space
for agent in self.env.agents
), (
"Observation spaces for all agents must be identical. Perhaps "
"SuperSuit's pad_observations wrapper can help (useage: "
"`supersuit.aec_wrappers.pad_observations(env)`"
)
assert all(
self.env.action_space(agent) == self.action_space
for agent in self.env.agents
), (
"Action spaces for all agents must be identical. Perhaps "
"SuperSuit's pad_action_space wrapper can help (usage: "
"`supersuit.aec_wrappers.pad_action_space(env)`"
)
self._agent_ids = set(self.env.agents)
def random_start(self, random_moves):
self.env.board = ch.Board()
for _ in range(random_moves):
self.env.board.push(np.random.choice(list(self.env.board.legal_moves)))
return self.env.board
def observe(self):
return {
self.env.agent_selection: self.env.observe(self.env.agent_selection),
"state": self.get_state(),
}
def reset(self, *args, **kwargs):
self.env.reset()
if self.config["random_start"] > 0:
self.random_start(self.config["random_start"])
return (
{self.env.agent_selection: self.env.observe(self.env.agent_selection)},
{self.env.agent_selection: {}},
)
def step(self, action):
try:
self.env.step(action[self.env.agent_selection])
except (KeyError, IndexError):
self.env.step(action)
except AssertionError:
# Include the offending action in the error rather than just printing it.
raise AssertionError(f"Illegal action: {action}")
obs_d = {}
rew_d = {}
done_d = {}
truncated_d = {}
info_d = {}
while self.env.agents:
obs, rew, done, trunc, info = self.env.last()
a = self.env.agent_selection
obs_d[a] = obs
rew_d[a] = rew
done_d[a] = done
truncated_d[a] = trunc
info_d[a] = info
if self.env.terminations[self.env.agent_selection]:
self.env.step(None)
done_d["__all__"] = True
truncated_d["__all__"] = True
else:
done_d["__all__"] = False
truncated_d["__all__"] = False
break
return obs_d, rew_d, done_d, truncated_d, info_d
def close(self):
self.env.close()
def seed(self, seed=None):
self.env.seed(seed)
def render(self, mode="human"):
return self.env.render(mode)
@property
def agent_selection(self):
return self.env.agent_selection
@property
def get_sub_environments(self):
return self.env.unwrapped
def get_state(self):
state = copy.deepcopy(self.env)
return state
def set_state(self, state):
self.env = copy.deepcopy(state)
return self.env.observe(self.env.agent_selection)
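# Illustrative driving loop (hypothetical policy; not part of the wrapper):
# env = MultiAgentChess()
# obs, infos = env.reset()
# while True:
#     agent = env.agent_selection
#     action = my_policy(obs[agent])  # my_policy is a placeholder
#     obs, rew, done, trunc, info = env.step({agent: action})
#     if done["__all__"]:
#         break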
| MultiAgentChess |
python | pytorch__pytorch | test/ao/sparsity/test_scheduler.py | {
"start": 237,
"end": 475
} | class ____(BaseScheduler):
def get_sl(self):
if self.last_epoch > 0:
return [group["sparsity_level"] * 0.5 for group in self.sparsifier.groups]
else:
return list(self.base_sl)
| ImplementedScheduler |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/post_votes_response_builder.py | {
"start": 357,
"end": 774
} | class ____(HttpResponseBuilder):
@classmethod
def posts_votes_response(cls, request_without_cursor_for_pagination: Optional[HttpRequest] = None) -> "PostsVotesResponseBuilder":
return cls(
find_template("votes", __file__),
FieldPath("votes"),
CursorBasedPaginationStrategy(http_request_to_str(request_without_cursor_for_pagination)),
)
| PostsVotesResponseBuilder |
python | falconry__falcon | tests/test_recipes.py | {
"start": 4150,
"end": 5029
} | class ____:
class MediaEcho:
def on_post(self, req, resp):
resp.content_type = req.content_type
resp.media = req.get_media()
def test_text_plain_basic(self, util):
recipe = util.load_module('examples/recipes/plain_text_main.py')
app = falcon.App()
app.req_options.media_handlers['text/plain'] = recipe.TextHandler()
app.resp_options.media_handlers['text/plain'] = recipe.TextHandler()
app.add_route('/media', self.MediaEcho())
client = falcon.testing.TestClient(app)
payload = 'Hello, Falcon!'
headers = {'Content-Type': 'text/plain'}
response = client.simulate_post('/media', body=payload, headers=headers)
assert response.status_code == 200
assert response.content_type == 'text/plain'
assert response.text == payload
| TestTextPlainHandler |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 66046,
"end": 67318
} | class ____(TMATemplateConfigMixin):
def _get_template_configs_impl(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> Generator[dict[str, Any], None, None]:
"""
Generate TMA template configs by calling super and adding TMA-specific options.
"""
base_ops = {
"NUM_SMS": get_num_sms(),
# TODO: Consider making this tunable.
"FLATTEN": True,
}
# Get base template configs from superclass
for template_kwargs in super()._get_template_configs_impl(
kernel_inputs,
op_name,
):
# Some Triton versions requires num_warps >= 4 for WS
# to avoid compilation issues. Triton disables WS if num_warps < 4
# or num_stages < 2. Similar issues have been seen with num_stages=1
ws = (
template_kwargs["num_warps"] >= 4 and template_kwargs["num_stages"] >= 2
)
yield {
**template_kwargs,
**base_ops,
"WARP_SPECIALIZE": ws,
"EPILOGUE_SUBTILE": config.triton.enable_epilogue_subtiling,
}
# Scaled MM-specific mixin for scaled MM templates
| BlackwellTMATemplateConfigMixin |
python | scipy__scipy | scipy/sparse/linalg/_special_sparse_arrays.py | {
"start": 30268,
"end": 34361
} | class ____:
"""
Construct the Mikota pair of matrices in various formats and
eigenvalues of the generalized eigenproblem with them.
The Mikota pair of matrices [1, 2]_ models a vibration problem
of a linear mass-spring system with the ends attached where
the stiffness of the springs and the masses increase along
the system length such that vibration frequencies are subsequent
integers 1, 2, ..., `n` where `n` is the number of the masses. Thus,
eigenvalues of the generalized eigenvalue problem for
the matrix pair `K` and `M` where `K` is the system stiffness matrix
and `M` is the system mass matrix are the squares of the integers,
i.e., 1, 4, 9, ..., ``n * n``.
The stiffness matrix `K` is square real tri-diagonal symmetric
positive definite. The mass matrix `M` is diagonal with diagonal
entries 1, 1/2, 1/3, ...., ``1/n``. Both matrices get
ill-conditioned with `n` growing.
Parameters
----------
n : int
The size of the matrices of the Mikota pair.
dtype : dtype
Numerical type of the array. Default is ``np.float64``.
Attributes
----------
eigenvalues : 1D ndarray, ``np.uint64``
All eigenvalues of the Mikota pair ordered ascending.
Methods
-------
MikotaK()
A `LinearOperator` custom object for the stiffness matrix.
MikotaM()
A `LinearOperator` custom object for the mass matrix.
.. versionadded:: 1.12.0
References
----------
.. [1] J. Mikota, "Frequency tuning of chain structure multibody oscillators
to place the natural frequencies at omega1 and N-1 integer multiples
omega2,..., omegaN", Z. Angew. Math. Mech. 81 (2001), S2, S201-S202.
Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004).
.. [2] Peter C. Muller and Metin Gurgoze,
"Natural frequencies of a multi-degree-of-freedom vibration system",
Proc. Appl. Math. Mech. 6, 319-320 (2006).
http://dx.doi.org/10.1002/pamm.200610141.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg._special_sparse_arrays import MikotaPair
>>> n = 6
>>> mik = MikotaPair(n)
>>> mik_k = mik.k
>>> mik_m = mik.m
>>> mik_k.toarray()
array([[11., -5., 0., 0., 0., 0.],
[-5., 9., -4., 0., 0., 0.],
[ 0., -4., 7., -3., 0., 0.],
[ 0., 0., -3., 5., -2., 0.],
[ 0., 0., 0., -2., 3., -1.],
[ 0., 0., 0., 0., -1., 1.]])
>>> mik_k.tobanded()
array([[ 0., -5., -4., -3., -2., -1.],
[11., 9., 7., 5., 3., 1.]])
>>> mik_m.tobanded()
array([1. , 0.5 , 0.33333333, 0.25 , 0.2 ,
0.16666667])
>>> mik_k.tosparse()
<DIAgonal sparse array of dtype 'float64'
with 16 stored elements (3 diagonals) and shape (6, 6)>
>>> mik_m.tosparse()
<DIAgonal sparse array of dtype 'float64'
with 6 stored elements (1 diagonals) and shape (6, 6)>
>>> np.array_equal(mik_k(np.eye(n)), mik_k.toarray())
True
>>> np.array_equal(mik_m(np.eye(n)), mik_m.toarray())
True
>>> mik.eigenvalues()
array([ 1, 4, 9, 16, 25, 36])
>>> mik.eigenvalues(2)
array([ 1, 4])
"""
def __init__(self, n, dtype=np.float64):
self.n = n
self.dtype = dtype
self.shape = (n, n)
self.m = MikotaM(self.shape, self.dtype)
self.k = MikotaK(self.shape, self.dtype)
def eigenvalues(self, m=None):
"""Return the requested number of eigenvalues.
Parameters
----------
m : int, optional
The positive number of smallest eigenvalues to return.
If not provided, then all eigenvalues will be returned.
Returns
-------
eigenvalues : `np.uint64` array
The requested `m` smallest or all eigenvalues, in ascending order.
"""
if m is None:
m = self.n
arange_plus1 = np.arange(1, m + 1, dtype=np.uint64)
return arange_plus1 * arange_plus1
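# A minimal dense cross-check of the stated spectrum (illustrative only; the
# helper below rebuilds K and M as plain arrays rather than LinearOperators):
import numpy as np
from scipy.linalg import eigh

def _dense_mikota(n):
    main = np.arange(2 * n - 1, 0, -2, dtype=np.float64)
    off = -np.arange(n - 1, 0, -1, dtype=np.float64)
    k = np.diag(main) + np.diag(off, 1) + np.diag(off, -1)
    m = np.diag(1.0 / np.arange(1, n + 1))
    return k, m

k, m = _dense_mikota(6)
# Generalized eigenvalues of K v = lambda M v should be 1, 4, 9, 16, 25, 36.
assert np.allclose(eigh(k, m, eigvals_only=True), np.arange(1, 7) ** 2)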
| MikotaPair |
python | plotly__plotly.py | plotly/graph_objs/treemap/_stream.py | {
"start": 233,
"end": 3511
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "treemap"
_path_str = "treemap.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.treemap.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.treemap.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 5607,
"end": 5689
} | class ____(BaseModel):
"""Represents a GitHub label."""
name: str
| LabelNode |
python | joke2k__faker | faker/providers/date_time/vi_VN/__init__.py | {
"start": 46,
"end": 960
} | class ____(DateTimeProvider):
# Source: https://vi.wikipedia.org/wiki/%C4%90%E1%BB%8Bnh_d%E1%BA%A1ng_ng%C3%A0y_v%C3%A0_gi%E1%BB%9D_%E1%BB%9F_Vi%E1%BB%87t_Nam # NOQA
DAY_NAMES = {
"0": "Chủ Nhật",
"1": "Thứ Hai",
"2": "Thứ Ba",
"3": "Thứ Tư",
"4": "Thứ Năm",
"5": "Thứ Sáu",
"6": "Thứ Bảy",
}
MONTH_NAMES = {
"01": "Tháng Một",
"02": "Tháng Hai",
"03": "Tháng Ba",
"04": "Tháng Tư",
"05": "Tháng Năm",
"06": "Tháng Sáu",
"07": "Tháng Bảy",
"08": "Tháng Tám",
"09": "Tháng Chín",
"10": "Tháng Mười",
"11": "Tháng Mười Một",
"12": "Tháng Mười Hai",
}
def day_of_week(self):
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/vertex_ai/feature_store.py | {
"start": 1727,
"end": 20345
} | class ____(GoogleBaseHook):
"""
Hook for interacting with Google Cloud Vertex AI Feature Store.
This hook provides an interface to manage Feature Store resources in Vertex AI,
including feature views and their synchronization operations. It handles authentication
and provides methods for common Feature Store operations.
:param gcp_conn_id: The connection ID to use for connecting to Google Cloud Platform.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials. Can be either a single account or a chain of accounts required to
get the access_token of the last account in the list, which will be impersonated
in the request. If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role. If set as a sequence, the identities
from the list must grant Service Account Token Creator IAM role to the directly
preceding identity, with first account from the list granting this role to the
originating account.
"""
@staticmethod
def _get_client_options(
location: str | None = None,
custom_endpoint: str | None = None,
) -> ClientOptions:
if custom_endpoint:
client_options = ClientOptions(api_endpoint=custom_endpoint)
elif location and location != "global":
client_options = ClientOptions(api_endpoint=f"{location}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return client_options
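# Resolution examples for the helper above (illustrative):
#   location="us-central1" -> api_endpoint "us-central1-aiplatform.googleapis.com:443"
#   location="global" or None -> library default endpoint
#   custom_endpoint always wins when provided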
def get_feature_online_store_admin_service_client(
self,
location: str | None = None,
) -> FeatureOnlineStoreAdminServiceClient:
"""
Create and returns a FeatureOnlineStoreAdminServiceClient object.
This method initializes a client for interacting with the Feature Store API,
handling proper endpoint configuration based on the specified location.
:param location: Optional. The Google Cloud region where the service is located.
If provided and not 'global', the client will be configured to use the
region-specific API endpoint.
"""
return FeatureOnlineStoreAdminServiceClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=self._get_client_options(location),
)
def get_feature_online_store_service_client(
self,
location: str | None = None,
custom_endpoint: str | None = None,
) -> FeatureOnlineStoreServiceClient:
return FeatureOnlineStoreServiceClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
client_options=self._get_client_options(location, custom_endpoint),
)
@GoogleBaseHook.fallback_to_default_project_id
def create_feature_online_store(
self,
feature_online_store_id: str,
feature_online_store: FeatureOnlineStore,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create and send a Feature Online Store creation request.
This method initiates a Vertex AI Feature Online Store creation request.
A Feature Online Store serves and manages feature data as part of Vertex AI MLOps.
:param feature_online_store_id: The ID of the online feature store.
:param feature_online_store: The configuration of the online repository.
:param project_id: The ID of the Google Cloud project that contains the
feature store. If not provided, will attempt to determine from the environment.
:param location: The Google Cloud region where the feature store is located
(e.g., 'us-central1', 'us-east1').
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_feature_online_store_admin_service_client(location)
return client.create_feature_online_store(
request={
"parent": f"projects/{project_id}/locations/{location}",
"feature_online_store_id": feature_online_store_id,
"feature_online_store": feature_online_store,
},
timeout=timeout,
retry=retry,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_feature_online_store(
self,
feature_online_store_id: str,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
) -> FeatureOnlineStore:
"""
Get details of an existing Feature Online Store.
Vertex AI Feature Online Store provides a centralized repository for serving ML features
and embedding indexes at low latency.
:param feature_online_store_id: The ID of the online feature store.
:param project_id: The ID of the Google Cloud project that contains the
feature store. If not provided, will attempt to determine from the environment.
:param location: The Google Cloud region where the feature store is located
(e.g., 'us-central1', 'us-east1').
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_feature_online_store_admin_service_client(location)
return client.get_feature_online_store(
name=f"projects/{project_id}/locations/{location}/featureOnlineStores/{feature_online_store_id}",
timeout=timeout,
retry=retry,
metadata=metadata,
)
@staticmethod
def _get_featurestore_public_endpoint(feature_online_store: FeatureOnlineStore):
public_endpoint = None
featurestore_data = type(feature_online_store).to_dict(feature_online_store)
if "dedicated_serving_endpoint" in featurestore_data:
public_endpoint = featurestore_data["dedicated_serving_endpoint"].get(
"public_endpoint_domain_name"
)
return public_endpoint
@GoogleBaseHook.fallback_to_default_project_id
def create_feature_view(
self,
feature_view_id: str,
feature_view: FeatureView,
feature_online_store_id: str,
project_id: str = PROVIDE_PROJECT_ID,
run_sync_immediately: bool = False,
location: str | None = None,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create a Feature View creation request.
This method initiates a Vertex AI Feature View creation request against an existing Feature Online Store.
A Feature View represents the features and data served from the configured source.
:param feature_view_id: The ID to use for the FeatureView, which will
become the final component of the FeatureView's resource name.
This value may be up to 60 characters, and valid characters are ``[a-z0-9_]``.
The first character cannot be a number. The value must be unique within a FeatureOnlineStore.
:param feature_view: The configuration of the FeatureView to create.
:param feature_online_store_id: The ID of the online feature store.
:param run_sync_immediately: If set to true, one on demand sync will be run
immediately, regardless the FeatureView.sync_config.
:param project_id: The ID of the Google Cloud project that contains the
feature store. If not provided, will attempt to determine from the environment.
:param location: The Google Cloud region where the feature store is located
(e.g., 'us-central1', 'us-east1').
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
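        Example (an illustrative sketch; the BigQuery URI and IDs are placeholders, and
        the ``FeatureView`` message layout is assumed from ``google.cloud.aiplatform_v1beta1``)::
            >>> from google.cloud.aiplatform_v1beta1.types import FeatureView
            >>> view = FeatureView(
            ...     big_query_source=FeatureView.BigQuerySource(
            ...         uri="bq://my-project.my_dataset.my_table",
            ...         entity_id_columns=["entity_id"],
            ...     )
            ... )
            >>> operation = hook.create_feature_view(
            ...     feature_view_id="my_view",
            ...     feature_view=view,
            ...     feature_online_store_id="my_online_store",
            ...     location="us-central1",
            ... )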
"""
client = self.get_feature_online_store_admin_service_client(location)
return client.create_feature_view(
request={
"parent": f"projects/{project_id}/locations/"
f"{location}/featureOnlineStores/{feature_online_store_id}",
"feature_view_id": feature_view_id,
"feature_view": feature_view,
"run_sync_immediately": run_sync_immediately,
},
timeout=timeout,
retry=retry,
metadata=metadata,
)
def get_feature_view_sync(
self,
location: str,
feature_view_sync_name: str,
) -> dict:
"""
Retrieve the status and details of a Feature View synchronization operation.
This method fetches information about a specific feature view sync operation,
including its current status, timing information, and synchronization metrics.
:param location: The Google Cloud region where the feature store is located
(e.g., 'us-central1', 'us-east1').
:param feature_view_sync_name: The full resource name of the feature view
sync operation to retrieve.
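        The returned report has the following shape (illustrative values; ``end_time``
        and ``sync_summary`` are only present once the sync has finished)::
            {
                "name": "<feature_view_sync_name>",
                "start_time": 1700000000,
                "end_time": 1700000100,
                "sync_summary": {"row_synced": 1000, "total_slot": 2},
            }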
"""
client = self.get_feature_online_store_admin_service_client(location)
try:
response = client.get_feature_view_sync(name=feature_view_sync_name)
report = {
"name": feature_view_sync_name,
"start_time": int(response.run_time.start_time.seconds),
}
if hasattr(response.run_time, "end_time") and response.run_time.end_time.seconds:
report["end_time"] = int(response.run_time.end_time.seconds)
report["sync_summary"] = {
"row_synced": int(response.sync_summary.row_synced),
"total_slot": int(response.sync_summary.total_slot),
}
return report
except Exception as e:
self.log.error("Failed to get feature view sync: %s", str(e))
            raise AirflowException(str(e)) from e
@GoogleBaseHook.fallback_to_default_project_id
def sync_feature_view(
self,
location: str,
feature_online_store_id: str,
feature_view_id: str,
project_id: str = PROVIDE_PROJECT_ID,
) -> str:
"""
Initiate a synchronization operation for a Feature View.
This method triggers a sync operation that updates the online serving data
for a feature view based on the latest data in the underlying batch source.
The sync operation ensures that the online feature values are up-to-date
for real-time serving.
:param location: The Google Cloud region where the feature store is located
(e.g., 'us-central1', 'us-east1').
:param feature_online_store_id: The ID of the online feature store that
contains the feature view to be synchronized.
:param feature_view_id: The ID of the feature view to synchronize.
:param project_id: The ID of the Google Cloud project that contains the
feature store. If not provided, will attempt to determine from the
environment.
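        Example (an illustrative sketch; IDs are placeholders). The returned string is
        the resource name of the sync run, which can be passed to
        ``get_feature_view_sync``::
            >>> sync_name = hook.sync_feature_view(
            ...     location="us-central1",
            ...     feature_online_store_id="my_online_store",
            ...     feature_view_id="my_view",
            ... )
            >>> report = hook.get_feature_view_sync(
            ...     location="us-central1", feature_view_sync_name=sync_name
            ... )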
"""
client = self.get_feature_online_store_admin_service_client(location)
feature_view = (
f"projects/{project_id}/locations/{location}/featureOnlineStores/"
f"{feature_online_store_id}/featureViews/{feature_view_id}"
)
try:
response = client.sync_feature_view(feature_view=feature_view)
return str(response.feature_view_sync)
except Exception as e:
self.log.error("Failed to sync feature view: %s", str(e))
            raise AirflowException(str(e)) from e
@GoogleBaseHook.fallback_to_default_project_id
def fetch_feature_values(
self,
feature_view_id: str,
feature_online_store_id: str,
entity_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
endpoint_domain_name: str | None = None,
data_key: FeatureViewDataKey | None = None,
data_format: int | None = None,
location: str | None = None,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
) -> FetchFeatureValuesResponse:
"""
        Fetch data from the provided Feature View.
        This method fetches data from an existing Feature View, filtered by the provided (or default) data_key.
        It helps to retrieve the actual feature data hosted in the Vertex AI Feature Store.
        :param entity_id: Simple ID identifying the Entity to fetch feature values for.
        :param endpoint_domain_name: Optional. The public domain name hosting the content of an Optimized
            Feature Online Store. Should be omitted if a Bigtable configuration is provided for the
            FeatureStore; in that case the default feature store endpoint, based on the provided location, is used.
:param feature_view_id: The FeatureView ID to fetch data from.
:param feature_online_store_id: The ID of the online feature store.
:param data_key: Optional. The request key to fetch feature values for.
:param data_format: Optional. Response data format. If not set, FeatureViewDataFormat.KEY_VALUE
will be used.
:param project_id: The ID of the Google Cloud project that contains the
feature store. If not provided, will attempt to determine from the
environment.
:param location: The Google Cloud region where the feature store is located
(e.g., 'us-central1', 'us-east1').
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
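        Example (an illustrative sketch; the entity and store/view IDs are placeholders,
        and ``key_values`` assumes the default KEY_VALUE data format)::
            >>> response = hook.fetch_feature_values(
            ...     feature_view_id="my_view",
            ...     feature_online_store_id="my_online_store",
            ...     entity_id="user_123",
            ...     location="us-central1",
            ... )
            >>> response.key_values  # feature name/value pairs for the entity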
"""
data_client = self.get_feature_online_store_service_client(location, endpoint_domain_name)
return data_client.fetch_feature_values(
request={
"id": entity_id,
"feature_view": f"projects/{project_id}/locations/{location}/featureOnlineStores/"
f"{feature_online_store_id}/featureViews/{feature_view_id}",
"data_key": data_key,
"data_format": data_format,
},
timeout=timeout,
retry=retry,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_feature_view(
self,
feature_view_id: str,
feature_online_store_id: str,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete the Feature View.
This method deletes the Feature View from the Feature Online Store.
        :param feature_view_id: The ID of the FeatureView to be deleted.
:param feature_online_store_id: The ID of the online feature store.
:param project_id: The ID of the Google Cloud project that contains the
feature store. If not provided, will attempt to determine from the
environment.
:param location: The Google Cloud region where the feature store is located
(e.g., 'us-central1', 'us-east1').
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
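        Example (an illustrative sketch; IDs are placeholders). The returned
        long-running ``Operation`` can be awaited with ``operation.result()``::
            >>> operation = hook.delete_feature_view(
            ...     feature_view_id="my_view",
            ...     feature_online_store_id="my_online_store",
            ...     location="us-central1",
            ... )
            >>> operation.result()  # blocks until the deletion completes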
"""
client = self.get_feature_online_store_admin_service_client(location)
return client.delete_feature_view(
name=f"projects/{project_id}/locations/{location}/featureOnlineStores/{feature_online_store_id}"
f"/featureViews/{feature_view_id}",
timeout=timeout,
retry=retry,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_feature_online_store(
self,
feature_online_store_id: str,
force: bool = False,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete the FeatureOnlineStore.
        This method deletes the Feature Online Store and all of its feature data.
        The FeatureOnlineStore must not contain any FeatureViews unless ``force`` is set to True.
:param feature_online_store_id: The ID of the online feature store.
:param force: If set to true, any FeatureViews and Features for this FeatureOnlineStore
will also be deleted.
:param project_id: The ID of the Google Cloud project that contains the
feature store. If not provided, will attempt to determine from the
environment.
:param location: The Google Cloud region where the feature store is located
(e.g., 'us-central1', 'us-east1').
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_feature_online_store_admin_service_client(location)
return client.delete_feature_online_store(
name=f"projects/{project_id}/locations/{location}/featureOnlineStores/{feature_online_store_id}",
force=force,
timeout=timeout,
retry=retry,
metadata=metadata,
)
| FeatureStoreHook |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_background06.py | {
"start": 315,
"end": 1075
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("background06.xlsx")
self.ignore_elements = {"xl/worksheets/sheet1.xml": ["<pageSetup"]}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with a background image."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "logo.jpg")
worksheet.set_background(self.image_dir + "logo.jpg")
worksheet.set_header("&C&G", {"image_center": self.image_dir + "blue.jpg"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance1.py | {
"start": 6356,
"end": 6934
} | class ____(Base15[int]):
value: int
def func15(x: Base15[T]):
if isinstance(x, Child15):
# This should generate an error. It's here just to ensure that
# this code branch isn't marked unreachable.
reveal_type(x, expected_text="Never")
reveal_type(x, expected_text="Child15")
reveal_type(x.value, expected_text="int")
def func16(x: Any):
if isinstance(x, (int, int)):
reveal_type(x, expected_text="int")
def func17(x: Any):
if isinstance(x, (Union[int, int])):
reveal_type(x, expected_text="int")
| Child15 |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 2840,
"end": 3833
} | class ____(models.Model):
"""
    django-extensions from version 3.0 officially supports only Django 2.2+, which
    introduced Meta.constraints and suggests using it instead of Meta.unique_together.
    This model is left only to ensure compatibility with Meta.unique_together.
"""
uniq_field = UniqField(
max_length=255, boolean_attr=True, non_boolean_attr="non_boolean_attr"
)
common_field = models.CharField(max_length=10)
another_common_field = models.CharField(max_length=10)
many_to_one_field = models.ForeignKey(DummyRelationModel, on_delete=models.CASCADE)
one_to_one_field = models.OneToOneField(
SecondDummyRelationModel, on_delete=models.CASCADE
)
many_to_many_field = models.ManyToManyField(
ThirdDummyRelationModel, related_name="posts_with_uniq"
)
class Meta:
app_label = "django_extensions"
unique_together = (
"common_field",
"uniq_field",
)
| PostWithUniqFieldCompat |
python | PyCQA__pylint | tests/functional/u/unpacking/unpacking_non_sequence.py | {
"start": 2030,
"end": 2558
} | class ____:
""" Check unpacking as instance attributes. """
def test(self):
""" test unpacking in instance attributes. """
self.a, self.b = 1, 2
self.a, self.b = {1: 2, 2: 3}
self.a, self.b = "xy"
self.a, c = "xy"
c, self.a = good_unpacking()
self.a, self.b = Iter()
self.a, self.b = NonSeq() # [unpacking-non-sequence]
self.a, self.b = ValueError # [unpacking-non-sequence]
self.a, c = nonseq_func # [unpacking-non-sequence]
| ClassUnpacking |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/origin_info_test.py | {
"start": 1150,
"end": 11507
} | class ____(test.TestCase):
def test_create_source_map(self):
source = """
def test_fn(x):
return x + 1
"""
source = textwrap.dedent(source)
node = parser.parse(source)
fake_origin = origin_info.OriginInfo(
loc=origin_info.Location('fake_filename', 3, 7),
function_name='fake_function_name',
source_code_line='fake source line',
comment=None)
anno.setanno(node, anno.Basic.ORIGIN, fake_origin)
source_map = origin_info.create_source_map(node, source, 'test_filename')
loc = origin_info.LineLocation('test_filename', 2)
self.assertIn(loc, source_map)
self.assertIs(source_map[loc], fake_origin)
def _create_source_map(self, test_fn):
node, source = parser.parse_entity(test_fn, ())
origin_info.resolve_entity(node, source, test_fn)
# Creating a source map with the source code as output will create
# an identity map.
return origin_info.create_source_map(node, source, 'test_filename')
def test_create_source_map_identity(self):
test_fn = basic_definitions.simple_function
source_map = self._create_source_map(test_fn)
module_path = tf_inspect.getsourcefile(test_fn)
# Origin line numbers below should match those in basic_definitions.py
fn_start = inspect.getsourcelines(test_fn)[1]
definition_loc = origin_info.LineLocation('test_filename', 1)
self.assertIn(definition_loc, source_map)
self.assertEqual(source_map[definition_loc].loc.lineno, fn_start)
self.assertEqual(source_map[definition_loc].loc.filename, module_path)
self.assertEqual(source_map[definition_loc].function_name,
'simple_function')
def test_create_source_map_multiline_call(self):
test_fn = basic_definitions.function_with_multiline_call
source_map = self._create_source_map(test_fn)
module_path = tf_inspect.getsourcefile(test_fn)
# Origin line numbers below should match those in basic_definitions.py
fn_start = inspect.getsourcelines(test_fn)[1]
call_loc = origin_info.LineLocation('test_filename', 3)
self.assertIn(call_loc, source_map)
self.assertEqual(source_map[call_loc].loc.lineno, fn_start + 2)
self.assertEqual(source_map[call_loc].loc.filename, module_path)
self.assertEqual(source_map[call_loc].function_name,
'function_with_multiline_call')
self.assertEqual(source_map[call_loc].source_code_line, ' return range(')
second_arg_loc = origin_info.LineLocation('test_filename', 5)
self.assertIn(second_arg_loc, source_map)
self.assertEqual(source_map[second_arg_loc].loc.lineno, fn_start + 4)
self.assertEqual(source_map[second_arg_loc].loc.filename, module_path)
self.assertEqual(source_map[second_arg_loc].function_name,
'function_with_multiline_call')
self.assertEqual(source_map[second_arg_loc].source_code_line,
' x + 1,')
def test_create_source_map_no_origin_info(self):
test_fn = basic_definitions.simple_function
node, _ = parser.parse_entity(test_fn,
inspect_utils.getfutureimports(test_fn))
# No origin information should result in an empty map.
test_fn_lines, _ = tf_inspect.getsourcelines(test_fn)
source_map = origin_info.create_source_map(node, '\n'.join(test_fn_lines),
test_fn)
self.assertEmpty(source_map)
def test_resolve(self):
source = """
def test_fn(x):
'''Docstring.'''
return x # comment
"""
source = textwrap.dedent(source)
node = parser.parse(source)
origin_info.resolve(node, source, 'test_file', 10, 10)
def_origin = anno.getanno(node, anno.Basic.ORIGIN)
self.assertEqual(def_origin.loc.filename, 'test_file')
self.assertEqual(def_origin.loc.lineno, 10)
self.assertEqual(def_origin.loc.col_offset, 10)
self.assertEqual(def_origin.source_code_line, 'def test_fn(x):')
self.assertIsNone(def_origin.comment)
docstring_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
self.assertEqual(def_origin.loc.filename, 'test_file')
self.assertEqual(docstring_origin.loc.lineno, 11)
self.assertEqual(docstring_origin.loc.col_offset, 12)
self.assertEqual(docstring_origin.source_code_line, " '''Docstring.'''")
self.assertIsNone(docstring_origin.comment)
ret_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
self.assertEqual(def_origin.loc.filename, 'test_file')
self.assertEqual(ret_origin.loc.lineno, 12)
self.assertEqual(ret_origin.loc.col_offset, 12)
self.assertEqual(ret_origin.source_code_line, ' return x # comment')
self.assertEqual(ret_origin.comment, 'comment')
def test_resolve_with_trailing_garbage(self):
# This comment will be missed because the tokenizer fails to reach it.
source = ' lambda: foo([], bar=1)), baz=2)()'
clean_source = 'lambda: foo([], bar=1)'
node = parser.parse(clean_source).value
origin_info.resolve(node, source, 'test_file', 10, 10)
def_origin = anno.getanno(node, anno.Basic.ORIGIN)
self.assertEqual(def_origin.loc.lineno, 10)
self.assertEqual(def_origin.loc.col_offset, 10)
self.assertEqual(def_origin.source_code_line, source)
self.assertIsNone(def_origin.comment)
def test_resolve_entity(self):
test_fn = basic_definitions.simple_function
node, source = parser.parse_entity(
test_fn, inspect_utils.getfutureimports(test_fn))
origin_info.resolve_entity(node, source, test_fn)
# The line numbers below should match those in basic_definitions.py
fn_start = inspect.getsourcelines(test_fn)[1]
def_origin = anno.getanno(node, anno.Basic.ORIGIN)
self.assertEqual(def_origin.loc.lineno, fn_start)
self.assertEqual(def_origin.loc.col_offset, 0)
self.assertEqual(def_origin.source_code_line, 'def simple_function(x):')
self.assertIsNone(def_origin.comment)
docstring_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
self.assertEqual(docstring_origin.loc.lineno, fn_start + 1)
self.assertEqual(docstring_origin.loc.col_offset, 2)
self.assertEqual(docstring_origin.source_code_line, ' """Docstring."""')
self.assertIsNone(docstring_origin.comment)
ret_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
self.assertEqual(ret_origin.loc.lineno, fn_start + 2)
self.assertEqual(ret_origin.loc.col_offset, 2)
self.assertEqual(ret_origin.source_code_line, ' return x # comment')
self.assertEqual(ret_origin.comment, 'comment')
def test_resolve_entity_nested_function(self):
test_fn = basic_definitions.nested_functions
node, source = parser.parse_entity(
test_fn, inspect_utils.getfutureimports(test_fn))
origin_info.resolve_entity(node, source, test_fn)
# The line numbers below should match those in basic_definitions.py
fn_start = inspect.getsourcelines(test_fn)[1]
inner_def_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
self.assertEqual(inner_def_origin.loc.lineno, fn_start + 3)
self.assertEqual(inner_def_origin.loc.col_offset, 2)
self.assertEqual(inner_def_origin.source_code_line, ' def inner_fn(y):')
self.assertIsNone(inner_def_origin.comment)
inner_ret_origin = anno.getanno(node.body[1].body[0], anno.Basic.ORIGIN)
self.assertEqual(inner_ret_origin.loc.lineno, fn_start + 4)
self.assertEqual(inner_ret_origin.loc.col_offset, 4)
self.assertEqual(inner_ret_origin.source_code_line, ' return y')
self.assertIsNone(inner_ret_origin.comment)
def test_resolve_entity_indented_block(self):
test_fn = basic_definitions.SimpleClass.simple_method
node, source = parser.parse_entity(test_fn,
inspect_utils.getfutureimports(test_fn))
origin_info.resolve_entity(node, source, test_fn)
# The line numbers below should match those in basic_definitions.py
fn_start = inspect.getsourcelines(test_fn)[1]
def_origin = anno.getanno(node, anno.Basic.ORIGIN)
self.assertEqual(def_origin.loc.lineno, fn_start)
self.assertEqual(def_origin.loc.col_offset, 2)
self.assertEqual(def_origin.source_code_line, 'def simple_method(self):')
self.assertIsNone(def_origin.comment)
ret_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
self.assertEqual(ret_origin.loc.lineno, fn_start + 1)
self.assertEqual(ret_origin.loc.col_offset, 4)
self.assertEqual(ret_origin.source_code_line, ' return self')
self.assertIsNone(ret_origin.comment)
def test_resolve_entity_decorated_function(self):
test_fn = basic_definitions.decorated_function
node, source = parser.parse_entity(test_fn,
inspect_utils.getfutureimports(test_fn))
origin_info.resolve_entity(node, source, test_fn)
# The line numbers below should match those in basic_definitions.py
fn_start = inspect.getsourcelines(test_fn)[1]
def_origin = anno.getanno(node, anno.Basic.ORIGIN)
if sys.version_info >= (3, 8):
self.assertEqual(def_origin.loc.lineno, fn_start + 2)
self.assertEqual(def_origin.source_code_line,
'def decorated_function(x):')
else:
self.assertEqual(def_origin.loc.lineno, fn_start)
self.assertEqual(def_origin.source_code_line, '@basic_decorator')
self.assertEqual(def_origin.loc.col_offset, 0)
self.assertIsNone(def_origin.comment)
if_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
self.assertEqual(if_origin.loc.lineno, fn_start + 3)
self.assertEqual(if_origin.loc.col_offset, 2)
self.assertEqual(if_origin.source_code_line, ' if x > 0:')
self.assertIsNone(if_origin.comment)
ret1_origin = anno.getanno(node.body[0].body[0], anno.Basic.ORIGIN)
self.assertEqual(ret1_origin.loc.lineno, fn_start + 4)
self.assertEqual(ret1_origin.loc.col_offset, 4)
self.assertEqual(ret1_origin.source_code_line, ' return 1')
self.assertIsNone(ret1_origin.comment)
ret2_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
self.assertEqual(ret2_origin.loc.lineno, fn_start + 5)
self.assertEqual(ret2_origin.loc.col_offset, 2)
self.assertEqual(ret2_origin.source_code_line, ' return 2')
self.assertIsNone(ret2_origin.comment)
if __name__ == '__main__':
test.main()
| OriginInfoTest |
python | scrapy__scrapy | scrapy/utils/log.py | {
"start": 1077,
"end": 6315
} | class ____(logging.Filter):
"""Keep only top level loggers' name (direct children from root) from
records.
This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
the old Scrapy log behaviour and helps shortening long names.
Since it can't be set for just one logger (it won't propagate for its
children), it's going to be set in the root handler, with a parametrized
``loggers`` list where it should act.
"""
def __init__(self, loggers: list[str] | None = None):
super().__init__()
self.loggers: list[str] = loggers or []
def filter(self, record: logging.LogRecord) -> bool:
if any(record.name.startswith(logger + ".") for logger in self.loggers):
record.name = record.name.split(".", 1)[0]
return True
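# Illustrative usage (a sketch, not part of the original module): attach the
# filter to a handler so that records from e.g. "scrapy.core.engine" are
# reported under the short name "scrapy".
#
#     handler = logging.StreamHandler()
#     handler.addFilter(TopLevelFormatter(["scrapy"]))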
DEFAULT_LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"loggers": {
"filelock": {
"level": "ERROR",
},
"hpack": {
"level": "ERROR",
},
"scrapy": {
"level": "DEBUG",
},
"twisted": {
"level": "ERROR",
},
},
}
def configure_logging(
settings: Settings | dict[_SettingsKey, Any] | None = None,
install_root_handler: bool = True,
) -> None:
"""
Initialize logging defaults for Scrapy.
:param settings: settings used to create and configure a handler for the
root logger (default: None).
:type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
:param install_root_handler: whether to install root logging handler
(default: True)
:type install_root_handler: bool
This function does:
- Route warnings and twisted logging through Python standard logging
- Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
- Route stdout to log if LOG_STDOUT setting is True
When ``install_root_handler`` is True (default), this function also
creates a handler for the root logger according to given settings
(see :ref:`topics-logging-settings`). You can override default options
using ``settings`` argument. When ``settings`` is empty or None, defaults
are used.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
observer = twisted_log.PythonLoggingObserver("twisted")
observer.start()
dictConfig(DEFAULT_LOGGING)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
if settings.getbool("LOG_STDOUT"):
sys.stdout = StreamLogger(logging.getLogger("stdout"))
if install_root_handler:
install_scrapy_root_handler(settings)
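# Illustrative usage (a sketch; LOG_LEVEL and LOG_FILE are standard Scrapy
# logging settings, consumed by _get_handler below):
#
#     configure_logging({"LOG_LEVEL": "INFO", "LOG_FILE": "scrapy.log"})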
_scrapy_root_handler: logging.Handler | None = None
def install_scrapy_root_handler(settings: Settings) -> None:
global _scrapy_root_handler # noqa: PLW0603 # pylint: disable=global-statement
_uninstall_scrapy_root_handler()
logging.root.setLevel(logging.NOTSET)
_scrapy_root_handler = _get_handler(settings)
logging.root.addHandler(_scrapy_root_handler)
def _uninstall_scrapy_root_handler() -> None:
global _scrapy_root_handler # noqa: PLW0603 # pylint: disable=global-statement
if (
_scrapy_root_handler is not None
and _scrapy_root_handler in logging.root.handlers
):
logging.root.removeHandler(_scrapy_root_handler)
_scrapy_root_handler = None
def get_scrapy_root_handler() -> logging.Handler | None:
return _scrapy_root_handler
def _get_handler(settings: Settings) -> logging.Handler:
"""Return a log handler object according to settings"""
filename = settings.get("LOG_FILE")
handler: logging.Handler
if filename:
mode = "a" if settings.getbool("LOG_FILE_APPEND") else "w"
encoding = settings.get("LOG_ENCODING")
handler = logging.FileHandler(filename, mode=mode, encoding=encoding)
elif settings.getbool("LOG_ENABLED"):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get("LOG_FORMAT"), datefmt=settings.get("LOG_DATEFORMAT")
)
handler.setFormatter(formatter)
handler.setLevel(settings.get("LOG_LEVEL"))
if settings.getbool("LOG_SHORT_NAMES"):
handler.addFilter(TopLevelFormatter(["scrapy"]))
return handler
def log_scrapy_info(settings: Settings) -> None:
logger.info(
"Scrapy %(version)s started (bot: %(bot)s)",
{"version": scrapy.__version__, "bot": settings["BOT_NAME"]},
)
software = settings.getlist("LOG_VERSIONS")
if not software:
return
versions = pprint.pformat(dict(get_versions(software)), sort_dicts=False)
logger.info(f"Versions:\n{versions}")
def log_reactor_info() -> None:
from twisted.internet import reactor
logger.debug("Using reactor: %s.%s", reactor.__module__, reactor.__class__.__name__)
if isinstance(reactor, asyncioreactor.AsyncioSelectorReactor):
logger.debug(
"Using asyncio event loop: %s.%s",
reactor._asyncioEventloop.__module__,
reactor._asyncioEventloop.__class__.__name__,
)
| TopLevelFormatter |
python | numpy__numpy | benchmarks/benchmarks/bench_manipulate.py | {
"start": 1993,
"end": 2159
} | class ____(ConcatenateStackArrays):
# Large number of small arrays to test GIL (non-)release
params = [[(1, 1)], [1000, 100000], TYPES1]
| ConcatenateNestedArrays |
python | google__pytype | pytype/rewrite/output_test.py | {
"start": 718,
"end": 2260
} | class ____(OutputTestBase):
def test_constant(self):
cls = self.make_value("""
class C:
X = 0
""")
pytd_cls = self.ctx.pytd_converter.to_pytd_def(cls)
self.assertPytdEqual(
pytd_cls,
"""
class C:
X: ClassVar[int]
""",
)
def test_method(self):
cls = self.make_value("""
class C:
def f(self):
return 0
""")
pytd_cls = self.ctx.pytd_converter.to_pytd_def(cls)
self.assertPytdEqual(
pytd_cls,
"""
class C:
def f(self) -> int: ...
""",
)
def test_nested_class(self):
cls = self.make_value("""
class C:
class D:
pass
""")
pytd_cls = self.ctx.pytd_converter.to_pytd_def(cls)
self.assertPytdEqual(
pytd_cls,
"""
class C:
class D: ...
""",
)
def test_instance_attribute(self):
cls = self.make_value("""
class C:
def __init__(self):
self.x = 42
""")
pytd_cls = self.ctx.pytd_converter.to_pytd_def(cls)
self.assertPytdEqual(
pytd_cls,
"""
class C:
x: int
def __init__(self) -> None: ...
""",
)
def test_metaclass(self):
cls = self.make_value("""
class Meta(type):
pass
class C(metaclass=Meta):
pass
""")
pytd_cls = self.ctx.pytd_converter.to_pytd_def(cls)
self.assertPytdEqual(
pytd_cls,
"""
class C(metaclass=Meta): ...
""",
)
| ClassToPytdDefTest |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 25130,
"end": 27424
} | class ____(Numeric):
"""
The base class for all the integral datatypes.
"""
default = 0
def __init__(self, field, config=None, pos=None):
Numeric.__init__(self, field, config, pos)
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
mask = False
if isinstance(value, str):
value = value.lower()
if value == "":
if config["version_1_3_or_later"]:
mask = True
else:
warn_or_raise(W49, W49, (), config, pos)
if self.null is not None:
value = self.null
else:
value = self.default
elif value == "nan":
mask = True
if self.null is None:
warn_or_raise(W31, W31, (), config, pos)
value = self.default
else:
value = self.null
elif value.startswith("0x"):
value = int(value[2:], 16)
else:
value = int(value, 10)
else:
value = int(value)
if self.null is not None and value == self.null:
mask = True
if value < self.val_range[0]:
warn_or_raise(W51, W51, (value, self.bit_size), config, pos)
value = self.val_range[0]
elif value > self.val_range[1]:
warn_or_raise(W51, W51, (value, self.bit_size), config, pos)
value = self.val_range[1]
return value, mask
def output(self, value, mask):
if mask:
if self.null is None:
warn_or_raise(W31, W31)
return "NaN"
return str(self.null)
return str(value)
def binoutput(self, value, mask):
if mask:
if self.null is None:
vo_raise(W31)
else:
value = self.null
value = _ensure_bigendian(value)
return value.tobytes()
def filter_array(self, value, mask):
if np.any(mask):
if self.null is not None:
return np.where(mask, self.null, value)
else:
vo_raise(W31)
return value
| Integer |
python | pyca__cryptography | tests/hazmat/primitives/test_cmac.py | {
"start": 1155,
"end": 6635
} | class ____:
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
AES(fake_key)
),
skip_message="Does not support CMAC.",
)
@pytest.mark.parametrize("params", vectors_aes)
def test_aes_generate(self, backend, params):
key = params["key"]
message = params["message"]
output = params["output"]
cmac = CMAC(AES(binascii.unhexlify(key)), backend)
cmac.update(binascii.unhexlify(message))
assert binascii.hexlify(cmac.finalize()) == output
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
AES(fake_key)
),
skip_message="Does not support CMAC.",
)
@pytest.mark.parametrize("params", vectors_aes)
def test_aes_verify(self, backend, params):
key = params["key"]
message = params["message"]
output = params["output"]
cmac = CMAC(AES(binascii.unhexlify(key)), backend)
cmac.update(binascii.unhexlify(message))
cmac.verify(binascii.unhexlify(output))
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
TripleDES(fake_key)
),
skip_message="Does not support CMAC.",
)
@pytest.mark.parametrize("params", vectors_3des)
def test_3des_generate(self, backend, params):
key1 = params["key1"]
key2 = params["key2"]
key3 = params["key3"]
key = key1 + key2 + key3
message = params["message"]
output = params["output"]
cmac = CMAC(TripleDES(binascii.unhexlify(key)), backend)
cmac.update(binascii.unhexlify(message))
assert binascii.hexlify(cmac.finalize()) == output
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
TripleDES(fake_key)
),
skip_message="Does not support CMAC.",
)
@pytest.mark.parametrize("params", vectors_3des)
def test_3des_verify(self, backend, params):
key1 = params["key1"]
key2 = params["key2"]
key3 = params["key3"]
key = key1 + key2 + key3
message = params["message"]
output = params["output"]
cmac = CMAC(TripleDES(binascii.unhexlify(key)), backend)
cmac.update(binascii.unhexlify(message))
cmac.verify(binascii.unhexlify(output))
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
AES(fake_key)
),
skip_message="Does not support CMAC.",
)
def test_invalid_verify(self, backend):
key = b"2b7e151628aed2a6abf7158809cf4f3c"
cmac = CMAC(AES(key), backend)
cmac.update(b"6bc1bee22e409f96e93d7e117393172a")
with pytest.raises(InvalidSignature):
cmac.verify(b"foobar")
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(ARC4(fake_key), None),
skip_message="Does not support CMAC.",
)
def test_invalid_algorithm(self, backend):
key = b"0102030405"
with pytest.raises(TypeError):
CMAC(ARC4(key), backend) # type: ignore[arg-type]
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
CMAC(DummyBlockCipherAlgorithm(b"bad"), backend)
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
AES(fake_key)
),
skip_message="Does not support CMAC.",
)
def test_raises_after_finalize(self, backend):
key = b"2b7e151628aed2a6abf7158809cf4f3c"
cmac = CMAC(AES(key), backend)
cmac.finalize()
with pytest.raises(AlreadyFinalized):
cmac.update(b"foo")
with pytest.raises(AlreadyFinalized):
cmac.copy()
with pytest.raises(AlreadyFinalized):
cmac.finalize()
with pytest.raises(AlreadyFinalized):
cmac.verify(b"")
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
AES(fake_key)
),
skip_message="Does not support CMAC.",
)
def test_verify_reject_unicode(self, backend):
key = b"2b7e151628aed2a6abf7158809cf4f3c"
cmac = CMAC(AES(key), backend)
with pytest.raises(TypeError):
cmac.update("") # type: ignore[arg-type]
with pytest.raises(TypeError):
cmac.verify("") # type: ignore[arg-type]
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
AES(fake_key)
),
skip_message="Does not support CMAC.",
)
def test_copy_with_backend(self, backend):
key = b"2b7e151628aed2a6abf7158809cf4f3c"
cmac = CMAC(AES(key), backend)
cmac.update(b"6bc1bee22e409f96e93d7e117393172a")
copy_cmac = cmac.copy()
assert cmac.finalize() == copy_cmac.finalize()
@pytest.mark.supported(
only_if=lambda backend: backend.cmac_algorithm_supported(
AES(fake_key)
),
skip_message="Does not support CMAC.",
)
def test_buffer_protocol(self, backend):
key = bytearray(b"2b7e151628aed2a6abf7158809cf4f3c")
cmac = CMAC(AES(key), backend)
cmac.update(b"6bc1bee22e409f96e93d7e117393172a")
assert cmac.finalize() == binascii.unhexlify(
b"a21e6e647bfeaf5ca0a5e1bcd957dfad"
)
| TestCMAC |
python | kamyu104__LeetCode-Solutions | Python/make-array-non-decreasing-or-non-increasing.py | {
"start": 63,
"end": 646
} | class ____(object):
def convertArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def f(nums):
result = 0
max_heap = []
for x in nums:
if max_heap and x < -max_heap[0]:
result += -heapq.heappop(max_heap)-x
heapq.heappush(max_heap, -x)
heapq.heappush(max_heap, -x)
return result
return min(f(nums), f((x for x in reversed(nums))))
# Time: O(n^2)
# Space: O(n)
import collections
# dp
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 48317,
"end": 49342
} | class ____(BaseModel):
"""
    The result and lifecycle state of the run.
    See the source code for the fields' descriptions.
"""
model_config = ConfigDict(extra="allow", frozen=True)
life_cycle_state: Optional[RunLifeCycleState] = Field(
None,
description=(
"A description of a run’s current location in the run lifecycle. This field"
" is always available in the response."
),
)
result_state: Optional[RunResultState] = None
state_message: Optional[str] = Field(
None,
description=(
"A descriptive message for the current state. This field is unstructured,"
" and its exact format is subject to change."
),
examples=[""],
)
user_cancelled_or_timedout: Optional[bool] = Field(
None,
description=(
"Whether a run was canceled manually by a user or by the scheduler because"
" the run timed out."
),
examples=[False],
)
| RunState |
python | django-mptt__django-mptt | mptt/models.py | {
"start": 1163,
"end": 1550
} | class ____(property):
def __init__(self, name, bases=(), members=None):
if members is None:
members = {}
return super().__init__(
members.get("__get__"),
members.get("__set__"),
members.get("__delete__"),
members.get("__doc__"),
)
classproperty = classpropertytype("classproperty")
| classpropertytype |
python | django__django | tests/migrations/test_migrations_no_default/0001_initial.py | {
"start": 43,
"end": 664
} | class ____(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="SillyModel",
fields=[
(
"id",
models.BigAutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("silly_field", models.BooleanField(default=False)),
],
options={},
bases=(models.Model,),
),
]
| Migration |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 15782,
"end": 17340
} | class ____(Benchmark):
r"""
Mishra 9 objective function.
This class defines the Mishra 9 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Mishra09}}({x}) = \left[ ab^2c + abc^2 + b^2
+ (x_1 + x_2 - x_3)^2 \right]^2
Where, in this exercise:
.. math::
\begin{cases} a = 2x_1^3 + 5x_1x_2 + 4x_3 - 2x_1^2x_3 - 18 \\
b = x_1 + x_2^3 + x_1x_2^2 + x_1x_3^2 - 22 \\
c = 8x_1^2 + 2x_2x_3 + 2x_2^2 + 3x_2^3 - 52 \end{cases}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2, 3`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 2, 3]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO Line 1103
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[1.0, 2.0, 3.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
a = (2 * x[0] ** 3 + 5 * x[0] * x[1]
+ 4 * x[2] - 2 * x[0] ** 2 * x[2] - 18)
b = x[0] + x[1] ** 3 + x[0] * x[1] ** 2 + x[0] * x[2] ** 2 - 22.0
c = (8 * x[0] ** 2 + 2 * x[1] * x[2]
+ 2 * x[1] ** 2 + 3 * x[1] ** 3 - 52)
return (a * c * b ** 2 + a * b * c ** 2 + b ** 2
+ (x[0] + x[1] - x[2]) ** 2) ** 2
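    # Sanity check (illustrative, not part of the original benchmark): at the
    # global optimum x = [1, 2, 3], a = 2 + 10 + 12 - 6 - 18 = 0,
    # b = 1 + 8 + 4 + 9 - 22 = 0, c = 8 + 12 + 8 + 24 - 52 = 0 and
    # (x[0] + x[1] - x[2]) ** 2 = 0, so fun([1, 2, 3]) = 0, matching fglob.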
| Mishra09 |
python | fluentpython__example-code | 06-dp-1class-func/strategy_best3.py | {
"start": 1625,
"end": 2540
} | class ____: # the Context
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
def total(self):
if not hasattr(self, '__total'):
self.__total = sum(item.total() for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self)
return self.total() - discount
def __repr__(self):
fmt = '<Order total: {:.2f} due: {:.2f}>'
return fmt.format(self.total(), self.due())
# BEGIN STRATEGY_BEST3
promos = [func for name, func in
inspect.getmembers(promotions, inspect.isfunction)]
def best_promo(order):
"""Select best discount available
"""
return max(promo(order) for promo in promos)
# END STRATEGY_BEST3
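# Illustrative usage (a sketch; ``Customer``, ``LineItem`` and the promotion
# functions live in the earlier, omitted part of this example):
#
#     joe = Customer('John Doe', 0)
#     cart = [LineItem('banana', 4, .5), LineItem('apple', 10, 1.5)]
#     Order(joe, cart, best_promo)  # picks the strategy with the best discount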
| Order |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP049_1.py | {
"start": 1139,
"end": 1161
} | class ____[_T, T]: ...
| C |
python | huggingface__transformers | src/transformers/models/qwen2_moe/modular_qwen2_moe.py | {
"start": 10241,
"end": 10637
} | class ____(MixtralForCausalLM, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.num_experts = config.num_experts
self.model = Qwen2MoeModel(config)
| Qwen2MoeForCausalLM |
python | pennersr__django-allauth | allauth/socialaccount/providers/coinbase/views.py | {
"start": 181,
"end": 1032
} | class ____(OAuth2Adapter):
provider_id = "coinbase"
@property
def authorize_url(self):
return "https://www.coinbase.com/oauth/authorize"
@property
def access_token_url(self):
return "https://www.coinbase.com/oauth/token"
@property
def profile_url(self):
return "https://api.coinbase.com/v2/user"
def complete_login(self, request, app, token, **kwargs):
response = (
get_adapter()
.get_requests_session()
.get(self.profile_url, params={"access_token": token})
)
extra_data = response.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(CoinbaseOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(CoinbaseOAuth2Adapter)
| CoinbaseOAuth2Adapter |
python | huggingface__transformers | src/transformers/models/longformer/modeling_longformer.py | {
"start": 75897,
"end": 81078
} | class ____(LongformerPreTrainedModel):
_tied_weights_keys = {
"lm_head.decoder.weight": "longformer.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
def __init__(self, config):
super().__init__(config)
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.lm_head = LongformerLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, LongformerMaskedLMOutput]:
r"""
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention. Tokens with global
attention attends to all other tokens, and all other tokens attend to them. This is important for
task-specific finetuning because it makes the model more flexible at representing the task. For example,
for classification, the <s> token should be given global attention. For QA, all question tokens should also
have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more
details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        Mask filling example:
```python
>>> from transformers import AutoTokenizer, LongformerForMaskedLM
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
>>> model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096")
```
Let's try a very long input.
```python
>>> TXT = (
... "My friends are <mask> but they eat too many carbs."
... + " That's why I decide not to eat with them." * 300
... )
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
['healthy', 'skinny', 'thin', 'good', 'vegetarian']
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(prediction_scores.device)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return LongformerMaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
@auto_docstring(
custom_intro="""
Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
| LongformerForMaskedLM |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 2707,
"end": 2855
} | class ____(factory.django.DjangoModelFactory):
class Meta:
model = models.WithFile
afile = factory.django.FileField()
| WithFileFactory |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 18174,
"end": 35851
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[]", L_z_: "f32[]", L_weird_b: "f32[]", L_weird_c: "f32[]"):
l_x_ = L_x_
l_z_ = L_z_
l_weird_b = L_weird_b
l_weird_c = L_weird_c
fwd_body_0 = self.fwd_body_0
bwd_body_0 = self.bwd_body_0
autograd_function_apply: "f32[]" = torch.ops.higher_order.autograd_function_apply(fwd_body_0, bwd_body_0, l_x_, l_z_, l_weird_b, l_weird_c, args_tensor_mask = [True, False, True], non_differentiable_idx = []); fwd_body_0 = bwd_body_0 = l_x_ = l_z_ = l_weird_b = l_weird_c = None
return (autograd_function_apply,)
class fwd_body_0(torch.nn.Module):
def forward(self, ctx : torch.autograd.function.Function, x: "f32[]", z: "f32[]", l_weird_b: "f32[]", l_weird_c: "f32[]"):
_set_grad_enabled = torch._C._set_grad_enabled(False); _set_grad_enabled = None
mul: "f32[]" = l_weird_b * l_weird_c
clone: "f32[]" = x.clone(); x = None
mul_1: "f32[]" = mul * clone; mul = clone = None
_set_grad_enabled_1 = torch._C._set_grad_enabled(True); _set_grad_enabled_1 = None
return (mul_1, [l_weird_b, l_weird_c])
class bwd_body_0(torch.nn.Module):
def forward(self, ctx : torch.autograd.function.Function, grad: "f32[]", l_weird_b: "f32[]", l_weird_c: "f32[]"):
_set_grad_enabled = torch._C._set_grad_enabled(False); _set_grad_enabled = None
mul: "f32[]" = grad * l_weird_b; l_weird_b = None
mul_1: "f32[]" = mul * l_weird_c; mul = l_weird_c = None
mul_2: "f32[]" = grad * 2; grad = None
_set_grad_enabled_1 = torch._C._set_grad_enabled(True); _set_grad_enabled_1 = None
return (mul_1, mul_2)
""",
)
def test_tensor_list_as_input(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x, tl):
ctx.save_for_backward(tl[0], tl[1])
return x.clone() * (tl[0] + tl[1])
@staticmethod
def backward(ctx, grad):
tl0, tl1 = ctx.saved_tensors
return grad * (tl0 + tl1), None
@torch.compile(backend="aot_eager", fullgraph=True)
def f(x, tl):
return Foo.apply(x, tl)
x = torch.tensor(2.0, requires_grad=True)
tl = [
torch.tensor(3.0, requires_grad=True),
torch.tensor(4.0, requires_grad=True),
]
result = f(x, tl)
result.sum().backward()
self.assertEqual(result, Foo.apply(x, tl))
self.assertEqual(x.grad, 7.0)
self.assertEqual(tl[0].grad, None)
self.assertEqual(tl[1].grad, None)
def test_multiple_different_non_tensor_inputs(self):
@dataclass
class Weird:
x: int
b: torch.Tensor
c: torch.Tensor
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weird, z, tl):
ctx.save_for_backward(weird.b, weird.c, tl[0], tl[1])
return x.clone() * weird.b * weird.c * tl[0]
@staticmethod
def backward(ctx, grad):
b, c, tl0, _ = ctx.saved_tensors
return grad * b * c * tl0, None, grad * 2, None
@torch.compile(backend="aot_eager", fullgraph=True)
def f(x, weird, z, tl):
return Foo.apply(x, weird, z, tl)
x = torch.tensor(2.0, requires_grad=True)
weird = Weird(
1.2,
torch.tensor(2.5, requires_grad=True),
torch.tensor(3.5, requires_grad=True),
)
z = torch.tensor(3.0, requires_grad=True)
tl = [
torch.tensor(0.5, requires_grad=True),
torch.tensor(0.6, requires_grad=True),
]
result = f(x, weird, z, tl)
result.sum().backward()
self.assertEqual(result, Foo.apply(x, weird, z, tl))
self.assertEqual(x.grad, 2.5 * 3.5 * 0.5)
self.assertEqual(z.grad, 2.0)
self.assertEqual(weird.b.grad, None)
self.assertEqual(weird.c.grad, None)
self.assertEqual(tl[0].grad, None)
self.assertEqual(tl[1].grad, None)
def test_backward_returns_none_for_tensor_input(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(y)
return x.clone() * y
@staticmethod
def backward(ctx, grad):
(y,) = ctx.saved_tensors
return grad * y, None
@torch.compile(backend="aot_eager", fullgraph=True)
def f(x, y):
return Foo.apply(x, y)
x = torch.tensor(2.0, requires_grad=True)
y = torch.tensor(3.0, requires_grad=True)
result = f(x, y)
result.sum().backward()
self.assertEqual(result, Foo.apply(x, y))
self.assertEqual(x.grad, 3.0)
self.assertEqual(y.grad, None)
def test_function_with_bound_free_variable(self):
class LowerBound(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs, bound):
ctx.save_for_backward(inputs, inputs.new_ones(1) * bound)
return inputs.clamp(min=bound)
@staticmethod
def backward(ctx, grad_output):
inputs, bound = ctx.saved_tensors
return (inputs >= bound) * grad_output, None
class MyMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.gamma = torch.nn.Parameter(torch.rand([4, 128, 32, 32]))
def forward(self, x):
gamma = LowerBound.apply(self.gamma, 1)
return x + gamma
mod = MyMod()
args, kwargs = ([torch.rand([4, 128, 32, 32])], {})
before = mod(*args, **kwargs)
compiled_model = torch.compile(mod, backend="eager")
after = compiled_model(*args, **kwargs)
self.assertEqual(before, after)
def test_forward_returns_constant(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x, [1, 2, 3] # Tensor and list of integers
@staticmethod
def backward(ctx, grad_output1, grad_output2):
return grad_output1
@torch.compile(backend="aot_eager", fullgraph=True)
def f(x):
return Foo.apply(x)
x = torch.tensor(2.0, requires_grad=True)
result = f(x)
result[0].sum().backward()
self.assertEqual(result, Foo.apply(x))
# I pulled all of these test cases from test_autograd.py
# In the future, we should make the Dynamo test suite actually
# run on test_autograd.py (it's disabled right now) and delete these.
def test_smoke_from_test_autograd(self):
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
mult2 = Mult.apply
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x**2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
class MyFunc2(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, gO):
return torch.tensor(float("nan")).expand(10, 10)
def run_fn(a): # noqa: F841
out = MyFunc2.apply(a)
return out.sum()
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
class MyAdder(torch.autograd.Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
return grad, grad
class InplaceMul(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
result = x.mul_(2)
ctx.mark_dirty(result)
return result
@staticmethod
def backward(ctx, grad_output):
pass
@staticmethod
def jvp(ctx, x_t):
if jvp_err: # noqa: F821
return x_t
else:
return x_t.mul_(2)
class MyFn2(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
return x + y, x
@staticmethod
def vjp(ctx, gO1, gO2):
return gO1 + gO2, gO1
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t + y_t, fn(x_t) # noqa: F821
class MyFn3(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
def test():
x = torch.ones(2, 4, 4).requires_grad_()
mult2(x)
x = torch.tensor(2).double().requires_grad_()
double(x)
double2(x)
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
Identity.apply(x, y)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
MyFn.apply(a)
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
z = torch.tensor(1.0, requires_grad=True)
x = z.clone()
y = InplaceMul.apply(x)
a = torch.tensor(1.0, dtype=torch.double, requires_grad=True)
b = torch.tensor(1.0, dtype=torch.double, requires_grad=True)
c = torch.tensor(1.0, dtype=torch.double)
d = torch.tensor(1.0, dtype=torch.double)
MyFn2.apply(a, b)
MyFn2.apply(c, d)
base = torch.rand(10, requires_grad=True)
MyFn3.apply(base, False)
test()
opt_test = torch.compile(test, backend="eager")
opt_test()
def test_tensor_subclass_intermediary_input(self):
class FooTensor(torch.Tensor):
@staticmethod
def __new__(cls, data, config, scale):
self = torch.Tensor._make_wrapper_subclass(
cls,
config[0],
strides=config[1],
storage_offset=config[2],
dtype=config[3],
layout=config[4],
requires_grad=config[5],
device=data.device,
)
self._data = data
self._config = config
self._scale = scale
return self
def __repr__(self):
return "FooTensor"
def __tensor_flatten__(self):
return ("_data",), (
self._config,
self._scale,
)
@staticmethod
def __tensor_unflatten__(tensors, metadatas, outer_size, outer_stride):
return FooTensor(tensors["_data"], metadatas[0], metadatas[1])
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs=None):
# handling clone and view is so dynamo fakefication passes, it's not
# intended to be handling user code
if func == torch.ops.aten.clone.default:
return FooTensor(
args[0]._data.clone(), args[0]._config, args[0]._scale
)
elif func == torch.ops.aten.view.default:
new_data = args[0]._data.view(*args[1:])
return FooTensor(new_data, args[0]._config, args[0]._scale)
raise NotImplementedError
class foo_autograd_fn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
# access some data from `x`, where `x` is a tensor subclass
x2 = x._data + 1.0
# create and return a tensor subclass from within a torch.autograd.Function
x3 = FooTensor(x2, x._config, x._scale)
return x3._data
@staticmethod
def backward(ctx, g):
return g
x_ref = torch.randn(4, 4).requires_grad_(True)
x = copy.deepcopy(x_ref)
scale = torch.tensor(1.0)
# Weird that this is needed, but not having this breaks a lot of things
torch._dynamo.allow_in_graph(FooTensor)
def foo(x, scale):
config = (
x.size(),
x.stride(),
x.storage_offset(),
x.dtype,
x.layout,
x.requires_grad,
)
x = FooTensor(x, config, scale)
x = foo_autograd_fn.apply(x)
return x
y_ref = foo(x_ref, scale)
y_ref.sum().backward()
foo_opt = torch.compile(foo, backend="eager")
y = foo_opt(x, scale)
y.sum().backward()
self.assertEqual(y, y_ref)
self.assertEqual(x.grad, x_ref.grad)
def test_assert_is_contiguous_after_matmul(self):
class LinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weight):
ctx.save_for_backward(x, weight)
y = x.matmul(weight.t())
return y
@staticmethod
def backward(ctx, grad_output):
x, weight = ctx.saved_tensors
grad_x = grad_output.matmul(weight)
assert grad_x.is_contiguous()
grad_weight = grad_output.transpose(0, 1).matmul(x)
return grad_x, grad_weight
def fn(x, weight):
return LinearFunction.apply(x, weight)
x1 = torch.randn(5, 3, requires_grad=True)
x2 = copy.deepcopy(x1)
W1 = torch.randn(4, 3, requires_grad=True)
W2 = copy.deepcopy(W1)
y1 = fn(x1, W1)
y1.sum().backward()
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
y2 = opt_fn(x2, W2)
y2.sum().backward()
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad)
self.assertEqual(W1.grad, W2.grad)
self.assertEqual(cnts.frame_count, 1)
def test_assert_is_contiguous_on_grad_output_directly(self):
class LinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weight):
ctx.save_for_backward(x, weight)
y = x.matmul(weight.t())
return y
@staticmethod
def backward(ctx, grad_output):
assert grad_output.is_contiguous()
x, weight = ctx.saved_tensors
grad_x = grad_output.matmul(weight)
grad_weight = grad_output.transpose(0, 1).matmul(x)
return grad_x, grad_weight
def fn(x, weight):
return LinearFunction.apply(x, weight)
x1 = torch.randn(5, 3, requires_grad=True)
x2 = copy.deepcopy(x1)
W1 = torch.randn(4, 3, requires_grad=True)
W2 = copy.deepcopy(W1)
y1 = fn(x1, W1)
y1.backward(y1.clone().detach().requires_grad_(True))
cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
opt_fn = torch.compile(fn, backend=cnt)
y2 = opt_fn(x2, W2)
y2.backward(y2.clone().detach().requires_grad_(True))
self.assertEqual(y1, y2)
self.assertEqual(x1.grad, x2.grad)
self.assertEqual(W1.grad, W2.grad)
# Check the inserted .contiguous() call is there!
actual_graph = torch._dynamo.testing.normalize_gm(
cnt.graphs[0].print_readable(print_output=False)
)
self.assertExpectedInline(
actual_graph,
"""\
| GraphModule |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/fractional_avg_pool_op_test.py | {
"start": 14703,
"end": 24945
} | class ____(test.TestCase):
"""Tests for FractionalAvgPoolGrad.
Two types of tests for FractionalAvgPoolGrad.
1) Test fractional_avg_pool_grad() directly.
This type of test relies on gen_nn_ops.avg_pool_grad() returns the
correct result. For example:
* input_tensor_shape = (1, 10, 10, 1)
* window_size = (1, 2, 2, 1)
* stride_size = (1, 2, 2, 1)
* padding: not really important, since 10/2 is divisible
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 6, 8, 10]
* col_sequence = [0, 2, 4, 6, 8, 10]
* overlapping = False
  This also means that their gradients will be the same in such cases.
Similarly, when
* input_tensor_shape = (1, 7, 7, 1)
* window_size = (1, 3, 3, 1)
* stride_size = (1, 2, 2, 1)
* padding: not important
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 7]
* col_sequence = [0, 2, 4, 7]
* overlapping = True
2) Test through compute_gradient_error()
"""
_PRNG = np.random.RandomState(341261004)
_SEED = 341261005
def _GenerateRandomInputTensor(self, shape):
num_elements = 1
for dim_size in shape:
num_elements *= dim_size
x = self._PRNG.rand(num_elements) * 1000
return x.reshape(shape)
def testDirectNotUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = row_window_size * 5
num_cols = col_window_size * 7
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.cached_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size, col_window_size, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = self.evaluate(output_tensor)
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = self.evaluate(input_backprop_tensor)
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=False)
fap_input_backprop = self.evaluate(fap_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
def testDirectUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = (row_window_size - 1) * 5 + 1
num_cols = (col_window_size - 1) * 7 + 1
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.cached_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = self.evaluate(output_tensor)
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = self.evaluate(input_backprop_tensor)
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
col_seq[-1] += 1
fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=True)
fap_input_backprop = self.evaluate(fap_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
@test_util.run_deprecated_v1
def testAllInputOptionsThroughGradientError(self):
input_shape = (1, 7, 13, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(3), 1]
for pseudo_random in True, False:
for overlapping in True, False:
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
@test_util.run_deprecated_v1
def testDifferentTensorShapesThroughGradientError(self):
pseudo_random = True
overlapping = True
pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
for num_batches in [1, 2]:
for num_rows in [5, 13]:
for num_cols in [5, 11]:
for num_channels in [1, 3]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
input_data = self._GenerateRandomInputTensor(input_shape)
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
@test_util.run_deprecated_v1
def testLargePoolingRatioThroughGradientError(self):
input_shape = (1, 17, 23, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = (1, math.sqrt(13), math.sqrt(7), 1)
output_shape = [int(a / b) for a, b in zip(input_shape, pooling_ratio)]
overlapping = True
pseudo_random = False
with self.cached_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
def testInvalidSeqRaiseErrorForFractionalAvgPoolGrad(self):
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
with self.cached_session() as _:
overlapping = True
orig_input_tensor_shape = constant_op.constant(
-1879048192, shape=[4], dtype=dtypes.int64)
out_backprop = constant_op.constant([],
shape=[0, 0, 0, 0],
dtype=dtypes.float64)
row_pooling_sequence = constant_op.constant(
1, shape=[4], dtype=dtypes.int64)
col_pooling_sequence = constant_op.constant(
1, shape=[4], dtype=dtypes.int64)
t = gen_nn_ops.fractional_avg_pool_grad(
orig_input_tensor_shape=orig_input_tensor_shape,
out_backprop=out_backprop,
row_pooling_sequence=row_pooling_sequence,
col_pooling_sequence=col_pooling_sequence,
overlapping=overlapping)
self.evaluate(t)
if __name__ == "__main__":
test.main()
| FractionalAvgPoolGradTest |
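The docstring in the row above claims that, with evenly spaced pooling sequences and overlapping=False, fractional average pooling reduces to plain window/stride average pooling. A minimal NumPy sketch of that equivalence; frac_avg_pool_2d is an illustrative helper, not a TensorFlow API:

import numpy as np

def frac_avg_pool_2d(x, row_seq, col_seq):
    # pool region (i, j) averages rows [row_seq[i], row_seq[i+1]) and
    # cols [col_seq[j], col_seq[j+1])
    out = np.empty((len(row_seq) - 1, len(col_seq) - 1))
    for i in range(len(row_seq) - 1):
        for j in range(len(col_seq) - 1):
            out[i, j] = x[row_seq[i]:row_seq[i + 1],
                          col_seq[j]:col_seq[j + 1]].mean()
    return out

x = np.random.rand(10, 10)
seq = list(range(0, 11, 2))                      # [0, 2, 4, 6, 8, 10]
plain = x.reshape(5, 2, 5, 2).mean(axis=(1, 3))  # 2x2 window, stride 2
assert np.allclose(frac_avg_pool_2d(x, seq, seq), plain)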
python | pydata__xarray | xarray/backends/common.py | {
"start": 8409,
"end": 9048
} | class ____(NdimSizeLenMixin, indexing.ExplicitlyIndexed):
__slots__ = ()
async def async_getitem(self, key: indexing.ExplicitIndexer) -> np.typing.ArrayLike:
raise NotImplementedError("Backend does not support asynchronous loading")
def get_duck_array(self, dtype: np.typing.DTypeLike | None = None):
key = indexing.BasicIndexer((slice(None),) * self.ndim)
return self[key] # type: ignore[index]
async def async_get_duck_array(self, dtype: np.typing.DTypeLike | None = None):
key = indexing.BasicIndexer((slice(None),) * self.ndim)
return await self.async_getitem(key)
| BackendArray |
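A hedged sketch of a concrete subclass, following the pattern xarray documents for new backends; the in-memory array is illustrative, and this stays synchronous-only (async_getitem keeps its NotImplementedError default):

import numpy as np
from xarray.backends.common import BackendArray
from xarray.core import indexing

class InMemoryBackendArray(BackendArray):
    def __init__(self, array):
        self._array = np.asarray(array)
        self.shape = self._array.shape
        self.dtype = self._array.dtype

    def __getitem__(self, key):
        # translate xarray's explicit indexers into plain tuples for us
        return indexing.explicit_indexing_adapter(
            key, self.shape, indexing.IndexingSupport.BASIC, self._raw_indexing_method
        )

    def _raw_indexing_method(self, key):
        return self._array[key]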
python | django__django | django/core/validators.py | {
"start": 597,
"end": 2250
} | class ____:
regex = ""
message = _("Enter a valid value.")
code = "invalid"
inverse_match = False
flags = 0
def __init__(
self, regex=None, message=None, code=None, inverse_match=None, flags=None
):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, str):
raise TypeError(
"If the flags are set, regex must be a regular expression string."
)
self.regex = _lazy_re_compile(self.regex, self.flags)
def __call__(self, value):
"""
Validate that the input contains (or does *not* contain, if
inverse_match is True) a match for the regular expression.
"""
regex_matches = self.regex.search(str(value))
invalid_input = regex_matches if self.inverse_match else not regex_matches
if invalid_input:
raise ValidationError(self.message, code=self.code, params={"value": value})
def __eq__(self, other):
return (
isinstance(other, RegexValidator)
and self.regex.pattern == other.regex.pattern
and self.regex.flags == other.regex.flags
and (self.message == other.message)
and (self.code == other.code)
and (self.inverse_match == other.inverse_match)
)
@deconstructible
| RegexValidator |
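Usage sketch for the validator above (assuming Django is installed; the patterns and messages are illustrative):

from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator

digits_only = RegexValidator(regex=r"^\d+$", message="Enter digits only.")
digits_only("12345")        # a match: passes silently
try:
    digits_only("12a45")    # no match: raises
except ValidationError as exc:
    print(exc.messages)     # ['Enter digits only.']

# inverse_match=True flips the check: now a *match* is what raises
no_admin = RegexValidator(regex=r"admin", inverse_match=True,
                          message="Name must not contain 'admin'.")
no_admin("regular_user")    # passes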
python | explosion__spaCy | spacy/cli/init_config.py | {
"start": 765,
"end": 10423
} | class ____:
"""
Default values for initialization. Dedicated class to allow synchronized default values for init_config_cli() and
init_config(), i.e. initialization calls via CLI respectively Python.
"""
lang = "en"
pipeline = SimpleFrozenList(["tagger", "parser", "ner"])
optimize = Optimizations.efficiency
gpu = False
pretraining = False
force_overwrite = False
@init_cli.command("config")
def init_config_cli(
# fmt: off
output_file: Path = Arg(..., help="File to save the config to or - for stdout (will only output config and no additional logging info)", allow_dash=True),
lang: str = Opt(InitValues.lang, "--lang", "-l", help="Two-letter code of the language to use"),
pipeline: str = Opt(",".join(InitValues.pipeline), "--pipeline", "-p", help="Comma-separated names of trainable pipeline components to include (without 'tok2vec' or 'transformer')"),
optimize: Optimizations = Opt(InitValues.optimize, "--optimize", "-o", help="Whether to optimize for efficiency (faster inference, smaller model, lower memory consumption) or higher accuracy (potentially larger and slower model). This will impact the choice of architecture, pretrained weights and related hyperparameters."),
gpu: bool = Opt(InitValues.gpu, "--gpu", "-G", help="Whether the model can run on GPU. This will impact the choice of architecture, pretrained weights and related hyperparameters."),
pretraining: bool = Opt(InitValues.pretraining, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"),
force_overwrite: bool = Opt(InitValues.force_overwrite, "--force", "-F", help="Force overwriting the output file"),
# fmt: on
):
"""
Generate a starter config file for training. Based on your requirements
specified via the CLI arguments, this command generates a config with the
optimal settings for your use case. This includes the choice of architecture,
pretrained weights and related hyperparameters.
DOCS: https://spacy.io/api/cli#init-config
"""
pipeline = string_to_list(pipeline)
is_stdout = str(output_file) == "-"
if not is_stdout and output_file.exists() and not force_overwrite:
msg = Printer()
msg.fail(
"The provided output file already exists. To force overwriting the config file, set the --force or -F flag.",
exits=1,
)
config = init_config(
lang=lang,
pipeline=pipeline,
optimize=optimize.value,
gpu=gpu,
pretraining=pretraining,
silent=is_stdout,
)
save_config(config, output_file, is_stdout=is_stdout)
@init_cli.command("fill-config")
def init_fill_config_cli(
# fmt: off
base_path: Path = Arg(..., help="Path to base config to fill", exists=True, dir_okay=False),
output_file: Path = Arg("-", help="Path to output .cfg file (or - for stdout)", allow_dash=True),
pretraining: bool = Opt(False, "--pretraining", "-pt", help="Include config for pretraining (with 'spacy pretrain')"),
diff: bool = Opt(False, "--diff", "-D", help="Print a visual diff highlighting the changes"),
code_path: Optional[Path] = Opt(None, "--code-path", "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
# fmt: on
):
"""
Fill partial config file with default values. Will add all missing settings
from the default config and will create all objects, check the registered
functions for their default values and update the base config. This command
can be used with a config generated via the training quickstart widget:
https://spacy.io/usage/training#quickstart
DOCS: https://spacy.io/api/cli#init-fill-config
"""
import_code(code_path)
fill_config(output_file, base_path, pretraining=pretraining, diff=diff)
def fill_config(
output_file: Path,
base_path: Path,
*,
pretraining: bool = False,
diff: bool = False,
silent: bool = False,
) -> Tuple[Config, Config]:
is_stdout = str(output_file) == "-"
no_print = is_stdout or silent
msg = Printer(no_print=no_print)
with show_validation_error(hint_fill=False):
config = util.load_config(base_path)
nlp = util.load_model_from_config(config, auto_fill=True, validate=False)
# Load a second time with validation to be extra sure that the produced
# config result is a valid config
nlp = util.load_model_from_config(nlp.config)
filled = nlp.config
# If we have sourced components in the base config, those will have been
# replaced with their actual config after loading, so we have to re-add them
sourced = util.get_sourced_components(config)
filled["components"].update(sourced)
if pretraining:
validate_config_for_pretrain(filled, msg)
pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH)
filled = pretrain_config.merge(filled)
before = config.to_str()
after = filled.to_str()
if before == after:
msg.warn("Nothing to auto-fill: base config is already complete")
else:
msg.good("Auto-filled config with all values")
if diff and not no_print:
if before == after:
msg.warn("No diff to show: nothing was auto-filled")
else:
msg.divider("START CONFIG DIFF")
print("")
print(diff_strings(before, after))
msg.divider("END CONFIG DIFF")
print("")
save_config(filled, output_file, is_stdout=is_stdout, silent=silent)
return config, filled
def init_config(
*,
lang: str = InitValues.lang,
pipeline: List[str] = InitValues.pipeline,
optimize: str = InitValues.optimize,
gpu: bool = InitValues.gpu,
pretraining: bool = InitValues.pretraining,
silent: bool = True,
) -> Config:
msg = Printer(no_print=silent)
with TEMPLATE_PATH.open("r") as f:
template = Template(f.read())
# Filter out duplicates since tok2vec and transformer are added by template
pipeline = [pipe for pipe in pipeline if pipe not in ("tok2vec", "transformer")]
defaults = RECOMMENDATIONS["__default__"]
reco = RecommendationSchema(**RECOMMENDATIONS.get(lang, defaults)).dict()
variables = {
"lang": lang,
"components": pipeline,
"optimize": optimize,
"hardware": "gpu" if gpu else "cpu",
"transformer_data": reco["transformer"],
"word_vectors": reco["word_vectors"],
"has_letters": reco["has_letters"],
}
if variables["transformer_data"] and not has_spacy_transformers():
msg.warn(
"To generate a more effective transformer-based config (GPU-only), "
"install the spacy-transformers package and re-run this command. "
"The config generated now does not use transformers."
)
variables["transformer_data"] = None
base_template = template.render(variables).strip()
# Giving up on getting the newlines right in jinja for now
base_template = re.sub(r"\n\n\n+", "\n\n", base_template)
# Access variables declared in templates
template_vars = template.make_module(variables)
use_case = {
"Language": lang,
"Pipeline": ", ".join(pipeline),
"Optimize for": optimize,
"Hardware": variables["hardware"].upper(),
"Transformer": (
template_vars.transformer.get("name") # type: ignore[attr-defined]
if template_vars.use_transformer # type: ignore[attr-defined]
else None
),
}
msg.info("Generated config template specific for your use case")
for label, value in use_case.items():
msg.text(f"- {label}: {value}")
with show_validation_error(hint_fill=False):
config = util.load_config_from_str(base_template)
nlp = util.load_model_from_config(config, auto_fill=True)
config = nlp.config
if pretraining:
validate_config_for_pretrain(config, msg)
pretrain_config = util.load_config(DEFAULT_CONFIG_PRETRAIN_PATH)
config = pretrain_config.merge(config)
msg.good("Auto-filled config with all values")
return config
def save_config(
config: Config, output_file: Path, is_stdout: bool = False, silent: bool = False
) -> None:
no_print = is_stdout or silent
msg = Printer(no_print=no_print)
if is_stdout:
print(config.to_str())
else:
if not output_file.parent.exists():
output_file.parent.mkdir(parents=True)
config.to_disk(output_file, interpolate=False)
msg.good("Saved config", output_file)
msg.text("You can now add your data and train your pipeline:")
variables = ["--paths.train ./train.spacy", "--paths.dev ./dev.spacy"]
if not no_print:
print(f"{COMMAND} train {output_file.parts[-1]} {' '.join(variables)}")
def has_spacy_transformers() -> bool:
try:
import spacy_transformers # noqa: F401
return True
except ImportError:
return False
def validate_config_for_pretrain(config: Config, msg: Printer) -> None:
if "tok2vec" not in config["nlp"]["pipeline"]:
msg.warn(
"No tok2vec component found in the pipeline. If your tok2vec "
"component has a different name, you may need to adjust the "
"tok2vec_model reference in the [pretraining] block. If you don't "
"have a tok2vec component, make sure to add it to your [components] "
"and the pipeline specified in the [nlp] block, so you can pretrain "
"weights for it."
)
| InitValues |
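The same defaults can be exercised from Python instead of the CLI; a hedged sketch, assuming spaCy and its config templates are installed:

from spacy.cli.init_config import init_config

config = init_config(
    lang="en",
    pipeline=["tagger", "parser", "ner"],
    optimize="efficiency",
    gpu=False,
    pretraining=False,
    silent=True,
)
config.to_disk("config.cfg")  # roughly: python -m spacy init config config.cfg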
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_dag_parsing.py | {
"start": 1443,
"end": 4052
} | class ____:
@staticmethod
def clear_db():
clear_db_dag_parsing_requests()
@pytest.fixture(autouse=True)
def setup(self, session) -> None:
self.clear_db()
clear_db_logs()
def test_201_and_400_requests(self, url_safe_serializer, session, test_client):
parse_and_sync_to_db(EXAMPLE_DAG_FILE)
test_dag = DBDagBag(load_op_links=False).get_latest_version_of_dag(TEST_DAG_ID, session=session)
# grab the token
token = test_client.get(f"/dags/{TEST_DAG_ID}").json()["file_token"]
# First parsing request
url = f"/parseDagFile/{token}"
response = test_client.put(url, headers={"Accept": "application/json"})
assert response.status_code == 201
parsing_requests = session.scalars(select(DagPriorityParsingRequest)).all()
assert len(parsing_requests) == 1
assert parsing_requests[0].bundle_name == "dags-folder"
assert parsing_requests[0].relative_fileloc == test_dag.relative_fileloc
_check_last_log(session, dag_id=None, event="reparse_dag_file", logical_date=None)
# Duplicate file parsing request
response = test_client.put(url, headers={"Accept": "application/json"})
assert response.status_code == 409
parsing_requests = session.scalars(select(DagPriorityParsingRequest)).all()
assert len(parsing_requests) == 1
assert parsing_requests[0].bundle_name == "dags-folder"
assert parsing_requests[0].relative_fileloc == test_dag.relative_fileloc
_check_last_log(session, dag_id=None, event="reparse_dag_file", logical_date=None)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.put(
"/parseDagFile/token", headers={"Accept": "application/json"}
)
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.put("/parseDagFile/token", headers={"Accept": "application/json"})
assert response.status_code == 403
def test_bad_file_request(self, url_safe_serializer, session, test_client):
payload = {"bundle_name": "some_bundle", "relative_fileloc": "/some/random/file.py"}
url = f"/parseDagFile/{url_safe_serializer.dumps(payload)}"
response = test_client.put(url, headers={"Accept": "application/json"})
assert response.status_code == 404
parsing_requests = session.scalars(select(DagPriorityParsingRequest)).all()
assert parsing_requests == []
| TestDagParsingEndpoint |
python | ray-project__ray | python/ray/tests/test_autoscaler.py | {
"start": 6315,
"end": 10155
} | class ____(MockAutoscaler):
def update_nodes(self):
raise AssertionError(
"Node updaters are disabled. This method should not be accessed!"
)
SMALL_CLUSTER = {
"cluster_name": "default",
"idle_timeout_minutes": 5,
"max_workers": 2,
"provider": {
"type": "mock",
"region": "us-east-1",
"availability_zone": "us-east-1a",
},
"docker": {
"image": "example",
"container_name": "mock",
},
"auth": {
"ssh_user": "ubuntu",
"ssh_private_key": os.devnull,
},
"available_node_types": {
"head": {
"node_config": {
"TestProp": 1,
},
"resources": {"CPU": 1},
"max_workers": 0,
},
"worker": {
"node_config": {
"TestProp": 2,
},
"resources": {"CPU": 1},
"min_workers": 0,
"max_workers": 2,
},
},
"head_node_type": "head",
"file_mounts": {},
"cluster_synced_files": [],
"initialization_commands": ["init_cmd"],
"setup_commands": ["setup_cmd"],
"head_setup_commands": ["head_setup_cmd"],
"worker_setup_commands": ["worker_setup_cmd"],
"head_start_ray_commands": ["start_ray_head"],
"worker_start_ray_commands": ["start_ray_worker"],
}
MOCK_DEFAULT_CONFIG = {
"cluster_name": "default",
"max_workers": 2,
"upscaling_speed": 1.0,
"idle_timeout_minutes": 5,
"provider": {
"type": "mock",
"region": "us-east-1",
"availability_zone": "us-east-1a",
},
"docker": {
"image": "example",
"container_name": "mock",
},
"auth": {
"ssh_user": "ubuntu",
"ssh_private_key": os.devnull,
},
"available_node_types": {
"ray.head.default": {"resources": {}, "node_config": {"head_default_prop": 4}},
"ray.worker.default": {
"min_workers": 0,
"max_workers": 2,
"resources": {},
"node_config": {"worker_default_prop": 7},
},
},
"head_node_type": "ray.head.default",
"head_node": {},
"worker_nodes": {},
"file_mounts": {},
"cluster_synced_files": [],
"initialization_commands": [],
"setup_commands": [],
"head_setup_commands": [],
"worker_setup_commands": [],
"head_start_ray_commands": [],
"worker_start_ray_commands": [],
}
TYPES_A = {
"empty_node": {
"node_config": {
"FooProperty": 42,
"TestProp": 1,
},
"resources": {},
"max_workers": 0,
},
"m4.large": {
"node_config": {},
"resources": {"CPU": 2},
"max_workers": 10,
},
"m4.4xlarge": {
"node_config": {},
"resources": {"CPU": 16},
"max_workers": 8,
},
"m4.16xlarge": {
"node_config": {},
"resources": {"CPU": 64},
"max_workers": 4,
},
"p2.xlarge": {
"node_config": {},
"resources": {"CPU": 16, "GPU": 1},
"max_workers": 10,
},
"p2.8xlarge": {
"node_config": {},
"resources": {"CPU": 32, "GPU": 8},
"max_workers": 4,
},
}
MULTI_WORKER_CLUSTER = dict(
SMALL_CLUSTER, **{"available_node_types": TYPES_A, "head_node_type": "empty_node"}
)
# `DUMMY_IDLE_DURATION_S` is used as a dummy value for
# `node_idle_duration_s` in load_metrics.update()
# when we want to simulate worker node's idle duration
# (`total_resources` == `available_resources`),
# but don't want to cause autoscaler downscaling.
# (`DUMMY_IDLE_DURATION_S` < `idle_timeout_minutes`).
DUMMY_IDLE_DURATION_S = 3
exc_info = None
try:
raise Exception("Test exception.")
except Exception:
exc_info = sys.exc_info()
assert exc_info is not None
| NoUpdaterMockAutoscaler |
python | ray-project__ray | rllib/execution/segment_tree.py | {
"start": 6381,
"end": 7765
} | class ____(SegmentTree):
"""A SegmentTree with the reduction `operation`=operator.add."""
def __init__(self, capacity: int):
super(SumSegmentTree, self).__init__(capacity=capacity, operation=operator.add)
def sum(self, start: int = 0, end: Optional[Any] = None) -> Any:
"""Returns the sum over a sub-segment of the tree."""
return self.reduce(start, end)
def find_prefixsum_idx(self, prefixsum: float) -> int:
"""Finds highest i, for which: sum(arr[0]+..+arr[i - i]) <= prefixsum.
Args:
prefixsum: `prefixsum` upper bound in above constraint.
Returns:
int: Largest possible index (i) satisfying above constraint.
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
# Global sum node.
idx = 1
# Edge case when prefixsum can clip into the invalid regions
# https://github.com/ray-project/ray/issues/54284
if prefixsum >= self.value[idx]:
prefixsum = self.value[idx] - 1e-5
# While non-leaf (first half of tree).
while idx < self.capacity:
update_idx = 2 * idx
if self.value[update_idx] > prefixsum:
idx = update_idx
else:
prefixsum -= self.value[update_idx]
idx = update_idx + 1
return idx - self.capacity
| SumSegmentTree |
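A short usage sketch of the prefix-sum search above (this is how prioritized replay buffers sample proportionally to priority); it assumes the baselines-style __setitem__ on SegmentTree and a power-of-two capacity:

from ray.rllib.execution.segment_tree import SumSegmentTree

tree = SumSegmentTree(capacity=4)
for i, priority in enumerate([1.0, 2.0, 3.0, 4.0]):
    tree[i] = priority

assert tree.sum() == 10.0
# prefix sums are [1, 3, 6, 10]; a prefixsum of 3.5 lands in leaf 2,
# since sum(arr[0:2]) = 3 <= 3.5 < sum(arr[0:3]) = 6
assert tree.find_prefixsum_idx(3.5) == 2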
python | facebook__pyre-check | tools/generate_taint_models/tests/test_functions.py | {
"start": 573,
"end": 618
} | class ____(TestClass):
pass
| TestChildClassA |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/subclass_parametrization.py | {
"start": 369,
"end": 638
} | class ____:
start_idx: int
num_tensors: int
class_type: Any
attrs: dict[str, "SubclassCreationMeta"]
metadata: Any
outer_size: Iterable[Union[None, int, torch.SymInt]]
outer_stride: Iterable[Union[None, int, torch.SymInt]]
| SubclassCreationMeta |
python | PyCQA__pylint | tests/functional/p/protocol_classes.py | {
"start": 474,
"end": 540
} | class ____: #pylint:disable=too-few-public-methods
pass
| Protocol |
python | getsentry__sentry | src/sentry/incidents/models/alert_rule.py | {
"start": 11920,
"end": 12684
} | class ____(abc.ABC):
"""A factory for action handlers tied to a specific incident service.
The factory's builder method is augmented with metadata about which service it is
for and which target types that service supports.
"""
def __init__(
self,
slug: str,
service_type: ActionService,
supported_target_types: Iterable[ActionTarget],
integration_provider: str | None,
) -> None:
self.slug = slug
self.service_type = service_type
self.supported_target_types = frozenset(supported_target_types)
self.integration_provider = integration_provider
@abc.abstractmethod
def build_handler(self) -> ActionHandler:
raise NotImplementedError
| ActionHandlerFactory |
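A hedged sketch of a concrete factory for the abstract class above; EmailActionHandler is hypothetical, and the enum members are assumptions, not confirmed Sentry identifiers:

class EmailActionHandlerFactory(ActionHandlerFactory):
    def __init__(self) -> None:
        super().__init__(
            slug="email",
            service_type=ActionService.EMAIL,            # assumed enum member
            supported_target_types=[ActionTarget.USER],  # assumed enum member
            integration_provider=None,
        )

    def build_handler(self) -> ActionHandler:
        return EmailActionHandler()  # hypothetical concrete handler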
python | redis__redis-py | tests/test_asyncio/test_encoding.py | {
"start": 2470,
"end": 2945
} | class ____:
async def test_ignore(self, create_redis):
r = await create_redis(decode_responses=True, encoding_errors="ignore")
await r.set("a", b"foo\xff")
assert await r.get("a") == "foo"
async def test_replace(self, create_redis):
r = await create_redis(decode_responses=True, encoding_errors="replace")
await r.set("a", b"foo\xff")
assert await r.get("a") == "foo\ufffd"
@pytest.mark.onlynoncluster
| TestEncodingErrors |
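A synchronous equivalent of the tests above (requires a reachable Redis server); encoding_errors is forwarded to bytes.decode(), so the usual 'strict'/'ignore'/'replace' values apply:

import redis

r = redis.Redis(decode_responses=True, encoding_errors="replace")
r.set("a", b"foo\xff")
assert r.get("a") == "foo\ufffd"  # invalid byte replaced with U+FFFD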
python | geekcomputers__Python | Flappy Bird - created with tkinter/Background.py | {
"start": 109,
"end": 6162
} | class ____(Canvas):
"""
    Class that generates an animated background
"""
__background = []
__stop = False
def __init__(self, tk_instance, *geometry, fp="background.png", animation_speed=50):
        # Check that tk_instance is an instance of Tk
if not isinstance(tk_instance, Tk):
raise TypeError("The tk_instance argument must be an instance of Tk.")
        # Store the image path and the animation speed
self.image_path = fp
self.animation_speed = animation_speed
        # Store the widget's width and height
self.__width = geometry[0]
self.__height = geometry[1]
        # Initialize the Canvas base class
Canvas.__init__(
self, master=tk_instance, width=self.__width, height=self.__height
)
        # Load the image that will be used as the background
self.__bg_image = self.getPhotoImage(
image_path=self.image_path,
width=self.__width,
height=self.__height,
closeAfter=True,
)[0]
        # Create a static image that is not part of the animation; it serves as a fallback when the animation glitches
self.__background_default = self.create_image(
self.__width // 2, self.__height // 2, image=self.__bg_image
)
        # Create the images used in the background animation
self.__background.append(
self.create_image(
self.__width // 2, self.__height // 2, image=self.__bg_image
)
)
self.__background.append(
self.create_image(
self.__width + (self.__width // 2),
self.__height // 2,
image=self.__bg_image,
)
)
def getBackgroundID(self):
"""
        Return the ids of the background images
"""
return [self.__background_default, *self.__background]
@staticmethod
def getPhotoImage(
image=None, image_path=None, width=None, height=None, closeAfter=False
):
"""
        Return a PIL.ImageTk.PhotoImage object for an image, together with the PIL.Image objects that were created
        (photoImage, new, original)
        @param image: Instance of PIL.Image.open
        @param image_path: Path to the image file
        @param width: Image width
        @param height: Image height
        @param closeAfter: If True, the image is closed after a PhotoImage has been created from it
"""
if not image:
if not image_path:
return
            # Open the image from its path
image = openImage(image_path)
        # The image is resized only if a width or height was given
if not width:
width = image.width
if not height:
height = image.height
        # Create a new, already-resized image
newImage = image.resize([width, height])
        # Create a PhotoImage
photoImage = PhotoImage(newImage)
        # If closeAfter is True, close the images
if closeAfter:
            # Close the new image
newImage.close()
newImage = None
            # Close the original image
image.close()
image = None
        # Return the PhotoImage, the new (resized) image that was used, and the original image
return photoImage, newImage, image
def reset(self):
"""
        Reset the background, deleting every item except the backdrop itself
"""
        # Delete every item on the canvas
self.delete("all")
        # Re-enable the animation by setting the "stop" attribute to False
self.__stop = False
        # Clear the list of images used in the animation
self.__background.clear()
        # Create a static image that is not part of the animation; it serves as a fallback when the animation glitches
self.__background_default = self.create_image(
self.__width // 2, self.__height // 2, image=self.__bg_image
)
        # Create the images used in the background animation
self.__background.append(
self.create_image(
self.__width // 2, self.__height // 2, image=self.__bg_image
)
)
self.__background.append(
self.create_image(
self.__width + (self.__width // 2),
self.__height // 2,
image=self.__bg_image,
)
)
def run(self):
"""
        Start the background animation
"""
        # While the "stop" attribute is False, the animation keeps looping
if not self.__stop:
            # Move the background images along the X axis
self.move(self.__background[0], -10, 0)
self.move(self.__background[1], -10, 0)
self.tag_lower(self.__background[0])
self.tag_lower(self.__background[1])
self.tag_lower(self.__background_default)
            # If the first image in the list has left the widget area, create a new one after the second image
if self.bbox(self.__background[0])[2] <= 0:
                # Delete the first image in the list (the one that left the widget area)
self.delete(self.__background[0])
self.__background.remove(self.__background[0])
                # Create a new image placed after the last image of the animation
width = self.bbox(self.__background[0])[2] + self.__width // 2
self.__background.append(
self.create_image(width, self.__height // 2, image=self.__bg_image)
)
            # Schedule this method to run again after a delay
self.after(self.animation_speed, self.run)
def stop(self):
"""
Método para parar a animação do background
"""
self.__stop = True
| Background |
python | kamyu104__LeetCode-Solutions | Python/best-time-to-buy-and-sell-stock-with-cooldown.py | {
"start": 30,
"end": 798
} | class ____(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if not prices:
return 0
buy, sell, coolDown = [0] * 2, [0] * 2, [0] * 2
buy[0] = -prices[0]
for i in xrange(1, len(prices)):
# Bought before or buy today.
buy[i % 2] = max(buy[(i - 1) % 2],
coolDown[(i - 1) % 2] - prices[i])
# Sell today.
sell[i % 2] = buy[(i - 1) % 2] + prices[i]
# Sold before yesterday or sold yesterday.
coolDown[i % 2] = max(coolDown[(i - 1) % 2], sell[(i - 1) % 2])
return max(coolDown[(len(prices) - 1) % 2],
sell[(len(prices) - 1) % 2])
| Solution |
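The recurrence above is buy[i] = max(buy[i-1], cool[i-1] - p[i]), sell[i] = buy[i-1] + p[i], cool[i] = max(cool[i-1], sell[i-1]). A Python 3 check on the classic example (the class above is Python 2 because of xrange):

def max_profit(prices):
    if not prices:
        return 0
    buy, sell, cool = -prices[0], 0, 0
    for p in prices[1:]:
        # tuple assignment evaluates the right side with the old values
        buy, sell, cool = max(buy, cool - p), buy + p, max(cool, sell)
    return max(sell, cool)

# buy@1, sell@2, cooldown, buy@0, sell@2 -> (2-1) + (2-0) = 3
assert max_profit([1, 2, 3, 0, 2]) == 3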
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/external.py | {
"start": 2476,
"end": 2685
} | class ____(graphene.ObjectType):
name = graphene.NonNull(graphene.String)
version = graphene.NonNull(graphene.String)
class Meta:
name = "DagsterLibraryVersion"
| GrapheneDagsterLibraryVersion |
python | django-haystack__django-haystack | test_haystack/core/models.py | {
"start": 2102,
"end": 2234
} | class ____(models.Model):
score = models.CharField(max_length=10)
def __str__(self):
return self.score
| ScoreMockModel |