language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
tests/models/reformer/test_modeling_reformer.py
|
{
"start": 31452,
"end": 39316
}
|
class ____(
ReformerTesterMixin, ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
all_model_classes = (
(ReformerModel, ReformerModelWithLMHead, ReformerForSequenceClassification, ReformerForQuestionAnswering)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": ReformerModel,
"fill-mask": ReformerForMaskedLM,
"question-answering": ReformerForQuestionAnswering,
"text-classification": ReformerForSequenceClassification,
"text-generation": ReformerModelWithLMHead,
"zero-shot": ReformerForSequenceClassification,
}
if is_torch_available()
else {}
)
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if (
pipeline_test_case_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def setUp(self):
self.model_tester = ReformerModelTester(
self,
batch_size=13,
seq_length=13,
use_input_mask=True,
use_labels=True,
is_training=False,
is_decoder=True,
vocab_size=32,
attention_head_size=16,
hidden_size=64,
num_attention_heads=2,
num_buckets=2,
num_hashes=4,
lsh_attn_chunk_length=4,
lsh_num_chunks_before=1,
lsh_num_chunks_after=0,
chunk_size_lm_head=5,
chunk_size_feed_forward=6,
feed_forward_size=32,
hidden_act="relu",
hidden_dropout_prob=0.1,
lsh_attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
initializer_range=0.02,
axial_norm_std=1.0,
layer_norm_eps=1e-12,
axial_pos_embds=True,
axial_pos_shape=[4, 8],
axial_pos_embds_dim=[16, 48],
# sanotheu
# attn_layers=[lsh,lsh,lsh,lsh],
attn_layers=["lsh"],
pad_token_id=0,
eos_token_id=2,
scope=None,
hash_seed=0,
num_labels=2,
)
self.config_tester = ConfigTester(self, config_class=ReformerConfig, hidden_size=37)
def _check_attentions_for_generate(
self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
):
# NOTE (joao): this function is substantially different from the original, the attention has different
# *number* of shapes in certain conditions
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[isinstance(iter_attentions, list) for iter_attentions in attentions], [True] * len(attentions)
)
self.assertEqual(len(attentions), (output_length - prompt_length))
for generated_length, iter_attentions in enumerate(attentions):
use_cache = decoder_past_key_values is not None and generated_length > 0
model_input_len = prompt_length + generated_length if not use_cache else 1
num_chunks = model_input_len // config.lsh_attn_chunk_length + (
model_input_len % config.lsh_attn_chunk_length != 0
)
model_input_chunk_len = config.lsh_attn_chunk_length
query_chunk_len = config.lsh_attn_chunk_length * (
1 + config.lsh_num_chunks_after + config.lsh_num_chunks_before
)
if use_cache:
expected_shape = (
batch_size,
config.num_attention_heads,
config.num_hashes,
model_input_len,
config.num_hashes * (1 + config.lsh_num_chunks_after + config.lsh_num_chunks_before),
)
else:
expected_shape = (
batch_size,
config.num_attention_heads,
num_chunks * config.num_hashes,
model_input_chunk_len,
query_chunk_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
):
# NOTE (joao): this function is substantially different from the original, the hidden states have different
# length in certain conditions
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[isinstance(iter_hidden_states, list) for iter_hidden_states in hidden_states],
[True] * len(hidden_states),
)
self.assertEqual(len(hidden_states), (output_length - prompt_length))
for generation_length, iter_hidden_states in enumerate(hidden_states):
use_cache_this_iter = use_cache and generation_length > 0
model_input_length = prompt_length + generation_length
model_output_length = config.local_attn_chunk_length * (
model_input_length // config.local_attn_chunk_length
+ (model_input_length % config.local_attn_chunk_length != 0)
)
if use_cache_this_iter:
model_output_length = 1
expected_shape = (batch_size, model_output_length, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
[expected_shape] * len(iter_hidden_states),
)
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
self.assertIsInstance(past_key_values, ReformerDynamicCache)
# (batch, kv heads, seq_length, head_dim)
num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
hidden_size = getattr(config, "d_model", config.hidden_size)
head_dim = getattr(config, "head_dim", hidden_size // config.num_attention_heads)
# For cross attention cache, the seq_length depends on the model, so we remove that dim
expected_shape = (batch_size, seq_length, num_heads * head_dim)
# Check the size is coherent
self.assertEqual(config.num_hidden_layers, len(past_key_values))
# Check each layer has the correct shape
for idx in range(len(past_key_values)):
self.assertEqual(past_key_values.states_cache[idx].shape, expected_shape)
@unittest.skip(reason="Fails because the sequence length is not a multiple of 4")
def test_problem_types(self):
pass
@unittest.skip(reason="Fails because the sequence length is not a multiple of 4")
def test_past_key_values_format(self):
pass
@unittest.skip(reason="The model doesn't support left padding") # and it's not used enough to be worth fixing :)
def test_left_padding_compatibility(self):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
|
ReformerLSHAttnModelTest
|
python
|
Pylons__pyramid
|
tests/test_scripts/dummy.py
|
{
"start": 3540,
"end": 4827
}
|
class ____:
def __init__(
self, settings=None, app_settings=None, app=None, server=None
):
if not settings:
settings = {}
if not app_settings:
app_settings = {}
self.settings = settings
self.app_settings = app_settings
self.app = app
self.server = server
self.calls = []
def __call__(self, uri):
import plaster
self.uri = plaster.parse_uri(uri)
return self
def add_call(self, op, name, defaults):
self.calls.append({'op': op, 'name': name, 'defaults': defaults})
def get_settings(self, name=None, defaults=None):
self.add_call('settings', name, defaults)
return self.settings.get(name, {})
def get_wsgi_app(self, name=None, defaults=None):
self.add_call('app', name, defaults)
return self.app
def get_wsgi_app_settings(self, name=None, defaults=None):
self.add_call('app_settings', name, defaults)
return self.app_settings
def get_wsgi_server(self, name=None, defaults=None):
self.add_call('server', name, defaults)
return self.server
def setup_logging(self, defaults):
self.add_call('logging', None, defaults)
self.defaults = defaults
|
DummyLoader
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/exc.py
|
{
"start": 2236,
"end": 2361
}
|
class ____(sa_exc.InvalidRequestError):
"""Base for exceptions that involve expected mappings not present."""
|
UnmappedError
|
python
|
getsentry__sentry
|
src/sentry/monitors/endpoints/organization_monitor_details.py
|
{
"start": 868,
"end": 3075
}
|
class ____(MonitorEndpoint, MonitorDetailsMixin):
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"GET": ApiPublishStatus.PUBLIC,
"PUT": ApiPublishStatus.PUBLIC,
}
owner = ApiOwner.CRONS
@extend_schema(
operation_id="Retrieve a Monitor",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
GlobalParams.ENVIRONMENT,
],
responses={
200: MonitorSerializer,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: Request, organization, project, monitor) -> Response:
"""
Retrieves details for a monitor.
"""
return self.get_monitor(request, project, monitor)
@extend_schema(
operation_id="Update a Monitor",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
],
request=MonitorValidator,
responses={
200: MonitorSerializer,
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def put(self, request: AuthenticatedHttpRequest, organization, project, monitor) -> Response:
"""
Update a monitor.
"""
return self.update_monitor(request, project, monitor)
@extend_schema(
operation_id="Delete a Monitor or Monitor Environments",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
GlobalParams.ENVIRONMENT,
],
request=MonitorValidator,
responses={
202: RESPONSE_ACCEPTED,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(self, request: Request, organization, project, monitor) -> Response:
"""
Delete a monitor or monitor environments.
"""
return self.delete_monitor(request, project, monitor)
|
OrganizationMonitorDetailsEndpoint
|
python
|
great-expectations__great_expectations
|
tests/integration/test_utils/data_source_config/big_query.py
|
{
"start": 736,
"end": 1541
}
|
class ____(DataSourceTestConfig):
@property
@override
def label(self) -> str:
return "big-query"
@property
@override
def pytest_mark(self) -> pytest.MarkDecorator:
return pytest.mark.bigquery
@override
def create_batch_setup(
self,
request: pytest.FixtureRequest,
data: pd.DataFrame,
extra_data: Mapping[str, pd.DataFrame],
context: AbstractDataContext,
engine_manager: Optional[SessionSQLEngineManager] = None,
) -> BatchTestSetup:
return BigQueryBatchTestSetup(
data=data,
config=self,
extra_data=extra_data,
table_name=self.table_name,
context=context,
engine_manager=engine_manager,
)
|
BigQueryDatasourceTestConfig
|
python
|
facelessuser__soupsieve
|
tests/test_extra/test_attribute.py
|
{
"start": 54,
"end": 1491
}
|
class ____(util.TestCase):
"""Test attribute selectors."""
MARKUP = """
<div id="div">
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
def test_attribute_not_equal_no_quotes(self):
"""Test attribute with value that does not equal specified value (no quotes)."""
# No quotes
self.assert_selector(
self.MARKUP,
'body [id!=\\35]',
["div", "0", "1", "2", "3", "pre", "4", "6"],
flags=util.HTML5
)
def test_attribute_not_equal_quotes(self):
"""Test attribute with value that does not equal specified value (quotes)."""
# Quotes
self.assert_selector(
self.MARKUP,
"body [id!='5']",
["div", "0", "1", "2", "3", "pre", "4", "6"],
flags=util.HTML5
)
def test_attribute_not_equal_double_quotes(self):
"""Test attribute with value that does not equal specified value (double quotes)."""
# Double quotes
self.assert_selector(
self.MARKUP,
'body [id!="5"]',
["div", "0", "1", "2", "3", "pre", "4", "6"],
flags=util.HTML5
)
|
TestAttribute
|
python
|
django__django
|
django/contrib/gis/db/backends/oracle/operations.py
|
{
"start": 2047,
"end": 9120
}
|
class ____(BaseSpatialOperations, DatabaseOperations):
name = "oracle"
oracle = True
disallowed_aggregates = (models.Collect, models.Extent3D, models.MakeLine)
Adapter = OracleSpatialAdapter
extent = "SDO_AGGR_MBR"
unionagg = "SDO_AGGR_UNION"
from_text = "SDO_GEOMETRY"
function_names = {
"Area": "SDO_GEOM.SDO_AREA",
"AsGeoJSON": "SDO_UTIL.TO_GEOJSON",
"AsWKB": "SDO_UTIL.TO_WKBGEOMETRY",
"AsWKT": "SDO_UTIL.TO_WKTGEOMETRY",
"BoundingCircle": "SDO_GEOM.SDO_MBC",
"Centroid": "SDO_GEOM.SDO_CENTROID",
"Difference": "SDO_GEOM.SDO_DIFFERENCE",
"Distance": "SDO_GEOM.SDO_DISTANCE",
"Envelope": "SDO_GEOM_MBR",
"FromWKB": "SDO_UTIL.FROM_WKBGEOMETRY",
"FromWKT": "SDO_UTIL.FROM_WKTGEOMETRY",
"Intersection": "SDO_GEOM.SDO_INTERSECTION",
"IsValid": "SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT",
"Length": "SDO_GEOM.SDO_LENGTH",
"NumGeometries": "SDO_UTIL.GETNUMELEM",
"NumPoints": "SDO_UTIL.GETNUMVERTICES",
"Perimeter": "SDO_GEOM.SDO_LENGTH",
"PointOnSurface": "SDO_GEOM.SDO_POINTONSURFACE",
"Reverse": "SDO_UTIL.REVERSE_LINESTRING",
"SymDifference": "SDO_GEOM.SDO_XOR",
"Transform": "SDO_CS.TRANSFORM",
"Union": "SDO_GEOM.SDO_UNION",
}
# We want to get SDO Geometries as WKT because it is much easier to
# instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
# However, this adversely affects performance (i.e., Java is called
# to convert to WKT on every query). If someone wishes to write a
# SDO_GEOMETRY(...) parser in Python, let me know =)
select = "SDO_UTIL.TO_WKBGEOMETRY(%s)"
gis_operators = {
"contains": SDOOperator(func="SDO_CONTAINS"),
"coveredby": SDOOperator(func="SDO_COVEREDBY"),
"covers": SDOOperator(func="SDO_COVERS"),
"disjoint": SDODisjoint(),
"intersects": SDOOperator(
func="SDO_OVERLAPBDYINTERSECT"
), # TODO: Is this really the same as ST_Intersects()?
"equals": SDOOperator(func="SDO_EQUAL"),
"exact": SDOOperator(func="SDO_EQUAL"),
"overlaps": SDOOperator(func="SDO_OVERLAPS"),
"same_as": SDOOperator(func="SDO_EQUAL"),
# Oracle uses a different syntax, e.g., 'mask=inside+touch'
"relate": SDORelate(),
"touches": SDOOperator(func="SDO_TOUCH"),
"within": SDOOperator(func="SDO_INSIDE"),
"dwithin": SDODWithin(),
}
@cached_property
def unsupported_functions(self):
unsupported = {
"AsKML",
"AsSVG",
"Azimuth",
"ClosestPoint",
"ForcePolygonCW",
"GeoHash",
"GeometryDistance",
"IsEmpty",
"LineLocatePoint",
"MakeValid",
"MemSize",
"NumDimensions",
"Rotate",
"Scale",
"SnapToGrid",
"Translate",
}
if self.connection.oracle_version < (23,):
unsupported.add("GeometryType")
return unsupported
def geo_quote_name(self, name):
return super().geo_quote_name(name).upper()
def convert_extent(self, clob):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = GEOSGeometry(memoryview(clob.read()))
gtype = str(ext_geom.geom_type)
if gtype == "Polygon":
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == "Point":
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception(
"Unexpected geometry type returned for extent: %s" % gtype
)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def geo_db_type(self, f):
"""
Return the geometry database type for Oracle. Unlike other spatial
backends, no stored procedure is necessary and it's the same for all
geometry types.
"""
return "MDSYS.SDO_GEOMETRY"
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(
value, Distance.unit_attname(f.units_name(self.connection))
)
else:
dist_param = value
# dwithin lookups on Oracle require a special string parameter
# that starts with "distance=".
if lookup_type == "dwithin":
dist_param = "distance=%s" % dist_param
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
if value is None:
return "NULL"
return super().get_geom_placeholder(f, value, compiler)
def spatial_aggregate_name(self, agg_name):
"""
Return the spatial aggregate SQL name.
"""
agg_name = "unionagg" if agg_name.lower() == "union" else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
return OracleGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
return OracleSpatialRefSys
def modify_insert_params(self, placeholder, params):
"""Drop out insert parameters for NULL placeholder. Needed for Oracle
Spatial backend due to #10888.
"""
if placeholder == "NULL":
return []
return super().modify_insert_params(placeholder, params)
def get_geometry_converter(self, expression):
read = wkb_r().read
srid = expression.output_field.srid
if srid == -1:
srid = None
geom_class = expression.output_field.geom_class
def converter(value, expression, connection):
if value is not None:
geom = GEOSGeometryBase(read(memoryview(value.read())), geom_class)
if srid:
geom.srid = srid
return geom
return converter
def get_area_att_for_field(self, field):
return "sq_m"
|
OracleOperations
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-databricks/prefect_databricks/models/jobs.py
|
{
"start": 42161,
"end": 42455
}
|
class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
full_refresh: Optional[bool] = Field(
None, description="If true, triggers a full refresh on the delta live table."
)
|
PipelineParams
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/kernel_tests/choose_from_datasets_test.py
|
{
"start": 1345,
"end": 6136
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasets(self):
words = [b"foo", b"bar", b"baz"]
datasets = [dataset_ops.Dataset.from_tensors(w).repeat() for w in words]
choice_array = np.random.randint(3, size=(15,), dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
dataset = dataset_ops.Dataset.choose_from_datasets(datasets, choice_dataset)
next_element = self.getNext(dataset)
for i in choice_array:
self.assertEqual(words[i], self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasetsStoppingOnEmptyDataset(self):
datasets = [
dataset_ops.Dataset.from_tensors(b"foo").repeat(2),
dataset_ops.Dataset.from_tensors(b"bar").repeat(),
dataset_ops.Dataset.from_tensors(b"baz").repeat(),
]
choice_array = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
dataset = dataset_ops.Dataset.choose_from_datasets(
datasets, choice_dataset, stop_on_empty_dataset=True)
self.assertDatasetProduces(dataset, [b"foo", b"foo"])
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasetsSkippingEmptyDatasets(self):
datasets = [
dataset_ops.Dataset.from_tensors(b"foo").repeat(2),
dataset_ops.Dataset.from_tensors(b"bar").repeat(),
dataset_ops.Dataset.from_tensors(b"baz").repeat(),
]
choice_array = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
dataset = dataset_ops.Dataset.choose_from_datasets(
datasets, choice_dataset, stop_on_empty_dataset=False)
# Chooses 2 elements from the first dataset while the selector specifies 3.
self.assertDatasetProduces(
dataset,
[b"foo", b"foo", b"bar", b"bar", b"bar", b"baz", b"baz", b"baz"])
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasetsChoiceDatasetIsEmpty(self):
datasets = [
dataset_ops.Dataset.from_tensors(b"foo").repeat(),
dataset_ops.Dataset.from_tensors(b"bar").repeat(),
dataset_ops.Dataset.from_tensors(b"baz").repeat(),
]
dataset = dataset_ops.Dataset.choose_from_datasets(
datasets,
choice_dataset=dataset_ops.Dataset.range(0),
stop_on_empty_dataset=False)
self.assertDatasetProduces(dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasetsNested(self):
ds1 = dataset_ops.Dataset.range(10).window(2)
ds2 = dataset_ops.Dataset.range(10, 20).window(2)
choice_dataset = dataset_ops.Dataset.range(2).repeat(5)
ds = dataset_ops.Dataset.choose_from_datasets([ds1, ds2], choice_dataset)
ds = ds.flat_map(lambda x: x)
expected = []
for i in range(5):
for j in range(2):
expected.extend([10*j + 2*i, 10*j + 2*i + 1])
self.assertDatasetProduces(ds, expected)
@combinations.generate(test_base.default_test_combinations())
def testErrors(self):
with self.assertRaisesRegex(TypeError, "tf.int64"):
dataset_ops.Dataset.choose_from_datasets(
[
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(1)
],
choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
with self.assertRaisesRegex(TypeError, "scalar"):
dataset_ops.Dataset.choose_from_datasets(
[
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(1)
],
choice_dataset=dataset_ops.Dataset.from_tensors([1.0]))
with self.assertRaisesRegex(errors.InvalidArgumentError, "out of range"):
dataset = dataset_ops.Dataset.choose_from_datasets(
[dataset_ops.Dataset.from_tensors(0)],
choice_dataset=dataset_ops.Dataset.from_tensors(
constant_op.constant(1, dtype=dtypes.int64)))
next_element = self.getNext(dataset)
self.evaluate(next_element())
with self.assertRaisesRegex(
ValueError, r"Invalid `datasets`. `datasets` should not be empty."):
dataset_ops.Dataset.choose_from_datasets(
datasets=[], choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
with self.assertRaisesRegex(
TypeError, r"`choice_dataset` should be a `tf.data.Dataset`"):
datasets = [dataset_ops.Dataset.range(42)]
dataset_ops.Dataset.choose_from_datasets(datasets, choice_dataset=None)
|
ChooseFromDatasetsTest
|
python
|
django__django
|
tests/base/models.py
|
{
"start": 261,
"end": 318
}
|
class ____(models.base.ModelBase):
pass
|
CustomBaseModel
|
python
|
pandas-dev__pandas
|
pandas/tests/computation/test_eval.py
|
{
"start": 63522,
"end": 73045
}
|
class ____:
def test_global_scope(self, engine, parser):
e = "_var_s * 2"
tm.assert_numpy_array_equal(
_var_s * 2, pd.eval(e, engine=engine, parser=parser)
)
def test_no_new_locals(self, engine, parser):
x = 1
lcls = locals().copy()
pd.eval("x + 1", local_dict=lcls, engine=engine, parser=parser)
lcls2 = locals().copy()
lcls2.pop("lcls")
assert lcls == lcls2
def test_no_new_globals(self, engine, parser):
x = 1 # noqa: F841
gbls = globals().copy()
pd.eval("x + 1", engine=engine, parser=parser)
gbls2 = globals().copy()
assert gbls == gbls2
def test_empty_locals(self, engine, parser):
# GH 47084
x = 1 # noqa: F841
msg = "name 'x' is not defined"
with pytest.raises(UndefinedVariableError, match=msg):
pd.eval("x + 1", engine=engine, parser=parser, local_dict={})
def test_empty_globals(self, engine, parser):
# GH 47084
msg = "name '_var_s' is not defined"
e = "_var_s * 2"
with pytest.raises(UndefinedVariableError, match=msg):
pd.eval(e, engine=engine, parser=parser, global_dict={})
@td.skip_if_no("numexpr")
def test_invalid_engine():
msg = "Invalid engine 'asdf' passed"
with pytest.raises(KeyError, match=msg):
pd.eval("x + y", local_dict={"x": 1, "y": 2}, engine="asdf")
@td.skip_if_no("numexpr")
@pytest.mark.parametrize(
("use_numexpr", "expected"),
(
(True, "numexpr"),
(False, "python"),
),
)
def test_numexpr_option_respected(use_numexpr, expected):
# GH 32556
from pandas.core.computation.eval import _check_engine
with pd.option_context("compute.use_numexpr", use_numexpr):
result = _check_engine(None)
assert result == expected
@td.skip_if_no("numexpr")
def test_numexpr_option_incompatible_op():
# GH 32556
with pd.option_context("compute.use_numexpr", False):
df = DataFrame(
{"A": [True, False, True, False, None, None], "B": [1, 2, 3, 4, 5, 6]}
)
result = df.query("A.isnull()")
expected = DataFrame({"A": [None, None], "B": [5, 6]}, index=range(4, 6))
tm.assert_frame_equal(result, expected)
@td.skip_if_no("numexpr")
def test_invalid_parser():
msg = "Invalid parser 'asdf' passed"
with pytest.raises(KeyError, match=msg):
pd.eval("x + y", local_dict={"x": 1, "y": 2}, parser="asdf")
_parsers: dict[str, type[BaseExprVisitor]] = {
"python": PythonExprVisitor,
"pytables": pytables.PyTablesExprVisitor,
"pandas": PandasExprVisitor,
}
@pytest.mark.parametrize("engine", ENGINES)
@pytest.mark.parametrize("parser", _parsers)
def test_disallowed_nodes(engine, parser):
VisitorClass = _parsers[parser]
inst = VisitorClass("x + 1", engine, parser)
for ops in VisitorClass.unsupported_nodes:
msg = "nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
getattr(inst, ops)()
def test_syntax_error_exprs(engine, parser):
e = "s +"
with pytest.raises(SyntaxError, match="invalid syntax"):
pd.eval(e, engine=engine, parser=parser)
def test_name_error_exprs(engine, parser):
e = "s + t"
msg = "name 's' is not defined"
with pytest.raises(NameError, match=msg):
pd.eval(e, engine=engine, parser=parser)
@pytest.mark.parametrize("express", ["a + @b", "@a + b", "@a + @b"])
def test_invalid_local_variable_reference(engine, parser, express):
a, b = 1, 2 # noqa: F841
if parser != "pandas":
with pytest.raises(SyntaxError, match="The '@' prefix is only"):
pd.eval(express, engine=engine, parser=parser)
else:
with pytest.raises(SyntaxError, match="The '@' prefix is not"):
pd.eval(express, engine=engine, parser=parser)
def test_numexpr_builtin_raises(engine, parser):
sin, dotted_line = 1, 2
if engine == "numexpr":
msg = "Variables in expression .+"
with pytest.raises(NumExprClobberingError, match=msg):
pd.eval("sin + dotted_line", engine=engine, parser=parser)
else:
res = pd.eval("sin + dotted_line", engine=engine, parser=parser)
assert res == sin + dotted_line
def test_bad_resolver_raises(engine, parser):
cannot_resolve = 42, 3.0
with pytest.raises(TypeError, match="Resolver of type .+"):
pd.eval("1 + 2", resolvers=cannot_resolve, engine=engine, parser=parser)
def test_empty_string_raises(engine, parser):
# GH 13139
with pytest.raises(ValueError, match="expr cannot be an empty string"):
pd.eval("", engine=engine, parser=parser)
def test_more_than_one_expression_raises(engine, parser):
with pytest.raises(SyntaxError, match="only a single expression is allowed"):
pd.eval("1 + 1; 2 + 2", engine=engine, parser=parser)
@pytest.mark.parametrize("cmp", ("and", "or"))
@pytest.mark.parametrize("lhs", (int, float))
@pytest.mark.parametrize("rhs", (int, float))
def test_bool_ops_fails_on_scalars(lhs, cmp, rhs, engine, parser):
gen = {
int: lambda: np.random.default_rng(2).integers(10),
float: np.random.default_rng(2).standard_normal,
}
mid = gen[lhs]() # noqa: F841
lhs = gen[lhs]()
rhs = gen[rhs]()
ex1 = f"lhs {cmp} mid {cmp} rhs"
ex2 = f"lhs {cmp} mid and mid {cmp} rhs"
ex3 = f"(lhs {cmp} mid) & (mid {cmp} rhs)"
for ex in (ex1, ex2, ex3):
msg = "cannot evaluate scalar only bool ops|'BoolOp' nodes are not"
with pytest.raises(NotImplementedError, match=msg):
pd.eval(ex, engine=engine, parser=parser)
@pytest.mark.parametrize(
"other",
[
"'x'",
"...",
],
)
def test_equals_various(other):
df = DataFrame({"A": ["a", "b", "c"]}, dtype=object)
result = df.eval(f"A == {other}")
expected = Series([False, False, False], name="A")
tm.assert_series_equal(result, expected)
def test_inf(engine, parser):
s = "inf + 1"
expected = np.inf
result = pd.eval(s, engine=engine, parser=parser)
assert result == expected
@pytest.mark.parametrize("column", ["Temp(°C)", "Capacitance(μF)"])
def test_query_token(engine, column):
# See: https://github.com/pandas-dev/pandas/pull/42826
df = DataFrame(
np.random.default_rng(2).standard_normal((5, 2)), columns=[column, "b"]
)
expected = df[df[column] > 5]
query_string = f"`{column}` > 5"
result = df.query(query_string, engine=engine)
tm.assert_frame_equal(result, expected)
def test_negate_lt_eq_le(engine, parser):
df = DataFrame([[0, 10], [1, 20]], columns=["cat", "count"])
expected = df[~(df.cat > 0)]
result = df.query("~(cat > 0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
if parser == "python":
msg = "'Not' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query("not (cat > 0)", engine=engine, parser=parser)
else:
result = df.query("not (cat > 0)", engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"column",
DEFAULT_GLOBALS.keys(),
)
def test_eval_no_support_column_name(request, column):
# GH 44603
if column in ["True", "False", "inf", "Inf"]:
request.applymarker(
pytest.mark.xfail(
raises=KeyError,
reason=f"GH 47859 DataFrame eval not supported with {column}",
)
)
df = DataFrame(
np.random.default_rng(2).integers(0, 100, size=(10, 2)),
columns=[column, "col1"],
)
expected = df[df[column] > 6]
result = df.query(f"{column}>6")
tm.assert_frame_equal(result, expected)
def test_set_inplace():
# https://github.com/pandas-dev/pandas/issues/47449
# Ensure we don't only update the DataFrame inplace, but also the actual
# column values, such that references to this column also get updated
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
result_view = df[:]
ser = df["A"]
df.eval("A = B + C", inplace=True)
expected = DataFrame({"A": [11, 13, 15], "B": [4, 5, 6], "C": [7, 8, 9]})
tm.assert_frame_equal(df, expected)
expected = Series([1, 2, 3], name="A")
tm.assert_series_equal(ser, expected)
tm.assert_series_equal(result_view["A"], expected)
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_args(value):
msg = 'For argument "inplace" expected type bool, received type'
with pytest.raises(ValueError, match=msg):
pd.eval("2+2", inplace=value)
@td.skip_if_no("numexpr")
def test_eval_float_div_numexpr():
# GH 59736
result = pd.eval("1 / 2", engine="numexpr")
expected = 0.5
assert result == expected
def test_method_calls_on_binop():
# GH 61175
x = Series([1, 2, 3, 5])
y = Series([2, 3, 4])
# Method call on binary operation result
result = pd.eval("(x + y).dropna()")
expected = (x + y).dropna()
tm.assert_series_equal(result, expected)
# Test with other binary operations
result = pd.eval("(x * y).dropna()")
expected = (x * y).dropna()
tm.assert_series_equal(result, expected)
# Test with method chaining
result = pd.eval("(x + y).dropna().reset_index(drop=True)")
expected = (x + y).dropna().reset_index(drop=True)
tm.assert_series_equal(result, expected)
|
TestScope
|
python
|
pytorch__pytorch
|
torch/_inductor/ir.py
|
{
"start": 191287,
"end": 191445
}
|
class ____(InputsKernel):
def is_no_op(self) -> bool:
return True
def get_reads(self) -> OrderedSet[Dep]:
return OrderedSet()
|
NopKernel
|
python
|
rushter__MLAlgorithms
|
mla/neuralnet/regularizers.py
|
{
"start": 410,
"end": 508
}
|
class ____(Regularizer):
def _penalty(self, weights):
return self.C * np.abs(weights)
|
L1
|
python
|
celery__celery
|
t/unit/app/test_registry.py
|
{
"start": 558,
"end": 2349
}
|
class ____:
def setup_method(self):
self.mytask = self.app.task(name='A', shared=False)(returns)
self.missing_name_task = self.app.task(
name=None, shared=False)(returns)
self.missing_name_task.name = None # name is overridden with path
self.myperiodic = self.app.task(
name='B', shared=False, type='periodic',
)(returns)
def test_NotRegistered_str(self):
assert repr(self.app.tasks.NotRegistered('tasks.add'))
def assert_register_unregister_cls(self, r, task):
r.unregister(task)
with pytest.raises(r.NotRegistered):
r.unregister(task)
r.register(task)
assert task.name in r
def test_task_registry(self):
r = self.app._tasks
assert isinstance(r, dict)
self.assert_register_unregister_cls(r, self.mytask)
self.assert_register_unregister_cls(r, self.myperiodic)
with pytest.raises(InvalidTaskError):
r.register(self.missing_name_task)
r.register(self.myperiodic)
r.unregister(self.myperiodic.name)
assert self.myperiodic not in r
r.register(self.myperiodic)
tasks = dict(r)
assert tasks.get(self.mytask.name) is self.mytask
assert tasks.get(self.myperiodic.name) is self.myperiodic
assert r[self.mytask.name] is self.mytask
assert r[self.myperiodic.name] is self.myperiodic
r.unregister(self.mytask)
assert self.mytask.name not in r
r.unregister(self.myperiodic)
assert self.myperiodic.name not in r
assert self.mytask.run()
assert self.myperiodic.run()
def test_compat(self):
assert self.app.tasks.regular()
assert self.app.tasks.periodic()
|
test_TaskRegistry
|
python
|
doocs__leetcode
|
lcof/面试题37. 序列化二叉树/Solution.py
|
{
"start": 172,
"end": 1441
}
|
class ____:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if root is None:
return ""
q = deque([root])
ans = []
while q:
node = q.popleft()
if node:
ans.append(str(node.val))
q.append(node.left)
q.append(node.right)
else:
ans.append("#")
return ",".join(ans)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
if not data:
return None
vals = data.split(",")
root = TreeNode(int(vals[0]))
q = deque([root])
i = 1
while q:
node = q.popleft()
if vals[i] != "#":
node.left = TreeNode(int(vals[i]))
q.append(node.left)
i += 1
if vals[i] != "#":
node.right = TreeNode(int(vals[i]))
q.append(node.right)
i += 1
return root
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
Codec
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_comprehend.py
|
{
"start": 1711,
"end": 4365
}
|
class ____:
@pytest.mark.parametrize("aws_conn_id", [None, NOTSET, "aws_test_conn"])
@pytest.mark.parametrize("region_name", [None, NOTSET, "ca-central-1"])
def test_initialize_comprehend_base_operator(self, aws_conn_id, region_name):
op_kw = {"aws_conn_id": aws_conn_id, "region_name": region_name}
op_kw = {k: v for k, v in op_kw.items() if v is not NOTSET}
comprehend_base_op = ComprehendBaseOperator(
task_id="comprehend_base_operator",
input_data_config=INPUT_DATA_CONFIG,
output_data_config=OUTPUT_DATA_CONFIG,
language_code=LANGUAGE_CODE,
data_access_role_arn=ROLE_ARN,
**op_kw,
)
assert comprehend_base_op.aws_conn_id == (aws_conn_id if aws_conn_id is not NOTSET else "aws_default")
assert comprehend_base_op.region_name == (region_name if region_name is not NOTSET else None)
@mock.patch.object(ComprehendBaseOperator, "hook", new_callable=mock.PropertyMock)
def test_initialize_comprehend_base_operator_hook(self, comprehend_base_operator_mock_hook):
comprehend_base_op = ComprehendBaseOperator(
task_id="comprehend_base_operator",
input_data_config=INPUT_DATA_CONFIG,
output_data_config=OUTPUT_DATA_CONFIG,
language_code=LANGUAGE_CODE,
data_access_role_arn=ROLE_ARN,
)
mocked_hook = mock.MagicMock(name="MockHook")
mocked_client = mock.MagicMock(name="MockClient")
mocked_hook.conn = mocked_client
comprehend_base_operator_mock_hook.return_value = mocked_hook
assert comprehend_base_op.client == mocked_client
comprehend_base_operator_mock_hook.assert_called_once()
def test_overwritten_conn_passed_to_hook(self):
OVERWRITTEN_CONN = "new-conn-id"
op = ComprehendBaseOperator(
task_id="comprehend_base_operator",
input_data_config=INPUT_DATA_CONFIG,
output_data_config=OUTPUT_DATA_CONFIG,
language_code=LANGUAGE_CODE,
data_access_role_arn=ROLE_ARN,
aws_conn_id=OVERWRITTEN_CONN,
)
assert op.hook.aws_conn_id == OVERWRITTEN_CONN
def test_default_conn_passed_to_hook(self):
DEFAULT_CONN = "aws_default"
op = ComprehendBaseOperator(
task_id="comprehend_base_operator",
input_data_config=INPUT_DATA_CONFIG,
output_data_config=OUTPUT_DATA_CONFIG,
language_code=LANGUAGE_CODE,
data_access_role_arn=ROLE_ARN,
)
assert op.hook.aws_conn_id == DEFAULT_CONN
|
TestComprehendBaseOperator
|
python
|
kamyu104__LeetCode-Solutions
|
Python/divide-two-integers.py
|
{
"start": 39,
"end": 1187
}
|
class ____(object):
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
result, dvd, dvs = 0, abs(dividend), abs(divisor)
while dvd >= dvs:
inc = dvs
i = 0
while dvd >= inc:
dvd -= inc
result += 1 << i
inc <<= 1
i += 1
if dividend > 0 and divisor < 0 or dividend < 0 and divisor > 0:
return -result
else:
return result
def divide2(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
positive = (dividend < 0) is (divisor < 0)
dividend, divisor = abs(dividend), abs(divisor)
res = 0
while dividend >= divisor:
temp, i = divisor, 1
while dividend >= temp:
dividend -= temp
res += i
i <<= 1
temp <<= 1
if not positive:
res = -res
return min(max(-2147483648, res), 2147483647)
|
Solution
|
python
|
PyCQA__pylint
|
pylint/checkers/variables.py
|
{
"start": 17967,
"end": 47319
}
|
class ____:
"""A simple class to handle consumed, to consume and scope type info of node locals."""
node: nodes.NodeNG
scope_type: str
to_consume: Consumption
consumed: Consumption
consumed_uncertain: Consumption
"""Retrieves nodes filtered out by get_next_to_consume() that may not
have executed.
These include nodes such as statements in except blocks, or statements
in try blocks (when evaluating their corresponding except and finally
blocks). Checkers that want to treat the statements as executed
(e.g. for unused-variable) may need to add them back.
"""
def __init__(self, node: nodes.NodeNG, scope_type: str):
self.node = node
self.scope_type = scope_type
self.to_consume = copy.copy(node.locals)
self.consumed = {}
self.consumed_uncertain = defaultdict(list)
self.names_under_always_false_test: set[str] = set()
self.names_defined_under_one_branch_only: set[str] = set()
def __repr__(self) -> str:
_to_consumes = [f"{k}->{v}" for k, v in self.to_consume.items()]
_consumed = [f"{k}->{v}" for k, v in self.consumed.items()]
_consumed_uncertain = [f"{k}->{v}" for k, v in self.consumed_uncertain.items()]
to_consumes = ", ".join(_to_consumes)
consumed = ", ".join(_consumed)
consumed_uncertain = ", ".join(_consumed_uncertain)
return f"""
to_consume : {to_consumes}
consumed : {consumed}
consumed_uncertain: {consumed_uncertain}
scope_type : {self.scope_type}
"""
def mark_as_consumed(self, name: str, consumed_nodes: list[nodes.NodeNG]) -> None:
"""Mark the given nodes as consumed for the name.
If all of the nodes for the name were consumed, delete the name from
the to_consume dictionary
"""
unconsumed = [n for n in self.to_consume[name] if n not in set(consumed_nodes)]
self.consumed[name] = consumed_nodes
if unconsumed:
self.to_consume[name] = unconsumed
else:
del self.to_consume[name]
def get_next_to_consume(self, node: nodes.Name) -> list[nodes.NodeNG] | None:
"""Return a list of the nodes that define `node` from this scope.
If it is uncertain whether a node will be consumed, such as for statements in
except blocks, add it to self.consumed_uncertain instead of returning it.
Return None to indicate a special case that needs to be handled by the caller.
"""
name = node.name
parent_node = node.parent
found_nodes = self.to_consume.get(name)
node_statement = node.statement()
if (
found_nodes
and isinstance(parent_node, nodes.Assign)
and parent_node == found_nodes[0].parent
):
lhs = found_nodes[0].parent.targets[0]
if (
isinstance(lhs, nodes.AssignName) and lhs.name == name
): # this name is defined in this very statement
found_nodes = None
if (
found_nodes
and isinstance(parent_node, nodes.For)
and parent_node.iter == node
and parent_node.target in found_nodes
):
# Only filter out the for-loop target if there are other definitions besides the target
other_definitions = [fn for fn in found_nodes if fn != parent_node.target]
if other_definitions:
found_nodes = other_definitions
else:
found_nodes = None
# Before filtering, check that this node's name is not a nonlocal
if _is_nonlocal_name(node, node.frame()):
return found_nodes
# And no comprehension is under the node's frame
if VariablesChecker._comprehension_between_frame_and_node(node):
return found_nodes
# Filter out assignments in ExceptHandlers that node is not contained in
if found_nodes:
found_nodes = [
n
for n in found_nodes
if not isinstance(n.statement(), nodes.ExceptHandler)
or n.statement().parent_of(node)
]
# Filter out assignments guarded by always false conditions
if found_nodes:
uncertain_nodes = self._uncertain_nodes_if_tests(found_nodes, node)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# Filter out assignments in an Except clause that the node is not
# contained in, assuming they may fail
if found_nodes:
uncertain_nodes = self._uncertain_nodes_in_except_blocks(
found_nodes, node, node_statement
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# If this node is in a Finally block of a Try/Finally,
# filter out assignments in the try portion, assuming they may fail
if found_nodes:
uncertain_nodes = (
self._uncertain_nodes_in_try_blocks_when_evaluating_finally_blocks(
found_nodes, node_statement, name
)
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
# If this node is in an ExceptHandler,
# filter out assignments in the try portion, assuming they may fail
if found_nodes:
uncertain_nodes = (
self._uncertain_nodes_in_try_blocks_when_evaluating_except_blocks(
found_nodes, node_statement
)
)
self.consumed_uncertain[node.name] += uncertain_nodes
uncertain_nodes_set = set(uncertain_nodes)
found_nodes = [n for n in found_nodes if n not in uncertain_nodes_set]
return found_nodes
def _inferred_to_define_name_raise_or_return(
self,
name: str,
node: nodes.Try | nodes.With | nodes.For | nodes.While | nodes.Match | nodes.If,
) -> bool:
"""Return True if there is a path under this `if_node`
that is inferred to define `name`, raise, or return.
"""
match node:
case nodes.Try():
# Allow either a path through try/else/finally OR a path through ALL except handlers
try_except_node = node
if node.finalbody:
try_except_node = next(
(child for child in node.nodes_of_class(nodes.Try)),
None,
)
handlers = try_except_node.handlers if try_except_node else []
return NamesConsumer._defines_name_raises_or_returns_recursive(
name, node
) or all(
NamesConsumer._defines_name_raises_or_returns_recursive(
name, handler
)
for handler in handlers
)
case nodes.With() | nodes.For() | nodes.While():
return NamesConsumer._defines_name_raises_or_returns_recursive(
name, node
)
case nodes.Match():
return all(
NamesConsumer._defines_name_raises_or_returns_recursive(name, case)
for case in node.cases
)
case nodes.If():
return self._inferred_to_define_name_raise_or_return_for_if_node(
name, node
)
case _: # pragma: no cover
# The function is only called for Try, With, For, While, Match and
# If nodes. All of which are being handled above.
raise AssertionError
def _inferred_to_define_name_raise_or_return_for_if_node(
self, name: str, node: nodes.If
) -> bool:
# Be permissive if there is a break or a continue
if any(node.nodes_of_class(nodes.Break, nodes.Continue)):
return True
# Is there an assignment in this node itself, e.g. in named expression?
if NamesConsumer._defines_name_raises_or_returns(name, node):
return True
test = node.test.value if isinstance(node.test, nodes.NamedExpr) else node.test
all_inferred = utils.infer_all(test)
only_search_if = False
only_search_else = True
for inferred in all_inferred:
if not isinstance(inferred, nodes.Const):
only_search_else = False
continue
val = inferred.value
only_search_if = only_search_if or (val != NotImplemented and val)
only_search_else = only_search_else and not val
# Only search else branch when test condition is inferred to be false
if all_inferred and only_search_else:
self.names_under_always_false_test.add(name)
return self._branch_handles_name(name, node.orelse)
# Search both if and else branches
if_branch_handles = self._branch_handles_name(name, node.body)
else_branch_handles = self._branch_handles_name(name, node.orelse)
if if_branch_handles ^ else_branch_handles:
self.names_defined_under_one_branch_only.add(name)
elif name in self.names_defined_under_one_branch_only:
self.names_defined_under_one_branch_only.remove(name)
return if_branch_handles and else_branch_handles
def _branch_handles_name(self, name: str, body: Iterable[nodes.NodeNG]) -> bool:
return any(
NamesConsumer._defines_name_raises_or_returns(name, if_body_stmt)
or (
isinstance(
if_body_stmt,
(
nodes.If,
nodes.Try,
nodes.With,
nodes.For,
nodes.While,
nodes.Match,
),
)
and self._inferred_to_define_name_raise_or_return(name, if_body_stmt)
)
for if_body_stmt in body
)
def _uncertain_nodes_if_tests(
self,
found_nodes: list[nodes.NodeNG],
node: nodes.NodeNG,
) -> list[nodes.NodeNG]:
"""Identify nodes of uncertain execution because they are defined under if
tests.
Don't identify a node if there is a path that is inferred to
define the name, raise, or return (e.g. any executed if/elif/else branch).
"""
uncertain_nodes = []
for other_node in found_nodes:
match other_node:
case nodes.AssignName():
name = other_node.name
case nodes.Import() | nodes.ImportFrom():
name = node.name
case nodes.FunctionDef() | nodes.ClassDef():
name = other_node.name
case _:
continue
all_if = [
n
for n in other_node.node_ancestors()
if isinstance(n, nodes.If) and not n.parent_of(node)
]
if not all_if:
continue
closest_if = all_if[0]
if (
isinstance(node, nodes.AssignName)
and node.frame() is not closest_if.frame()
):
continue
if closest_if.parent_of(node):
continue
outer_if = all_if[-1]
if NamesConsumer._node_guarded_by_same_test(node, outer_if):
continue
# Name defined in the if/else control flow
if self._inferred_to_define_name_raise_or_return(name, outer_if):
continue
uncertain_nodes.append(other_node)
return uncertain_nodes
@staticmethod
def _node_guarded_by_same_test(node: nodes.NodeNG, other_if: nodes.If) -> bool:
"""Identify if `node` is guarded by an equivalent test as `other_if`.
Two tests are equivalent if their string representations are identical
or if their inferred values consist only of constants and those constants
are identical, and the if test guarding `node` is not a Name.
"""
if isinstance(other_if.test, nodes.NamedExpr):
other_if_test = other_if.test.target
else:
other_if_test = other_if.test
other_if_test_as_string = other_if_test.as_string()
other_if_test_all_inferred = utils.infer_all(other_if_test)
for ancestor in node.node_ancestors():
if not isinstance(ancestor, (nodes.If, nodes.IfExp)):
continue
if ancestor.test.as_string() == other_if_test_as_string:
return True
if isinstance(ancestor.test, nodes.Name):
continue
all_inferred = utils.infer_all(ancestor.test)
if len(all_inferred) == len(other_if_test_all_inferred):
if any(
not isinstance(test, nodes.Const)
for test in (*all_inferred, *other_if_test_all_inferred)
):
continue
if {test.value for test in all_inferred} != {
test.value for test in other_if_test_all_inferred
}:
continue
return True
return False
@staticmethod
def _uncertain_nodes_in_except_blocks(
found_nodes: list[nodes.NodeNG],
node: nodes.NodeNG,
node_statement: _base_nodes.Statement,
) -> list[nodes.NodeNG]:
"""Return any nodes in ``found_nodes`` that should be treated as uncertain
because they are in an except block.
"""
uncertain_nodes = []
for other_node in found_nodes:
other_node_statement = other_node.statement()
# Only testing for statements in the except block of Try
closest_except_handler = utils.get_node_first_ancestor_of_type(
other_node_statement, nodes.ExceptHandler
)
if not closest_except_handler:
continue
# If the other node is in the same scope as this node, assume it executes
if closest_except_handler.parent_of(node):
continue
closest_try_except: nodes.Try = closest_except_handler.parent
# If the try or else blocks return, assume the except blocks execute.
try_block_returns = any(
isinstance(try_statement, nodes.Return)
for try_statement in closest_try_except.body
)
else_block_returns = any(
isinstance(else_statement, nodes.Return)
for else_statement in closest_try_except.orelse
)
else_block_exits = any(
isinstance(else_statement, nodes.Expr)
and isinstance(else_statement.value, nodes.Call)
and utils.is_terminating_func(else_statement.value)
for else_statement in closest_try_except.orelse
)
else_block_continues = any(
isinstance(else_statement, nodes.Continue)
for else_statement in closest_try_except.orelse
)
if (
else_block_continues
and isinstance(node_statement.parent, (nodes.For, nodes.While))
and closest_try_except.parent.parent_of(node_statement)
):
continue
if try_block_returns or else_block_returns or else_block_exits:
# Exception: if this node is in the final block of the other_node_statement,
# it will execute before returning. Assume the except statements are uncertain.
if (
isinstance(node_statement.parent, nodes.Try)
and node_statement in node_statement.parent.finalbody
and closest_try_except.parent.parent_of(node_statement)
):
uncertain_nodes.append(other_node)
# Or the node_statement is in the else block of the relevant Try
elif (
isinstance(node_statement.parent, nodes.Try)
and node_statement in node_statement.parent.orelse
and closest_try_except.parent.parent_of(node_statement)
):
uncertain_nodes.append(other_node)
# Assume the except blocks execute, so long as each handler
# defines the name, raises, or returns.
elif all(
NamesConsumer._defines_name_raises_or_returns_recursive(
node.name, handler
)
for handler in closest_try_except.handlers
):
continue
if NamesConsumer._check_loop_finishes_via_except(node, closest_try_except):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
@staticmethod
def _defines_name_raises_or_returns(name: str, node: nodes.NodeNG) -> bool:
if isinstance(node, (nodes.Raise, nodes.Assert, nodes.Return, nodes.Continue)):
return True
if isinstance(node, nodes.Expr) and isinstance(node.value, nodes.Call):
if utils.is_terminating_func(node.value):
return True
if (
isinstance(node.value.func, nodes.Name)
and node.value.func.name == "assert_never"
):
return True
if (
isinstance(node, nodes.AnnAssign)
and node.value
and isinstance(node.target, nodes.AssignName)
and node.target.name == name
):
return True
if isinstance(node, nodes.Assign):
for target in node.targets:
for elt in utils.get_all_elements(target):
if isinstance(elt, nodes.Starred):
elt = elt.value
if isinstance(elt, nodes.AssignName) and elt.name == name:
return True
if isinstance(node, nodes.If):
if any(
child_named_expr.target.name == name
for child_named_expr in node.nodes_of_class(nodes.NamedExpr)
):
return True
if isinstance(node, (nodes.Import, nodes.ImportFrom)) and any(
(node_name[1] and node_name[1] == name)
or (node_name[0] == name)
or (node_name[0].startswith(name + "."))
for node_name in node.names
):
return True
if isinstance(node, nodes.With) and any(
isinstance(item[1], nodes.AssignName) and item[1].name == name
for item in node.items
):
return True
if isinstance(node, (nodes.ClassDef, nodes.FunctionDef)) and node.name == name:
return True
if (
isinstance(node, nodes.ExceptHandler)
and node.name
and node.name.name == name
):
return True
return False
@staticmethod
def _defines_name_raises_or_returns_recursive(
name: str,
node: nodes.NodeNG,
) -> bool:
"""Return True if some child of `node` defines the name `name`,
raises, or returns.
"""
for stmt in node.get_children():
if NamesConsumer._defines_name_raises_or_returns(name, stmt):
return True
match stmt:
case nodes.If() | nodes.With():
if any(
NamesConsumer._defines_name_raises_or_returns(name, nested_stmt)
for nested_stmt in stmt.get_children()
):
return True
case nodes.Try() if (
not stmt.finalbody
and NamesConsumer._defines_name_raises_or_returns_recursive(
name, stmt
)
):
return True
case nodes.Match():
return all(
NamesConsumer._defines_name_raises_or_returns_recursive(
name, case
)
for case in stmt.cases
)
return False
@staticmethod
def _check_loop_finishes_via_except(
node: nodes.NodeNG,
other_node_try_except: nodes.Try,
) -> bool:
"""Check for a specific control flow scenario.
Described in https://github.com/pylint-dev/pylint/issues/5683.
A scenario where the only non-break exit from a loop consists of the very
except handler we are examining, such that code in the `else` branch of
the loop can depend on it being assigned.
Example:
for _ in range(3):
try:
do_something()
except:
name = 1 <-- only non-break exit from loop
else:
break
else:
print(name)
"""
if not other_node_try_except.orelse:
return False
closest_loop: nodes.For | nodes.While | None = (
utils.get_node_first_ancestor_of_type(node, (nodes.For, nodes.While))
)
if closest_loop is None:
return False
if not any(
else_statement is node or else_statement.parent_of(node)
for else_statement in closest_loop.orelse
):
# `node` not guarded by `else`
return False
for inner_else_statement in other_node_try_except.orelse:
if isinstance(inner_else_statement, nodes.Break):
break_stmt = inner_else_statement
break
else:
# No break statement
return False
def _try_in_loop_body(
other_node_try_except: nodes.Try,
loop: nodes.For | nodes.While,
) -> bool:
"""Return True if `other_node_try_except` is a descendant of `loop`."""
return any(
loop_body_statement is other_node_try_except
or loop_body_statement.parent_of(other_node_try_except)
for loop_body_statement in loop.body
)
if not _try_in_loop_body(other_node_try_except, closest_loop):
for ancestor in closest_loop.node_ancestors():
if isinstance(ancestor, (nodes.For, nodes.While)):
if _try_in_loop_body(other_node_try_except, ancestor):
break
else:
# `other_node_try_except` didn't have a shared ancestor loop
return False
for loop_stmt in closest_loop.body:
if NamesConsumer._recursive_search_for_continue_before_break(
loop_stmt, break_stmt
):
break
else:
# No continue found, so we arrived at our special case!
return True
return False
@staticmethod
def _recursive_search_for_continue_before_break(
stmt: _base_nodes.Statement,
break_stmt: nodes.Break,
) -> bool:
"""Return True if any Continue node can be found in descendants of `stmt`
before encountering `break_stmt`, ignoring any nested loops.
"""
if stmt is break_stmt:
return False
if isinstance(stmt, nodes.Continue):
return True
for child in stmt.get_children():
if isinstance(stmt, (nodes.For, nodes.While)):
continue
if NamesConsumer._recursive_search_for_continue_before_break(
child, break_stmt
):
return True
return False
@staticmethod
def _uncertain_nodes_in_try_blocks_when_evaluating_except_blocks(
found_nodes: list[nodes.NodeNG],
node_statement: _base_nodes.Statement,
) -> list[nodes.NodeNG]:
"""Return any nodes in ``found_nodes`` that should be treated as uncertain.
Nodes are uncertain when they are in a try block and the ``node_statement``
being evaluated is in one of its except handlers.
"""
uncertain_nodes: list[nodes.NodeNG] = []
closest_except_handler = utils.get_node_first_ancestor_of_type(
node_statement, nodes.ExceptHandler
)
if closest_except_handler is None:
return uncertain_nodes
for other_node in found_nodes:
other_node_statement = other_node.statement()
# If the other statement is the except handler guarding `node`, it executes
if other_node_statement is closest_except_handler:
continue
# Ensure other_node is in a try block
(
other_node_try_ancestor,
other_node_try_ancestor_visited_child,
) = utils.get_node_first_ancestor_of_type_and_its_child(
other_node_statement, nodes.Try
)
if other_node_try_ancestor is None:
continue
if (
other_node_try_ancestor_visited_child
not in other_node_try_ancestor.body
):
continue
# Make sure nesting is correct -- there should be at least one
# except handler that is a sibling attached to the try ancestor,
# or is an ancestor of the try ancestor.
if not any(
closest_except_handler in other_node_try_ancestor.handlers
or other_node_try_ancestor_except_handler
in closest_except_handler.node_ancestors()
for other_node_try_ancestor_except_handler in other_node_try_ancestor.handlers
):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
@staticmethod
def _uncertain_nodes_in_try_blocks_when_evaluating_finally_blocks(
found_nodes: list[nodes.NodeNG],
node_statement: _base_nodes.Statement,
name: str,
) -> list[nodes.NodeNG]:
uncertain_nodes: list[nodes.NodeNG] = []
(
closest_try_finally_ancestor,
child_of_closest_try_finally_ancestor,
) = utils.get_node_first_ancestor_of_type_and_its_child(
node_statement, nodes.Try
)
if closest_try_finally_ancestor is None:
return uncertain_nodes
if (
child_of_closest_try_finally_ancestor
not in closest_try_finally_ancestor.finalbody
):
return uncertain_nodes
for other_node in found_nodes:
other_node_statement = other_node.statement()
(
other_node_try_finally_ancestor,
child_of_other_node_try_finally_ancestor,
) = utils.get_node_first_ancestor_of_type_and_its_child(
other_node_statement, nodes.Try
)
if other_node_try_finally_ancestor is None:
continue
# other_node needs to descend from the try of a try/finally.
if (
child_of_other_node_try_finally_ancestor
not in other_node_try_finally_ancestor.body
):
continue
# If the two try/finally ancestors are not the same, then
# node_statement's closest try/finally ancestor needs to be in
# the final body of other_node's try/finally ancestor, or
# descend from one of the statements in that final body.
if (
other_node_try_finally_ancestor is not closest_try_finally_ancestor
and not any(
other_node_final_statement is closest_try_finally_ancestor
or other_node_final_statement.parent_of(
closest_try_finally_ancestor
)
for other_node_final_statement in other_node_try_finally_ancestor.finalbody
)
):
continue
# Is the name defined in all exception clauses?
if other_node_try_finally_ancestor.handlers and all(
NamesConsumer._defines_name_raises_or_returns_recursive(name, handler)
for handler in other_node_try_finally_ancestor.handlers
):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
# pylint: disable=too-many-public-methods
|
NamesConsumer
|
python
|
great-expectations__great_expectations
|
tests/datasource/fluent/test_spark_azure_blob_storage_datasource.py
|
{
"start": 1251,
"end": 10358
}
|
class ____:
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def get_container_client(self, container: str) -> azure.ContainerClient:
return cast("azure.ContainerClient", MockContainerClient())
def _build_spark_abs_datasource(
azure_options: Dict[str, Any] | None = None,
) -> SparkAzureBlobStorageDatasource:
azure_client: azure.BlobServiceClient = cast("azure.BlobServiceClient", MockBlobServiceClient())
spark_abs_datasource = SparkAzureBlobStorageDatasource(
name="spark_abs_datasource",
azure_options=azure_options or {},
)
spark_abs_datasource._azure_client = azure_client
return spark_abs_datasource
@pytest.fixture
def spark_abs_datasource() -> SparkAzureBlobStorageDatasource:
spark_abs_datasource: SparkAzureBlobStorageDatasource = _build_spark_abs_datasource()
return spark_abs_datasource
@pytest.fixture
def object_keys() -> List[str]:
return [
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
]
@pytest.fixture
@mock.patch(
"great_expectations.datasource.fluent.data_connector.azure_blob_storage_data_connector.list_azure_keys"
)
def csv_asset(
mock_list_keys,
object_keys: List[str],
spark_abs_datasource: SparkAzureBlobStorageDatasource,
) -> PathDataAsset:
mock_list_keys.return_value = object_keys
asset = spark_abs_datasource.add_csv_asset(
name="csv_asset",
abs_container="my_container",
)
return asset
@pytest.mark.unit
def test_construct_spark_abs_datasource_with_account_url_and_credential():
spark_abs_datasource = SparkAzureBlobStorageDatasource(
name="spark_abs_datasource",
azure_options={
"account_url": "my_account_url.blob.core.windows.net",
"credential": "my_credential",
},
)
# noinspection PyUnresolvedReferences
azure_client: azure.BlobServiceClient = spark_abs_datasource._get_azure_client()
assert azure_client is not None
assert spark_abs_datasource.name == "spark_abs_datasource"
@pytest.mark.unit
def test_construct_spark_abs_datasource_with_conn_str_and_credential():
spark_abs_datasource = SparkAzureBlobStorageDatasource(
name="spark_abs_datasource",
azure_options={ # Representative of format noted in official docs
"conn_str": "DefaultEndpointsProtocol=https;AccountName=storagesample;AccountKey=my_account_key", # noqa: E501 # FIXME CoP
"credential": "my_credential",
},
)
# noinspection PyUnresolvedReferences
azure_client: azure.BlobServiceClient = spark_abs_datasource._get_azure_client()
assert azure_client is not None
assert spark_abs_datasource.name == "spark_abs_datasource"
@pytest.mark.unit
def test_construct_spark_abs_datasource_with_valid_account_url_assigns_account_name():
spark_abs_datasource = SparkAzureBlobStorageDatasource(
name="spark_abs_datasource",
azure_options={
"account_url": "my_account_url.blob.core.windows.net",
"credential": "my_credential",
},
)
# noinspection PyUnresolvedReferences
azure_client: azure.BlobServiceClient = spark_abs_datasource._get_azure_client()
assert azure_client is not None
assert spark_abs_datasource.name == "spark_abs_datasource"
@pytest.mark.unit
def test_construct_spark_abs_datasource_with_valid_conn_str_assigns_account_name():
spark_abs_datasource = SparkAzureBlobStorageDatasource(
name="spark_abs_datasource",
azure_options={ # Representative of format noted in official docs
"conn_str": "DefaultEndpointsProtocol=https;AccountName=storagesample;AccountKey=my_account_key", # noqa: E501 # FIXME CoP
"credential": "my_credential",
},
)
# noinspection PyUnresolvedReferences
azure_client: azure.BlobServiceClient = spark_abs_datasource._get_azure_client()
assert azure_client is not None
assert spark_abs_datasource.name == "spark_abs_datasource"
@pytest.mark.unit
def test_construct_spark_abs_datasource_with_multiple_auth_methods_raises_error():
# Raises error in DataContext's schema validation due to having both `account_url` and `conn_str` # noqa: E501 # FIXME CoP
with pytest.raises(SparkAzureBlobStorageDatasourceError):
spark_abs_datasource = SparkAzureBlobStorageDatasource(
name="spark_abs_datasource",
azure_options={
"account_url": "account.blob.core.windows.net",
"conn_str": "DefaultEndpointsProtocol=https;AccountName=storagesample;AccountKey=my_account_key", # noqa: E501 # FIXME CoP
"credential": "my_credential",
},
)
# noinspection PyUnresolvedReferences
_ = spark_abs_datasource._get_azure_client()
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_connector.azure_blob_storage_data_connector.list_azure_keys"
)
@mock.patch("azure.storage.blob.BlobServiceClient")
def test_add_csv_asset_to_datasource(
mock_azure_client,
mock_list_keys,
object_keys: List[str],
spark_abs_datasource: SparkAzureBlobStorageDatasource,
):
mock_list_keys.return_value = object_keys
asset_specified_metadata = {"asset_level_metadata": "my_metadata"}
asset = spark_abs_datasource.add_csv_asset(
name="csv_asset",
abs_container="my_container",
batch_metadata=asset_specified_metadata,
)
assert asset.name == "csv_asset"
assert asset.batch_metadata == asset_specified_metadata
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_connector.azure_blob_storage_data_connector.list_azure_keys"
)
@mock.patch("azure.storage.blob.BlobServiceClient")
def test_construct_csv_asset_directly(mock_azure_client, mock_list_keys, object_keys: List[str]):
mock_list_keys.return_value = object_keys
asset = CSVAsset( # type: ignore[call-arg] # missing args
name="csv_asset",
)
assert asset.name == "csv_asset"
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_connector.azure_blob_storage_data_connector.list_azure_keys"
)
@mock.patch("azure.storage.blob.BlobServiceClient")
def test_csv_asset_with_batching_regex_named_parameters(
mock_azure_client,
mock_list_keys,
object_keys: List[str],
spark_abs_datasource: SparkAzureBlobStorageDatasource,
):
mock_list_keys.return_value = object_keys
asset = spark_abs_datasource.add_csv_asset(
name="csv_asset",
abs_container="my_container",
)
batching_regex = r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv"
batch_def = asset.add_batch_definition_monthly(name="batch def", regex=batching_regex)
options = asset.get_batch_parameters_keys(partitioner=batch_def.partitioner)
assert options == ("path", "year", "month")
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_connector.azure_blob_storage_data_connector.list_azure_keys"
)
@mock.patch("azure.storage.blob.BlobServiceClient")
def test_csv_asset_with_non_string_batching_regex_named_parameters(
mock_azure_client,
mock_list_keys,
object_keys: List[str],
spark_abs_datasource: SparkAzureBlobStorageDatasource,
):
mock_list_keys.return_value = object_keys
asset = spark_abs_datasource.add_csv_asset(
name="csv_asset",
abs_container="my_container",
)
with pytest.raises(ge_exceptions.InvalidBatchRequestError):
# price is an int which will raise an error
asset.build_batch_request({"name": "alex", "timestamp": "1234567890", "price": 1300})
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_connector.azure_blob_storage_data_connector.list_azure_keys"
)
@mock.patch("azure.storage.blob.BlobServiceClient")
def test_add_csv_asset_with_recursive_file_discovery_to_datasource(
mock_azure_client,
mock_list_keys,
object_keys: List[str],
spark_abs_datasource: SparkAzureBlobStorageDatasource,
):
"""
Tests that the abs_recursive_file_discovery-flag is passed on
to the list_keys-function as the recursive-parameter
This makes the list_keys-function search and return files also
from sub-directories on Azure, not just the files in the folder
specified with the abs_name_starts_with-parameter
"""
mock_list_keys.return_value = object_keys
asset_specified_metadata = {"asset_level_metadata": "my_metadata"}
spark_abs_datasource.add_csv_asset(
name="csv_asset",
abs_container="my_container",
batch_metadata=asset_specified_metadata,
abs_recursive_file_discovery=True,
)
assert "recursive" in mock_list_keys.call_args.kwargs
assert mock_list_keys.call_args.kwargs["recursive"] is True
|
MockBlobServiceClient
|
python
|
pytorch__pytorch
|
torchgen/_autoheuristic/pad_mm/gen_data_pad_mm.py
|
{
"start": 537,
"end": 4717
}
|
class ____(BenchmarkRunner): # type: ignore[misc, no-any-unimported]
"""
BenchmarkRunner for pad_mm. Used to generate collect training data with AutoHeuristic to learn a heuristic.
"""
def __init__(self) -> None:
super().__init__("pad_mm")
def create_input(self) -> tuple[Any, ...]:
dtype = self.get_dtype()
set_precision(dtype)
m, k, n = self.get_m_k_n(dtype)
(transpose_left, transpose_right) = transpose_tensors()
prepadded_left = self.prepadded()
prepadded_right = self.prepadded()
return (
m,
k,
n,
transpose_left,
transpose_right,
dtype,
prepadded_left,
prepadded_right,
)
def run_benchmark(
self,
m: int,
k: int,
n: int,
transpose_left: bool,
transpose_right: bool,
dtype: Any,
prepadded_left: bool,
prepadded_right: bool,
) -> None:
a, b = get_mm_tensors(
m,
k,
n,
transpose_left,
transpose_right,
dtype_left=dtype,
dtype_right=dtype,
)
print("Benchmarking the following input:")
print(f"m={m} k={k} n={n} dtype={dtype}")
print(f"transpose_left={transpose_left} transpose_right={transpose_right}")
print(f"prepadded_left={prepadded_left} prepadded_right={prepadded_right}")
with fresh_cache():
def mm(a: Any, b: Any) -> Any:
return torch.mm(a, b)
def mm_mat1_prepadded(a: Any, b: Any) -> Any:
return torch.mm(a + 1, b)
def mm_mat2_prepadded(a: Any, b: Any) -> Any:
return torch.mm(a, b + 1)
def mm_mat1_mat2_prepadded(a: Any, b: Any) -> Any:
return torch.mm(a + 1, b + 1)
if prepadded_left and prepadded_right:
cf = torch.compile(mm_mat1_mat2_prepadded)
elif prepadded_left:
cf = torch.compile(mm_mat1_prepadded)
elif prepadded_right:
cf = torch.compile(mm_mat2_prepadded)
else:
cf = torch.compile(mm)
cf(a, b)
torch.compiler.reset()
def get_random_dim(
self, min_power2: int = 1, max_power2: int = 16, p_unaligned: float = 0.25
) -> int:
aligned = random.choices([True, False], [1 - p_unaligned, p_unaligned])[0]
if aligned:
return 2 ** random.randint(min_power2, max_power2) # type: ignore[no-any-return]
else:
# choose a random number between 2^i and 2^(i+1)
return self.get_random_between_pow2(min_power2, max_power2) # type: ignore[no-any-return]
def is_aligned(self, dim: int, align_size: int) -> bool:
return dim % align_size == 0
def get_m_k_n(self, dtype: Any) -> tuple[int, int, int]:
uniform = random.choices([True, False])[0]
align_size = get_alignment_size_dtype(dtype)
# repeat until tensors fit in memory
while True:
if uniform:
m = random.randint(1, 65536)
k = random.randint(1, 65536)
n = random.randint(1, 65536)
else:
m = self.get_random_dim()
k = self.get_random_dim()
n = self.get_random_dim()
if all(self.is_aligned(dim, align_size) for dim in [m, k, n]):
# skip if already aligned
continue
if fits_in_memory(dtype, m, k, n):
return (m, k, n)
def prepadded(self, p_prepadded: float = 0.2) -> bool:
# p_prepadded: probability that a tensor is "prepadded", i.e. pad_mm excludes time it takes to pad from benchmarking
return random.choices([True, False], [p_prepadded, 1 - p_prepadded])[0]
def get_dtype(self) -> Any:
dtype_choices = [torch.float16, torch.bfloat16, torch.float32]
return random.choices(dtype_choices)[0]
if __name__ == "__main__":
runner = BenchmarkRunnerPadMM()
runner.run()
|
BenchmarkRunnerPadMM
|
python
|
huggingface__transformers
|
tests/models/qwen2_audio/test_modeling_qwen2_audio.py
|
{
"start": 1319,
"end": 4715
}
|
class ____:
def __init__(
self,
parent,
ignore_index=-100,
audio_token_index=0,
seq_length=25,
feat_seq_length=60,
text_config={
"model_type": "qwen2",
"intermediate_size": 36,
"initializer_range": 0.02,
"hidden_size": 32,
"max_position_embeddings": 52,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 2,
"use_labels": True,
"use_mrope": False,
"vocab_size": 99,
"pad_token_id": 1, # can't be the same as the audio token id
},
is_training=True,
audio_config={
"model_type": "qwen2_audio_encoder",
"d_model": 16,
"encoder_attention_heads": 4,
"encoder_ffn_dim": 16,
"encoder_layers": 2,
"num_mel_bins": 80,
"max_source_positions": 30,
"initializer_range": 0.02,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.audio_token_index = audio_token_index
self.text_config = text_config
self.audio_config = audio_config
self.seq_length = seq_length
self.feat_seq_length = feat_seq_length
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.is_training = is_training
self.batch_size = 3
self.encoder_seq_length = seq_length
def get_config(self):
return Qwen2AudioConfig(
text_config=self.text_config,
audio_config=self.audio_config,
ignore_index=self.ignore_index,
audio_token_index=self.audio_token_index,
)
def prepare_config_and_inputs(self):
input_features_values = floats_tensor(
[
self.batch_size,
self.audio_config["num_mel_bins"],
self.feat_seq_length,
]
)
config = self.get_config()
feature_attention_mask = torch.ones([self.batch_size, self.feat_seq_length], dtype=torch.long).to(torch_device)
return config, input_features_values, feature_attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_features_values, feature_attention_mask = config_and_inputs
input_length = (input_features_values.shape[-1] - 1) // 2 + 1
num_audio_tokens = (input_length - 2) // 2 + 1
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
attention_mask[:, :1] = 0
# we are giving 3 audios let's make sure we pass in 3 audios tokens
input_ids[:, 1 : 1 + num_audio_tokens] = config.audio_token_index
inputs_dict = {
"input_features": input_features_values,
"feature_attention_mask": feature_attention_mask,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
|
Qwen2AudioModelTester
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/objects/tool_node_mapping.py
|
{
"start": 3033,
"end": 3873
}
|
class ____(BaseObjectNodeMapping[QueryEngineTool]):
"""Base query tool node mapping."""
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
) -> "BaseQueryToolNodeMapping":
raise NotImplementedError(
"This object node mapping does not support persist method."
)
@property
def obj_node_mapping(self) -> Dict[int, Any]:
"""The mapping data structure between node and object."""
raise NotImplementedError("Subclasses should implement this!")
def persist(
self, persist_dir: str = ..., obj_node_mapping_fname: str = ...
) -> None:
"""Persist objs."""
raise NotImplementedError("Subclasses should implement this!")
|
BaseQueryToolNodeMapping
|
python
|
Textualize__textual
|
src/textual/reactive.py
|
{
"start": 16065,
"end": 18056
}
|
class ____(Reactive[ReactiveType]):
"""Create a reactive attribute (with no auto-refresh).
Args:
default: A default value or callable that returns a default.
init: Call watchers on initialize (post mount).
always_update: Call watchers even when the new value equals the old value.
bindings: Refresh bindings when the reactive changes.
toggle_class: An optional TCSS classname(s) to toggle based on the truthiness of the value.
"""
def __init__(
self,
default: ReactiveType | Callable[[], ReactiveType] | Initialize[ReactiveType],
init: bool = True,
always_update: bool = False,
bindings: bool = False,
toggle_class: str | None = None,
) -> None:
super().__init__(
default,
layout=False,
repaint=False,
init=init,
always_update=always_update,
bindings=bindings,
toggle_class=toggle_class,
)
def _watch(
node: DOMNode,
obj: Reactable,
attribute_name: str,
callback: WatchCallbackType,
*,
init: bool = True,
) -> None:
"""Watch a reactive variable on an object.
Args:
node: The node that created the watcher.
obj: The parent object.
attribute_name: The attribute to watch.
callback: A callable to call when the attribute changes.
init: True to call watcher initialization.
"""
if not hasattr(obj, "__watchers"):
setattr(obj, "__watchers", {})
watchers: dict[str, list[tuple[Reactable, WatchCallbackType]]]
watchers = getattr(obj, "__watchers")
watcher_list = watchers.setdefault(attribute_name, [])
if any(callback == callback_from_list for _, callback_from_list in watcher_list):
return
if init:
current_value = getattr(obj, attribute_name, None)
invoke_watcher(obj, callback, current_value, current_value)
watcher_list.append((node, callback))
|
var
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/network/aix.py
|
{
"start": 856,
"end": 5892
}
|
class ____(GenericBsdIfconfigNetwork):
"""
This is the AIX Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'AIX'
def get_default_interfaces(self, route_path):
interface = dict(v4={}, v6={})
netstat_path = self.module.get_bin_path('netstat')
if netstat_path is None:
return interface['v4'], interface['v6']
rc, out, err = self.module.run_command([netstat_path, '-nr'])
lines = out.splitlines()
for line in lines:
words = line.split()
if len(words) > 1 and words[0] == 'default':
if '.' in words[1]:
interface['v4']['gateway'] = words[1]
interface['v4']['interface'] = words[5]
elif ':' in words[1]:
interface['v6']['gateway'] = words[1]
interface['v6']['interface'] = words[5]
return interface['v4'], interface['v6']
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses=[],
all_ipv6_addresses=[],
)
uname_rc = uname_out = uname_err = None
uname_path = self.module.get_bin_path('uname')
if uname_path:
uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])
rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
for line in out.splitlines():
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match(r'^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[current_if['device']] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# don't bother with wpars it does not work
# zero means not in wpar
if not uname_rc and uname_out.split()[0] == '0':
if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
entstat_path = self.module.get_bin_path('entstat')
if entstat_path:
rc, out, err = self.module.run_command([entstat_path, current_if['device']])
if rc != 0:
break
for line in out.splitlines():
if not line:
pass
buff = re.match('^Hardware Address: (.*)', line)
if buff:
current_if['macaddress'] = buff.group(1)
buff = re.match('^Device Type:', line)
if buff and re.match('.*Ethernet', line):
current_if['type'] = 'ether'
# device must have mtu attribute in ODM
if 'mtu' not in current_if:
lsattr_path = self.module.get_bin_path('lsattr')
if lsattr_path:
rc, out, err = self.module.run_command([lsattr_path, '-El', current_if['device']])
if rc != 0:
break
for line in out.splitlines():
if line:
words = line.split()
if words[0] == 'mtu':
current_if['mtu'] = words[1]
return interfaces, ips
# AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
|
AIXNetwork
|
python
|
eventlet__eventlet
|
tests/mock.py
|
{
"start": 3852,
"end": 9801
}
|
class ____:
__slots__ = ['a']
DescriptorTypes = (
type(_slotted.a),
property,
)
def _getsignature(func, skipfirst, instance=False):
if inspect is None:
raise ImportError('inspect module not available')
if isinstance(func, ClassTypes) and not instance:
try:
func = func.__init__
except AttributeError:
return
skipfirst = True
elif not isinstance(func, FunctionTypes):
# for classes where instance is True we end up here too
try:
func = func.__call__
except AttributeError:
return
if inPy3k:
try:
argspec = inspect.getfullargspec(func)
except TypeError:
# C function / method, possibly inherited object().__init__
return
regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
else:
try:
regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
except TypeError:
# C function / method, possibly inherited object().__init__
return
# instance methods and classmethods need to lose the self argument
if getattr(func, self, None) is not None:
regargs = regargs[1:]
if skipfirst:
# this condition and the above one are never both True - why?
regargs = regargs[1:]
if inPy3k:
signature = inspect.formatargspec(
regargs, varargs, varkw, defaults,
kwonly, kwonlydef, ann, formatvalue=lambda value: "")
else:
signature = inspect.formatargspec(
regargs, varargs, varkwargs, defaults,
formatvalue=lambda value: "")
return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
if not _callable(func):
return
result = _getsignature(func, skipfirst, instance)
if result is None:
return
signature, func = result
# can't use self because "self" is common as an argument name
# unfortunately even not in the first place
src = "lambda _mock_self, %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
funcopy.__name__ = func.__name__
funcopy.__doc__ = func.__doc__
# funcopy.__dict__.update(func.__dict__)
funcopy.__module__ = func.__module__
if not inPy3k:
funcopy.func_defaults = func.func_defaults
return
funcopy.__defaults__ = func.__defaults__
funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
if isinstance(obj, ClassTypes):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, ClassTypes):
# already an instance
return getattr(obj, '__call__', None) is not None
klass = obj
# uses __bases__ instead of __mro__ so that we work with old style classes
if klass.__dict__.get('__call__') is not None:
return True
for base in klass.__bases__:
if _instance_callable(base):
return True
return False
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original.
if not _callable(original):
return
skipfirst = isinstance(original, ClassTypes)
result = _getsignature(original, skipfirst, instance)
if result is None:
# was a C function (e.g. object().__init__ ) that can't be mocked
return
signature, func = result
src = "lambda %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
name = original.__name__
if not _isidentifier(name):
name = 'funcopy'
context = {'_checksig_': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
_checksig_(*args, **kwargs)
return mock(*args, **kwargs)""" % name
exec(src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def _setup_func(funcopy, mock):
funcopy.mock = mock
# can't use isinstance with mocks
if not _is_instance_mock(mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
mock.reset_mock()
ret = funcopy.return_value
if _is_instance_mock(ret) and ret is not mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = _CallList()
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.assert_has_calls = assert_has_calls
funcopy.assert_any_call = assert_any_call
funcopy.reset_mock = reset_mock
mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
|
_slotted
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-jina-ai-reader/source_jina_ai_reader/config_migration.py
|
{
"start": 439,
"end": 3071
}
|
class ____:
"""
This class stands for migrating the search prompt config at runtime to encode characters.
"""
message_repository: MessageRepository = InMemoryMessageRepository()
@classmethod
def should_migrate(cls, config: Mapping[str, Any]) -> bool:
"""
based on the source spec.
Returns:
> True, if the transformation is necessary
> False, otherwise.
> Raises the Exception if the structure could not be migrated.
"""
return cls.is_url_encoded(config["search_prompt"]) # if search_prompt given in config is not encoded
@classmethod
def is_url_encoded(cls, s: str) -> bool:
return s == urllib.parse.unquote(s)
@classmethod
def modify(cls, config: Mapping[str, Any]) -> Mapping[str, Any]:
if "search_prompt" in config:
if cls.is_url_encoded(config["search_prompt"]):
config["search_prompt"] = urllib.parse.quote(config["search_prompt"])
else:
raise ValueError(f"Invalid config. got {config}")
return config
@classmethod
def modify_and_save(cls, config_path: str, source: Source, config: Mapping[str, Any]) -> Mapping[str, Any]:
# modify the config
migrated_config = cls.modify(config)
# save the config
source.write_config(migrated_config, config_path)
# return modified config
return migrated_config
@classmethod
def emit_control_message(cls, migrated_config: Mapping[str, Any]) -> None:
# add the Airbyte Control Message to message repo
cls.message_repository.emit_message(create_connector_config_control_message(migrated_config))
# emit the Airbyte Control Message from message queue to stdout
for message in cls.message_repository._message_queue:
print(message.json(exclude_unset=True))
@classmethod
def migrate(cls, args: List[str], source: Source) -> None:
"""
This method checks the input args, should the config be migrated,
transform if necessary and emit the CONTROL message.
"""
# get config path
config_path = AirbyteEntrypoint(source).extract_config(args)
# proceed only if `--config` arg is provided
if config_path:
# read the existing config
config = source.read_config(config_path)
# migration check
if cls.should_migrate(config):
cls.emit_control_message(
cls.modify_and_save(config_path, source, config),
)
|
JinaAiReaderConfigMigration
|
python
|
tornadoweb__tornado
|
demos/websocket/chatdemo.py
|
{
"start": 1400,
"end": 1536
}
|
class ____(tornado.web.RequestHandler):
def get(self):
self.render("index.html", messages=ChatSocketHandler.cache)
|
MainHandler
|
python
|
scipy__scipy
|
scipy/stats/_continuous_distns.py
|
{
"start": 69364,
"end": 70760
}
|
class ____(rv_continuous):
r"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is:
.. math::
f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different
distribution from the exponential power distribution that is also known
under the names "generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("b", False, (0, np.inf), (False, False))]
def _pdf(self, x, b):
# exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
|
exponpow_gen
|
python
|
huggingface__transformers
|
src/transformers/models/smollm3/modular_smollm3.py
|
{
"start": 13153,
"end": 13196
}
|
class ____(Qwen2Model):
pass
|
SmolLM3Model
|
python
|
spyder-ide__spyder
|
spyder/plugins/plots/plugin.py
|
{
"start": 426,
"end": 2730
}
|
class ____(SpyderDockablePlugin, ShellConnectPluginMixin):
"""
Plots plugin.
"""
NAME = 'plots'
REQUIRES = [Plugins.IPythonConsole]
TABIFY = [Plugins.VariableExplorer, Plugins.Help]
WIDGET_CLASS = PlotsWidget
CONF_SECTION = NAME
CONF_FILE = False
DISABLE_ACTIONS_WHEN_HIDDEN = False
# ---- SpyderDockablePlugin API
# ------------------------------------------------------------------------
@staticmethod
def get_name():
return _('Plots')
@staticmethod
def get_description():
return _('View, browse and save generated figures.')
@classmethod
def get_icon(cls):
return cls.create_icon('plot')
def on_initialize(self):
# If a figure is loaded, raise the dockwidget the first time
# a plot is generated.
self.get_widget().sig_figure_loaded.connect(self._on_first_plot)
# ---- Public API
# ------------------------------------------------------------------------
def add_plot(self, fig, fmt, shellwidget):
"""
Add a plot to the specified figure browser.
Add the plot to the figure browser with the given shellwidget. Also,
bring the plugin to the front and raise the window that it is in so
that the plot is shown.
If no figure browser with the given shellwidget exists, then nothing
happens.
"""
self.switch_to_plugin(force_focus=False)
self.get_widget().window().raise_()
self.get_widget().add_plot(fig, fmt, shellwidget)
# ---- Private API
# ------------------------------------------------------------------------
def _on_first_plot(self):
"""Actions to execute after the first plot is generated."""
# Only switch when inline plotting is muted. This avoids
# showing the plugin when users want to only see plots in
# the IPython console.
# Fixes spyder-ide/spyder#15467
if self.get_conf('mute_inline_plotting'):
self.switch_to_plugin(force_focus=False)
# We only give raise to the plugin once per session, to let users
# know that plots are displayed in this plugin.
# Fixes spyder-ide/spyder#15705
self.get_widget().sig_figure_loaded.disconnect(self._on_first_plot)
|
Plots
|
python
|
xlwings__xlwings
|
xlwings/conversion/framework.py
|
{
"start": 664,
"end": 1092
}
|
class ____(dict):
def __init__(self, original):
super(Options, self).__init__(original)
def override(self, **overrides):
self.update(overrides)
return self
def erase(self, keys):
for key in keys:
self.pop(key, None)
return self
def defaults(self, **defaults):
for k, v in defaults.items():
self.setdefault(k, v)
return self
|
Options
|
python
|
oauthlib__oauthlib
|
oauthlib/oauth2/rfc8628/errors.py
|
{
"start": 1564,
"end": 1684
}
|
class ____(OAuth2Error):
"""
The authorization request was denied.
"""
error = "access_denied"
|
AccessDenied
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/__init__.py
|
{
"start": 16629,
"end": 16694
}
|
class ____(CPPObject):
object_type = 'concept'
|
CPPConceptObject
|
python
|
jina-ai__jina
|
jina/clients/mixin.py
|
{
"start": 2822,
"end": 3709
}
|
class ____(MutateMixin):
"""The async GraphQL Mutation Mixin for Client and Flow"""
async def mutate(
self,
mutation: str,
variables: Optional[dict] = None,
timeout: Optional[float] = None,
headers: Optional[dict] = None,
):
"""Perform a GraphQL mutation, asynchronously
:param mutation: the GraphQL mutation as a single string.
:param variables: variables to be substituted in the mutation. Not needed if no variables are present in the mutation string.
:param timeout: HTTP request timeout
:param headers: HTTP headers
:return: dict containing the optional keys ``data`` and ``errors``, for response data and errors.
"""
return await get_or_reuse_loop().run_in_executor(
None, super().mutate, mutation, variables, timeout, headers
)
|
AsyncMutateMixin
|
python
|
tensorflow__tensorflow
|
tensorflow/dtensor/python/tests/mesh_util_test.py
|
{
"start": 1205,
"end": 10709
}
|
class ____(test_util.DTensorBaseTest):
"""Tests for mesh_util that do not require accelerator initialization."""
def test_mesh_creation(self):
self.skipForDeviceType(
['TPU'], reason='Test is intended for CPUs and GPUs.'
)
mesh = mesh_util.create_mesh()
num_devices = len(test_util.list_local_logical_devices(mesh.device_type()))
self.assertEqual(mesh.num_local_devices(), num_devices)
self.assertEqual(mesh.size, num_devices)
def test_mesh_dict_creation(self):
self.skipForDeviceType(
['TPU'], reason='Test is intended for CPUs and GPUs.'
)
num_devices = len(test_util.list_local_logical_devices('CPU'))
mesh = mesh_util.create_mesh({'x': num_devices, 'y': 1}, device_type='CPU')
num_devices = len(test_util.list_local_logical_devices(mesh.device_type()))
self.assertEqual(mesh.num_local_devices(), num_devices)
self.assertEqual(mesh.dim_names, ['x', 'y'])
self.assertEqual(mesh.size, num_devices)
def test_tpu_mesh_creation(self):
self.skipForDeviceType(['CPU', 'GPU'], reason='Test is intended for TPUs.')
mesh = mesh_util.create_mesh(mesh_name='1d_mesh', device_type='TPU')
num_devices = len(test_util.list_local_logical_devices('TPU'))
self.assertEqual(mesh.num_local_devices(), num_devices)
self.assertEqual(mesh.size, num_devices)
@parameterized.named_parameters(('use_xla_spmd', True),
('do_not_use_xla_spmd', False))
def test_tpu_2d_mesh_creation(self, use_xla_spmd):
self.skipForDeviceType(['CPU', 'GPU'], reason='Test is intended for TPUs.')
self.skipForDeviceType(['TPU'],
reason='Test requires exactly 2 cores',
unless_device_count_equals_to=2)
devices = test_util.list_local_logical_devices('TPU')
self.assertLen(devices, 2)
mesh = mesh_util.create_mesh([('x', 2), ('y', 1)],
device_type='TPU',
use_xla_spmd=use_xla_spmd)
self.assertEqual(mesh.num_local_devices(), 2)
self.assertEqual(mesh.size, 2)
self.assertAllEqual(mesh.dim_names, ['x', 'y'])
self.assertEqual(mesh.use_xla_spmd(), use_xla_spmd)
def test_tpu_2d_mesh_creation_with_devices(self):
self.skipForDeviceType(['CPU', 'GPU'], reason='Test is intended for TPUs.')
self.skipForDeviceType(['TPU'],
reason='Test requires at least 2 cores',
unless_device_count_equals_to=2)
devices = test_util.list_local_logical_devices('TPU')
self.assertLen(devices, 2)
mesh = mesh_util.create_mesh([('x', 2), ('y', 1)],
devices=['/device:tpu:0', '/device:tpu:1'])
self.assertEqual(mesh.num_local_devices(), 2)
self.assertEqual(mesh.size, 2)
self.assertAllEqual(mesh.dim_names, ['x', 'y'])
def test_tpu_2d_mesh_creation_with_device_specs(self):
self.skipForDeviceType(['CPU', 'GPU'], reason='Test is intended for TPUs.')
self.skipForDeviceType(['TPU'],
reason='Test requires at least 2 cores',
unless_device_count_equals_to=2)
devices = test_util.list_local_logical_devices('TPU')
self.assertLen(devices, 2)
mesh = mesh_util.create_mesh(
[('x', 2), ('y', 1)],
devices=[
tf_device.DeviceSpec.from_string('/tpu:0'),
tf_device.DeviceSpec.from_string('/tpu:1'),
],
)
self.assertEqual(mesh.num_local_devices(), 2)
self.assertEqual(mesh.size, 2)
self.assertAllEqual(mesh.dim_names, ['x', 'y'])
def test_single_client_mesh_creation(self):
self.skipForDeviceType(['GPU', 'TPU'], reason='Test is intended for CPUs')
num_devices = len(test_util.list_local_logical_devices('CPU'))
with mock.patch.object(accelerator_util,
'is_initialized') as is_initialized:
is_initialized.return_value = True
mesh = mesh_util.create_distributed_mesh(
mesh_name='single_client_1d_mesh', mesh_dims=[('x', num_devices)])
self.assertEqual(mesh.num_local_devices(), num_devices)
self.assertEqual(mesh.size, num_devices)
def test_single_client_mesh_dict_creation(self):
self.skipForDeviceType(['GPU', 'TPU'], reason='Test is intended for CPUs')
num_devices = len(test_util.list_local_logical_devices('CPU'))
with mock.patch.object(
accelerator_util, 'is_initialized'
) as is_initialized:
is_initialized.return_value = True
mesh = mesh_util.create_distributed_mesh(
mesh_name='single_client_1d_mesh',
mesh_dims={'x': num_devices, 'y': 1},
)
self.assertEqual(mesh.num_local_devices(), num_devices)
self.assertEqual(mesh.dim_names, ['x', 'y'])
self.assertEqual(mesh.size, num_devices)
def test_single_client_mesh_with_local_devices(self):
self.skipForDeviceType(['GPU', 'TPU'], reason='Test is intended for CPUs')
with mock.patch.object(accelerator_util,
'is_initialized') as is_initialized:
is_initialized.return_value = True
mesh = mesh_util.create_distributed_mesh(
mesh_name='single_client_1d_mesh',
mesh_dims=[('x', 1)],
local_devices=['CPU:0'])
self.assertEqual(mesh.num_local_devices(), 1)
self.assertEqual(mesh.size, 1)
def test_create_distributed_mesh_requires_initialize(self):
self.skipForDeviceType(['GPU', 'TPU'], reason='Test is intended for CPUs')
with mock.patch.object(accelerator_util,
'is_initialized') as is_initialized:
is_initialized.return_value = False
with self.assertRaisesRegex(ValueError, 'Accelerators are uninitialized'):
_ = mesh_util.create_distributed_mesh(
mesh_name='single_client_1d_mesh',
mesh_dims=[('x', 1)],
local_devices=['CPU:0'])
  def test_single_client_mesh_creation_wrong_shape(self):
    """Requesting more mesh slots than available devices raises ValueError."""
    self.skipForDeviceType(['GPU', 'TPU'], reason='Test is intended for CPUs')
    num_devices = len(test_util.list_local_logical_devices('CPU'))
    with mock.patch.object(accelerator_util,
                           'is_initialized') as is_initialized:
      is_initialized.return_value = True
      with self.assertRaisesRegex(ValueError,
                                  'must be equal to total size of the mesh'):
        mesh_util.create_distributed_mesh(
            mesh_name='single_client_1d_mesh',
            mesh_dims=[('x', num_devices * 2)])
  def test_single_client_mesh_creation_using_fewer_devices(self):
    """A mesh may use only a subset of the available logical devices."""
    self.skipForDeviceType(['GPU', 'TPU'], reason='Test is intended for CPUs')
    test_util.reset_logical_devices('CPU', 4)
    with mock.patch.object(accelerator_util,
                           'is_initialized') as is_initialized:
      is_initialized.return_value = True
      mesh = mesh_util.create_distributed_mesh(
          mesh_name='single_client_1d_mesh',
          mesh_dims=[('x', 2)],
          local_devices=['CPU:0', 'CPU:1'])
      self.assertEqual(mesh.num_local_devices(), 2)
      self.assertEqual(mesh.size, 2)
      # Creating an identical mesh a second time yields the same result;
      # presumably this exercises repeated creation under one name — confirm.
      mesh = mesh_util.create_distributed_mesh(
          mesh_name='single_client_1d_mesh',
          mesh_dims=[('x', 2)],
          local_devices=['CPU:0', 'CPU:1'])
      self.assertEqual(mesh.num_local_devices(), 2)
      self.assertEqual(mesh.size, 2)
  def test_single_client_mesh_creation_with_xla_spmd_raises_error(self):
    """use_xla_spmd is rejected for non-TPU device types."""
    self.skipForDeviceType(['TPU'],
                           reason='Test is intended for non TPU devices')
    test_util.reset_logical_devices('CPU', 4)
    with mock.patch.object(accelerator_util,
                           'is_initialized') as is_initialized:
      is_initialized.return_value = True
      # NOTE(review): the double negative matches the library's actual error
      # message text; fixing it only here would break the regex match.
      with self.assertRaisesRegex(
          ValueError, 'XLA SPMD is not currently not supported for'):
        mesh_util.create_distributed_mesh(
            mesh_name='single_client_mesh',
            mesh_dims=[('x', 2)],
            local_devices=['CPU:0', 'CPU:1'],
            use_xla_spmd=True)
  @mock.patch.object(config, 'num_clients')
  @mock.patch.object(accelerator_util, 'is_initialized')
  def test_multi_client_mesh_creation(self, num_clients, is_initialized):
    """Two mocked clients each own half the devices of a shared 1D mesh."""
    self.skipForDeviceType(['GPU', 'TPU'], reason='Test is intended for CPUs')
    # NOTE(review): the decorator-injected mocks above are immediately
    # shadowed by the context-manager patches below; the decorators look
    # redundant — confirm before removing.
    with mock.patch.object(accelerator_util,
                           'is_initialized') as is_initialized:
      with mock.patch.object(config, 'num_clients') as num_clients:
        num_clients.return_value = 2
        is_initialized.return_value = True
        test_util.reset_context()
        cpus = tf_config.list_physical_devices('CPU')
        tf_config.set_logical_device_configuration(
            cpus[0], [context.LogicalDeviceConfiguration()] * 4
        )
        # Client 0 contributes CPU:0-1, client 1 contributes CPU:2-3; each
        # sees 2 local devices but a global mesh of size 4.
        with mock.patch.object(config, 'client_id', return_value=0):
          mesh_1 = mesh_util.create_distributed_mesh(
              mesh_name='multi_client_1d_mesh_1',
              mesh_dims=[('x', 4)],
              local_devices=['CPU:0', 'CPU:1'])
          self.assertEqual(mesh_1.num_local_devices(), 2)
          self.assertEqual(mesh_1.size, 4)
        with mock.patch.object(config, 'client_id', return_value=1):
          mesh_2 = mesh_util.create_distributed_mesh(
              mesh_name='multi_client_1d_mesh_2',
              mesh_dims=[('x', 4)],
              local_devices=['CPU:2', 'CPU:3'])
          self.assertEqual(mesh_2.num_local_devices(), 2)
          self.assertEqual(mesh_2.size, 4)
|
MeshUtilTest
|
python
|
PyCQA__pylint
|
tests/functional/u/undefined/undefined_variable_py30.py
|
{
"start": 2500,
"end": 2658
}
|
class ____(type):
def __new__(mcs, *args, parameter=None, **kwargs):
print(parameter)
return super().__new__(mcs, *args, **kwargs)
|
MetaClass
|
python
|
huggingface__transformers
|
src/transformers/models/idefics2/processing_idefics2.py
|
{
"start": 1565,
"end": 11531
}
|
class ____(ProcessorMixin):
    r"""
    Constructs a IDEFICS2 processor which wraps a LLama tokenizer and IDEFICS2 image processor into a single processor.
    [`IdeficsProcessor`] offers all the functionalities of [`Idefics2ImageProcessor`] and [`LlamaTokenizerFast`]. See
    the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
    Args:
        image_processor (`Idefics2ImageProcessor`):
            An instance of [`Idefics2ImageProcessor`]. The image processor is a required input.
        tokenizer (`PreTrainedTokenizerBase`, *optional*):
            An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
        image_seq_len (`int`, *optional*, defaults to 64):
            The length of the image sequence i.e. the number of <image> tokens per image in the input.
            This parameter is used to build the string from the input prompt and image tokens and should match the
            config.perceiver_config.resampler_n_latents value for the model used.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    """

    def __init__(
        self, image_processor, tokenizer=None, image_seq_len: int = 64, chat_template: Optional[str] = None, **kwargs
    ):
        # Tokenizers without a dedicated `image_token` attribute need the
        # image marker tokens registered here; otherwise reuse the
        # tokenizer's own tokens.
        if not hasattr(tokenizer, "image_token"):
            self.fake_image_token = AddedToken("<fake_token_around_image>", normalized=False, special=True).content
            self.image_token = AddedToken("<image>", normalized=False, special=True).content
            tokens_to_add = {"additional_special_tokens": [self.fake_image_token, self.image_token]}
            tokenizer.add_special_tokens(tokens_to_add)
            self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
        else:
            self.fake_image_token = tokenizer.image_boundary_token
            self.image_token = tokenizer.image_token
            self.image_token_id = tokenizer.image_token_id
        # End-of-utterance marker is always registered.
        self.end_of_utterance_token = AddedToken("<end_of_utterance>", normalized=False, special=True)
        tokenizer.add_special_tokens({"additional_special_tokens": [self.end_of_utterance_token]})
        self.image_seq_len = image_seq_len
        super().__init__(image_processor, tokenizer, chat_template=chat_template)

    def _extract_images_from_prompts(self, prompts):
        """Collect the image objects (loading URLs) embedded in each prompt."""
        prompt_images = []
        for prompt in prompts:
            images = []
            for elem in prompt:
                if is_valid_image(elem):
                    images.append(elem)
                elif is_url(elem):
                    images.append(load_image(elem))
            prompt_images.append(images)
        return prompt_images

    def __call__(
        self,
        images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]] = None,
        text: Union[TextInput, "PreTokenizedInput", list[TextInput], list["PreTokenizedInput"]] = None,
        **kwargs: Unpack[Idefics2ProcessorKwargs],
    ) -> BatchFeature:
        """
        Processes the input prompts and returns a BatchEncoding.
        Example:
        ```python
        >>> import requests
        >>> from transformers import Idefics2Processor
        >>> from transformers.image_utils import load_image
        >>> processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2)
        >>> processor.image_processor.do_image_splitting = False  # Force as False to simplify the example
        >>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
        >>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
        >>> image1, image2 = load_image(url1), load_image(url2)
        >>> images = [[image1], [image2]]
        >>> text = [
        ...     "<image>In this image, we see",
        ...     "bla bla bla<image>",
        ... ]
        >>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
        >>> input_ids = outputs.input_ids
        >>> input_tokens = processor.tokenizer.batch_decode(input_ids)
        >>> print(input_tokens)
        ['<s><fake_token_around_image><image><image><fake_token_around_image> In this image, we see', '<s> bla bla bla<fake_token_around_image><image><image><fake_token_around_image>']
        ```
        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. If is of type `list[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
            text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
                Wherever an image token, `<image>` is encountered it is expanded to
                `<fake_token_around_image>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`.
            return_tensors (`Union[str, TensorType]`, *optional*):
                If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
                information.
        """
        if text is None and images is None:
            raise ValueError("You must provide either `text` or `images`.")
        output_kwargs = self._merge_kwargs(
            Idefics2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        n_images_in_text = []
        inputs = {}
        if text is not None:
            if isinstance(text, str):
                text = [text]
            # NOTE(review): this only raises when text is neither a list nor
            # has a str first element; presumably `or` was intended — it
            # matches the established processor convention, so left as-is.
            elif not isinstance(text, list) and not isinstance(text[0], str):
                raise ValueError("Invalid input text. Please provide a string, or a list of strings")
            # Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
            fake_image_token = self.fake_image_token
            image_token = self.image_token
            image_str = f"{fake_image_token}{image_token * self.image_seq_len}{fake_image_token}"
            if self.image_processor.do_image_splitting:
                # A single image token is split into 4 patches + 1 original image
                image_str = image_str * 5
            prompt_strings = []
            for sample in text:
                n_images_in_text.append(sample.count(image_token))
                sample = sample.replace(image_token, image_str)
                # Remove any double fake tokens if images are adjacent
                sample = sample.replace(f"{fake_image_token}{fake_image_token}", f"{fake_image_token}")
                # Fix: do NOT insert whitespace after the closing fake token.
                # A previous revision rewrote `<fake_token_around_image>word`
                # into `<fake_token_around_image> word`, silently mutating the
                # user's prompt and changing its tokenization.
                prompt_strings.append(sample)
            text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
            self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
            inputs.update(text_inputs)
        if images is not None:
            if is_image_or_image_url(images):
                # A single image becomes a batch of one prompt with one image.
                images = [[images]]
            elif isinstance(images, (list, tuple)) and is_image_or_image_url(images[0]):
                if text is not None:
                    if sum(n_images_in_text) != len(images):
                        raise ValueError(
                            f"The total number of {image_token} tokens in the prompts should be the same as the number of images passed."
                            f" Found {sum(n_images_in_text)} {image_token} tokens and {len(images)} images."
                        )
                    # Reorganize the images to match the prompts
                    cumsum_images_in_text = [0] + list(accumulate(n_images_in_text))
                    images = [
                        images[cumsum_images_in_text[i] : cumsum_images_in_text[i + 1]]
                        for i in range(len(n_images_in_text))
                    ]
                else:
                    images = [images]
            elif (
                not isinstance(images, (list, tuple))
                and not isinstance(images[0], (list, tuple))
                and not is_image_or_image_url(images[0][0])
            ):
                raise ValueError(
                    "Invalid input images. Please provide a single image or a list of images or a list of list of images."
                )
            n_images_in_images = [len(sample) for sample in images]
            if text is not None and not n_images_in_images == n_images_in_text:
                raise ValueError(
                    f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
                )
            # Load images if they are URLs
            images = [[load_image(im) for im in sample] for sample in images]
            image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
            inputs.update(image_inputs)
        return BatchFeature(inputs, tensor_type=return_tensors)
__all__ = ["Idefics2Processor"]
|
Idefics2Processor
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_redirects.py
|
{
"start": 4167,
"end": 5798
}
|
class ____(TestCase):
    """Tests for Redirect.get_full_path URL construction."""

    # Django fixtures providing the 'read-the-docs' project and related data.
    fixtures = ["eric", "test_data"]

    def setUp(self):
        self.proj = Project.objects.get(slug="read-the-docs")
        self.redirect = get(Redirect, project=self.proj)

    def test_http_filenames_return_themselves(self):
        # If the crossdomain flag is False (default), then we don't redirect to a different host
        self.assertEqual(
            self.redirect.get_full_path("http://rtfd.org"),
            "/en/latest/http://rtfd.org",
        )
        # With allow_crossdomain=True the absolute URL is returned untouched.
        self.assertEqual(
            self.redirect.get_full_path("http://rtfd.org", allow_crossdomain=True),
            "http://rtfd.org",
        )

    def test_redirects_no_subdomain(self):
        self.assertEqual(
            self.redirect.get_full_path("index.html"),
            "/en/latest/index.html",
        )

    @override_settings(
        PRODUCTION_DOMAIN="rtfd.org",
    )
    def test_redirects_with_subdomain(self):
        self.assertEqual(
            self.redirect.get_full_path("faq.html"),
            "/en/latest/faq.html",
        )

    @override_settings(
        PRODUCTION_DOMAIN="rtfd.org",
    )
    def test_single_version_with_subdomain(self):
        # Single-version projects drop the /<lang>/<version>/ prefix.
        self.redirect.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
        self.assertEqual(
            self.redirect.get_full_path("faq.html"),
            "/faq.html",
        )

    def test_single_version_no_subdomain(self):
        self.redirect.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
        self.assertEqual(
            self.redirect.get_full_path("faq.html"),
            "/faq.html",
        )
|
GetFullPathTests
|
python
|
google__jax
|
jax/_src/pallas/core.py
|
{
"start": 13044,
"end": 13685
}
|
class ____:
"""Helper class that checks for index_map equality."""
def __init__(self, index_map):
self.index_map = index_map
functools.update_wrapper(self, self.index_map)
def __eq__(self, other: object):
if not isinstance(other, _IndexMapFunc):
return NotImplemented
return self.index_map == other.index_map
def __call__(self, *args, **kwargs):
out_indices = self.index_map(*args, **kwargs)
if isinstance(out_indices, list):
out_indices = tuple(out_indices)
if not isinstance(out_indices, tuple):
out_indices = (out_indices,)
return out_indices
@dataclasses.dataclass
|
_IndexMapFunc
|
python
|
astropy__astropy
|
astropy/nddata/mixins/ndio.py
|
{
"start": 1880,
"end": 3395
}
|
class ____(registry.UnifiedReadWrite):
    """Write this CCDData object out in the specified format.
    This function provides the NDData interface to the astropy unified I/O
    layer. This allows easily writing a file in many supported data formats
    using syntax such as::
    >>> from astropy.nddata import CCDData
    >>> dat = CCDData(np.zeros((12, 12)), unit='adu')  # 12x12 image of zeros
    >>> dat.write('zeros.fits')
    Get help on the available writers for ``CCDData`` using the``help()`` method::
    >>> CCDData.write.help()  # Get help writing CCDData and list supported formats
    >>> CCDData.write.help('fits')  # Get detailed help on CCDData FITS writer
    >>> CCDData.write.list_formats()  # Print list of available formats
    For more information see:
    - https://docs.astropy.org/en/stable/nddata
    - https://docs.astropy.org/en/stable/io/unified.html
    Parameters
    ----------
    *args : tuple, optional
        Positional arguments passed through to data writer. If supplied the
        first argument is the output filename.
    format : str, optional
        File format specifier.
    **kwargs : dict, optional
        Keyword arguments passed through to data writer.
    Notes
    -----
    """

    def __init__(self, instance, cls):
        # Register as the "write" half of the unified I/O interface.
        super().__init__(instance, cls, "write", registry=None)
        # uses default global registry

    def __call__(self, *args, **kwargs):
        # Delegate to the registry, passing the bound NDData instance first.
        self.registry.write(self._instance, *args, **kwargs)
|
NDDataWrite
|
python
|
doocs__leetcode
|
solution/1600-1699/1664.Ways to Make a Fair Array/Solution.py
|
{
"start": 0,
"end": 417
}
|
class ____:
def waysToMakeFair(self, nums: List[int]) -> int:
s1, s2 = sum(nums[::2]), sum(nums[1::2])
ans = t1 = t2 = 0
for i, v in enumerate(nums):
ans += i % 2 == 0 and t2 + s1 - t1 - v == t1 + s2 - t2
ans += i % 2 == 1 and t2 + s1 - t1 == t1 + s2 - t2 - v
t1 += v if i % 2 == 0 else 0
t2 += v if i % 2 == 1 else 0
return ans
|
Solution
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/multiindex_object.py
|
{
"start": 10819,
"end": 11532
}
|
class ____:
def setup(self):
N = 10**5
level1 = range(1_000)
level2 = date_range(start="1/1/2000", periods=N // 1000)
self.midx = MultiIndex.from_product([level1, level2])
level1 = range(1_000, 2_000)
self.midx_values = MultiIndex.from_product([level1, level2])
level2 = date_range(start="1/1/2010", periods=N // 1000)
self.midx_values_different = MultiIndex.from_product([level1, level2])
self.mask = np.array([True, False] * (N // 2))
def time_putmask(self):
self.midx.putmask(self.mask, self.midx_values)
def time_putmask_all_different(self):
self.midx.putmask(self.mask, self.midx_values_different)
|
Putmask
|
python
|
huggingface__transformers
|
src/transformers/models/data2vec/modeling_data2vec_audio.py
|
{
"start": 4543,
"end": 5067
}
|
class ____(nn.Module):
    """Stack of positional convolution layers applied over the time axis.

    Input arrives channels-last; each conv layer operates channels-first,
    so the tensor is transposed on the way in and back on the way out.
    """

    def __init__(self, config):
        super().__init__()
        conv_layers = [
            Data2VecAudioPositionalConvLayer(config)
            for _ in range(config.num_conv_pos_embeddings)
        ]
        self.layers = nn.ModuleList(conv_layers)

    def forward(self, hidden_states):
        # Conv layers expect channels-first input.
        hidden_states = hidden_states.transpose(1, 2)
        for conv_layer in self.layers:
            hidden_states = conv_layer(hidden_states)
        # Restore the original channels-last layout.
        return hidden_states.transpose(1, 2)
|
Data2VecAudioPositionalConvEmbedding
|
python
|
pypa__warehouse
|
warehouse/manage/forms.py
|
{
"start": 19913,
"end": 21802
}
|
class ____(wtforms.Form):
    """Collects an organization's public profile for create/update views."""

    # Field names consumed by the manage views, in render order.
    __params__ = ["display_name", "link_url", "description", "orgtype"]

    # Human-readable organization name (required, <= 100 chars).
    display_name = wtforms.StringField(
        validators=[
            wtforms.validators.InputRequired(message="Specify your organization name"),
            wtforms.validators.Length(
                max=100,
                message=_(
                    "The organization name is too long. "
                    "Choose a organization name with 100 characters or less."
                ),
            ),
        ]
    )
    # Organization homepage (required, <= 400 chars, must be http(s)).
    link_url = wtforms.URLField(
        validators=[
            wtforms.validators.InputRequired(message="Specify your organization URL"),
            wtforms.validators.Length(
                max=400,
                message=_(
                    "The organization URL is too long. "
                    "Choose a organization URL with 400 characters or less."
                ),
            ),
            wtforms.validators.Regexp(
                r"^https?://",
                message=_("The organization URL must start with http:// or https://"),
            ),
        ]
    )
    # Free-form description (required, <= 400 chars).
    description = wtforms.TextAreaField(
        validators=[
            wtforms.validators.InputRequired(
                message="Specify your organization description"
            ),
            wtforms.validators.Length(
                max=400,
                message=_(
                    "The organization description is too long. "
                    "Choose a organization description with 400 characters or less."
                ),
            ),
        ]
    )
    # NOTE(review): several messages read "a organization"; fixing the grammar
    # would change translatable runtime strings, so it is flagged, not edited.
    orgtype = wtforms.SelectField(
        choices=[("Company", "Company"), ("Community", "Community")],
        default=None,
        coerce=OrganizationType,
        validators=[
            wtforms.validators.InputRequired(message="Select organization type"),
        ],
    )
|
SaveOrganizationForm
|
python
|
pytorch__pytorch
|
torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py
|
{
"start": 17267,
"end": 17683
}
|
class ____(RpcAgentTestFixture):
    """Shared fixture for DDP-vs-RPC comparison tests."""

    @property
    def world_size(self) -> int:
        # One process per trainer.
        return NUM_TRAINERS

    def trainer_name(self, rank):
        # The name has to be consistent with that in 'dist_init' decorator.
        return f"worker{rank}"

    @staticmethod
    def get_remote_grads(rref, context_id):
        # Runs on the rref's owner: look up the autograd-context gradients
        # for the remote module's weight parameter.
        return dist_autograd.get_gradients(context_id)[rref.local_value().weight]
CommonDdpComparisonTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/operators/py_builtins_test.py
|
{
"start": 1595,
"end": 27219
}
|
class ____(test.TestCase):
  def test_abs(self):
    # Scalars use builtin abs; tensors are evaluated through the session.
    self.assertEqual(py_builtins.abs_(-1), 1)
    with self.cached_session() as sess:
      t = py_builtins.abs_(constant_op.constant(-1))
      self.assertEqual(self.evaluate(t), 1)
      t = py_builtins.abs_(constant_op.constant([-1, 2, -3]))
      self.assertAllEqual(self.evaluate(t), [1, 2, 3])

  def test_abs_dataset(self):
    # abs_ maps elementwise over a tf.data.Dataset.
    dataset = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
    dataset = py_builtins.abs_(dataset)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(iterator.get_next()), 1)
      self.assertAllEqual(self.evaluate(iterator.get_next()), 2)
      self.assertAllEqual(self.evaluate(iterator.get_next()), 3)

  def test_abs_dataset_zipped(self):
    # abs_ applies to each component of a zipped dataset's tuple elements.
    dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
    dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([1, -2, 3])
    dataset = dataset_ops.DatasetV2.zip((dataset_1, dataset_2))
    dataset = py_builtins.abs_(dataset)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(iterator.get_next()), (1, 1))
      self.assertAllEqual(self.evaluate(iterator.get_next()), (2, 2))
      self.assertAllEqual(self.evaluate(iterator.get_next()), (3, 3))

  def test_abs_dataset_mixed(self):
    # Nested dataset structures are handled recursively.
    dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
    dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([1, -2, 3])
    dataset_3 = dataset_ops.DatasetV2.from_tensor_slices([-1, -2, -3])
    dataset_4 = dataset_ops.DatasetV2.zip((dataset_1, dataset_2))
    dataset = dataset_ops.DatasetV2.zip((dataset_3, dataset_4))
    dataset = py_builtins.abs_(dataset)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    with self.cached_session() as sess:
      for i in range(1, 4):
        actual = self.evaluate(iterator.get_next())
        self.assertAllEqual(actual[0], i)
        self.assertAllEqual(actual[1], (i, i))
  def test_float(self):
    # float_ mirrors builtin float for Python values and casts tensors.
    self.assertEqual(py_builtins.float_(10), 10.0)
    self.assertEqual(py_builtins.float_('10.0'), 10.0)
    with self.cached_session() as sess:
      t = py_builtins.float_(constant_op.constant(1, dtype=dtypes.int64))
      self.assertEqual(self.evaluate(t), 1.0)
      st = py_builtins.float_(constant_op.constant('1.0'))
      self.assertEqual(self.evaluate(st), 1.0)

  def test_int(self):
    # int_ mirrors builtin int, including the optional base for strings.
    self.assertEqual(py_builtins.int_(10.0), 10)
    self.assertEqual(py_builtins.int_('11', 2), 3)
    with self.cached_session() as sess:
      t = py_builtins.int_(constant_op.constant(1, dtype=dtypes.float64))
      self.assertEqual(self.evaluate(t), 1)
      st = py_builtins.int_(constant_op.constant('1'))
      self.assertEqual(self.evaluate(st), 1)
      st = py_builtins.int_(constant_op.constant('1'), 10)
      self.assertEqual(self.evaluate(st), 1)

  def test_int_unsupported_base(self):
    # Non-decimal bases are only supported for Python values, not tensors.
    t = constant_op.constant(1, dtype=dtypes.float64)
    with self.assertRaises(NotImplementedError):
      py_builtins.int_(t, 2)
  def test_len(self):
    # len_ covers lists, tensors (leading dim), TensorArrays and tensor lists.
    self.assertEqual(py_builtins.len_([1, 2, 3]), 3)
    with self.cached_session() as sess:
      t = py_builtins.len_(constant_op.constant([[1], [2], [3]]))
      self.assertEqual(t, 3)
      ta = py_builtins.len_(tensor_array_ops.TensorArray(dtypes.int32, size=5))
      self.assertEqual(self.evaluate(ta), 5)
      tl = py_builtins.len_(data_structures.tf_tensor_list_new([3, 4, 5]))
      self.assertEqual(self.evaluate(tl), 3)

  def test_len_dataset(self):
    dataset = dataset_ops.DatasetV2.from_tensor_slices([3, 2, 1])
    self.assertEqual(self.evaluate(py_builtins.len_(dataset)), 3)
    # graph mode
    @def_function.function(autograph=False)
    def test_fn():
      dataset = dataset_ops.DatasetV2.from_tensor_slices([3, 2, 1])
      return py_builtins.len_(dataset)
    self.assertEqual(self.evaluate(test_fn()), 3)

  def test_len_dataset_infinite(self):
    # An infinite dataset has no length; evaluation must fail.
    dataset = dataset_ops.DatasetV2.range(5).repeat().batch(2)
    with self.assertRaises(errors_impl.InvalidArgumentError):
      _ = self.evaluate(py_builtins.len_(dataset))
    # graph mode
    @def_function.function
    def test_fn():
      dataset = dataset_ops.DatasetV2.range(5).repeat().batch(2)
      return py_builtins.len_(dataset)
    with self.assertRaises(errors_impl.InvalidArgumentError):
      self.evaluate(test_fn())

  def test_len_dataset_unknown(self):
    # filter() makes the cardinality statically unknown.
    dataset = dataset_ops.DatasetV2.range(5).filter(lambda _: True).batch(2)
    with self.assertRaises(errors_impl.InvalidArgumentError):
      _ = self.evaluate(py_builtins.len_(dataset))
    # graph mode
    @def_function.function(autograph=False)
    def test_fn():
      dataset = dataset_ops.DatasetV2.range(5).filter(lambda _: True).batch(2)
      return py_builtins.len_(dataset)
    with self.assertRaises(errors_impl.InvalidArgumentError):
      self.evaluate(test_fn())

  def test_len_scalar(self):
    with self.assertRaises(ValueError):
      py_builtins.len_(constant_op.constant(1))

  @test_util.run_deprecated_v1
  def test_len_dynamic_shape(self):
    with self.cached_session() as sess:
      p = array_ops.placeholder(dtype=dtypes.int32, shape=None)
      t = py_builtins.len_(p)
      self.assertEqual(sess.run(t, {p: [1, 2, 3]}), 3)
      # Feeding a scalar makes the runtime rank check fail.
      with self.assertRaises(errors_impl.InvalidArgumentError):
        t = py_builtins.len_(p)
        sess.run(t, {p: 1})
  @test_util.run_deprecated_v1
  def test_print_tensors(self):
    # Capture stdout to verify the printed rendering of tensor values.
    try:
      out_capturer = io.StringIO()
      sys.stdout = out_capturer
      with self.cached_session() as sess:
        sess.run(py_builtins.print_(constant_op.constant('test message'), 1))
      self.assertEqual(out_capturer.getvalue(), 'test message 1\n')
    finally:
      sys.stdout = sys.__stdout__

  @test_util.run_deprecated_v1
  def test_print_complex(self):
    # Same as above, with a structured (list) argument.
    try:
      out_capturer = io.StringIO()
      sys.stdout = out_capturer
      with self.cached_session() as sess:
        sess.run(
            py_builtins.print_(constant_op.constant('test message'), [1, 2]))
      self.assertEqual(out_capturer.getvalue(), 'test message [1, 2]\n')
    finally:
      sys.stdout = sys.__stdout__
  def test_max(self):
    self.assertEqual(py_builtins.max_([1, 3, 2]), 3)
    self.assertEqual(py_builtins.max_(0, 2, 1), 2)

  def test_max_tensor(self):
    # Tensor max_ only supports scalars and rank-1 tensors.
    r = py_builtins.max_(constant_op.constant(2))
    self.assertAllEqual(self.evaluate(r), 2)
    with self.assertRaises(ValueError):
      py_builtins.max_(constant_op.constant([[2]]))
    r = py_builtins.max_(constant_op.constant([1, 3, 2]))
    self.assertAllEqual(self.evaluate(r), 3)
    with self.assertRaises(ValueError):
      py_builtins.max_(constant_op.constant([[1, 3], [3, 4]]))
    r = py_builtins.max_(
        constant_op.constant(6), constant_op.constant(4),
        constant_op.constant(8))
    self.assertAllEqual(self.evaluate(r), 8)
    with self.assertRaises(ValueError):
      py_builtins.max_(
          constant_op.constant([6]), constant_op.constant(4),
          constant_op.constant(8))

  def test_min(self):
    self.assertEqual(py_builtins.min_([2, 1, 3]), 1)
    self.assertEqual(py_builtins.min_(2, 0, 1), 0)

  def test_min_tensor(self):
    # Mirror image of test_max_tensor.
    r = py_builtins.min_(constant_op.constant(2))
    self.assertAllEqual(self.evaluate(r), 2)
    with self.assertRaises(ValueError):
      py_builtins.min_(constant_op.constant([[2]]))
    r = py_builtins.min_(constant_op.constant([3, 1, 2]))
    self.assertAllEqual(self.evaluate(r), 1)
    with self.assertRaises(ValueError):
      py_builtins.min_(constant_op.constant([[1, 3], [3, 4]]))
    r = py_builtins.min_(
        constant_op.constant(6), constant_op.constant(4),
        constant_op.constant(8))
    self.assertAllEqual(self.evaluate(r), 4)
    with self.assertRaises(ValueError):
      py_builtins.min_(
          constant_op.constant([6]), constant_op.constant(4),
          constant_op.constant(8))
  def test_range(self):
    self.assertListEqual(list(py_builtins.range_(3)), [0, 1, 2])
    self.assertListEqual(list(py_builtins.range_(1, 3)), [1, 2])
    self.assertListEqual(list(py_builtins.range_(2, 0, -1)), [2, 1])

  def test_range_tensor(self):
    # Any tensor argument switches range_ to tensor semantics.
    with self.cached_session() as sess:
      r = py_builtins.range_(constant_op.constant(3))
      self.assertAllEqual(self.evaluate(r), [0, 1, 2])
      r = py_builtins.range_(1, constant_op.constant(3))
      self.assertAllEqual(self.evaluate(r), [1, 2])
      r = py_builtins.range_(2, 0, constant_op.constant(-1))
      self.assertAllEqual(self.evaluate(r), [2, 1])

  def test_range_tensor_empty_range(self):
    with self.session() as sess:
      r = py_builtins.range_(constant_op.constant(-3))
      self.assertAllEqual(self.evaluate(r), [])
      r = py_builtins.range_(5, constant_op.constant(2))
      self.assertAllEqual(self.evaluate(r), [])

  def test_enumerate(self):
    self.assertListEqual(
        list(py_builtins.enumerate_([3, 2, 1])), [(0, 3), (1, 2), (2, 1)])
    self.assertListEqual(
        list(py_builtins.enumerate_([3, 2, 1], 5)), [(5, 3), (6, 2), (7, 1)])
    self.assertListEqual(list(py_builtins.enumerate_([-8], -3)), [(-3, -8)])

  def test_enumerate_dataset(self):
    # enumerate_ on a dataset pairs each element with a running index.
    dataset = dataset_ops.DatasetV2.from_tensor_slices(['a', 'c'])
    start = constant_op.constant(20, dtype=dtypes.int64)
    dataset = py_builtins.enumerate_(dataset, start)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(iterator.get_next()), (20, b'a'))
      self.assertAllEqual(self.evaluate(iterator.get_next()), (21, b'c'))

  def test_zip(self):
    self.assertListEqual(
        list(py_builtins.zip_([3, 2, 1], [1, 2, 3])), [(3, 1), (2, 2), (1, 3)])
    self.assertListEqual(
        list(py_builtins.zip_([4, 5, 6], [-1, -2])), [(4, -1), (5, -2)])

  def test_zip_dataset(self):
    ds1 = dataset_ops.DatasetV2.from_tensor_slices([-11, -12, 4])
    ds2 = dataset_ops.DatasetV2.from_tensor_slices([-21, -22, 5])
    ds3 = py_builtins.zip_(ds1, ds2)
    iterator = dataset_ops.make_one_shot_iterator(ds3)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(iterator.get_next()), (-11, -21))
      self.assertAllEqual(self.evaluate(iterator.get_next()), (-12, -22))
      self.assertAllEqual(self.evaluate(iterator.get_next()), (4, 5))

  def test_map(self):
    def increment(x):
      return x + 1
    add_list = lambda x, y: x + y
    self.assertListEqual(
        list(py_builtins.map_(increment, [4, 5, 6])), [5, 6, 7])
    self.assertListEqual(
        list(py_builtins.map_(add_list, [3, 2, 1], [-1, -2, -3])), [2, 0, -2])

  def test_map_dataset(self):
    # map_ over a dataset behaves like Dataset.map.
    def increment(x):
      return x + 1
    ds1 = dataset_ops.DatasetV2.from_tensor_slices([4, 5, 6])
    ds2 = py_builtins.map_(increment, ds1)
    iterator = dataset_ops.make_one_shot_iterator(ds2)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(iterator.get_next()), 5)
      self.assertAllEqual(self.evaluate(iterator.get_next()), 6)
      self.assertAllEqual(self.evaluate(iterator.get_next()), 7)

  def test_map_multiple_datasets(self):
    add_list = lambda x, y: x + y
    ds1 = dataset_ops.DatasetV2.from_tensor_slices([-11, -12, 4])
    ds2 = dataset_ops.DatasetV2.from_tensor_slices([-21, -22, 5])
    ds3 = py_builtins.map_(add_list, ds1, ds2)
    iterator = dataset_ops.make_one_shot_iterator(ds3)
    with self.cached_session() as sess:
      self.assertAllEqual(self.evaluate(iterator.get_next()), -32)
      self.assertAllEqual(self.evaluate(iterator.get_next()), -34)
      self.assertAllEqual(self.evaluate(iterator.get_next()), 9)
  def test_next_normal(self):
    # next_ mirrors builtin next, including the optional default.
    iterator = iter([1, 2, 3])
    self.assertEqual(py_builtins.next_(iterator), 1)
    self.assertEqual(py_builtins.next_(iterator), 2)
    self.assertEqual(py_builtins.next_(iterator), 3)
    with self.assertRaises(StopIteration):
      py_builtins.next_(iterator)
    self.assertEqual(py_builtins.next_(iterator, 4), 4)

  def test_next_tf_iterator(self):
    # graph-mode iterators are only supported inside tf.function.
    @def_function.function(autograph=False)
    def test_fn(go_out_of_range, with_default):
      iterator = iter(dataset_ops.Dataset.range(3))
      retval = (
          py_builtins.next_(iterator),
          py_builtins.next_(iterator),
          py_builtins.next_(iterator),
      )
      if go_out_of_range:
        if with_default:
          retval += (
              py_builtins.next_(iterator,
                                constant_op.constant(-3, dtype=dtypes.int64)),
              py_builtins.next_(iterator,
                                constant_op.constant(-4, dtype=dtypes.int64)),
          )
        else:
          py_builtins.next_(iterator)
      return retval
    self.assertAllEqual(
        self.evaluate(test_fn(go_out_of_range=False, with_default=None)),
        (0, 1, 2))
    self.assertAllEqual(
        self.evaluate(test_fn(go_out_of_range=True, with_default=True)),
        (0, 1, 2, -3, -4))
    with self.assertRaises(errors_impl.OutOfRangeError):
      self.evaluate(test_fn(go_out_of_range=True, with_default=False))

  def test_next_tf_iterator_error_checking(self):
    # graph-mode iterators are only supported inside tf.function.
    @def_function.function(autograph=False)
    def test_fn():
      iterator = iter(dataset_ops.Dataset.range(1))
      py_builtins.next_(iterator)
      py_builtins.next_(iterator, constant_op.constant(-3))
    # Dataset.range defaults to int64,
    with self.assertRaisesRegex(TypeError, 'default.*int64'):
      self.evaluate(test_fn())

  def test_next_tf_iterator_error_checking_structures(self):
    # graph-mode iterators are only supported inside tf.function.
    @def_function.function(autograph=False)
    def test_fn(default_val):
      ds = dataset_ops.Dataset.range(1)
      ds = ds.map(lambda i: {'a': i + 1, 'b': i + 10})
      iterator = iter(ds)
      py_builtins.next_(iterator)
      py_builtins.next_(iterator, default_val)
    # The default must match the iterator's element structure and dtypes.
    default = {
        'a': constant_op.constant(3, dtype=dtypes.int64),
    }
    with self.assertRaisesRegex(TypeError, 'same element structure'):
      test_fn(default)
    default = {
        'a': constant_op.constant(3.0),
        'b': [constant_op.constant(30), constant_op.constant(300)]
    }
    with self.assertRaisesRegex(TypeError, 'same element structure'):
      test_fn(default)
    default = {
        'a': constant_op.constant(3.0),
        'b': constant_op.constant(30, dtype=dtypes.int64),
    }
    with self.assertRaisesRegex(TypeError, 'float32'):
      test_fn(default)
  def _basic_function_scope(self):
    # Helper: a FunctionScope matching the `with ... as test_scope` blocks
    # used by the original-context tests below.
    return function_wrappers.FunctionScope(
        'test_function_name',
        'test_scope',  # Note: this must match the name in the `with` statement.
        converter.ConversionOptions())

  def test_eval_in_original_context(self):
    def test_fn():
      l = 1 # pylint:disable=unused-variable
      with self._basic_function_scope() as test_scope:
        return py_builtins.eval_in_original_context(eval, ('l',), test_scope)
    self.assertEqual(test_fn(), 1)

  def test_eval_in_original_context_inner_function(self):
    def test_fn():
      l = 1 # pylint:disable=unused-variable
      with self._basic_function_scope() as test_scope:
        def inner_fn():
          # Note: a user function without a top-level function scope should
          # never be found in user code; it's only possible in generated code.
          l = 2 # pylint:disable=unused-variable
          return py_builtins.eval_in_original_context(eval, ('l',), test_scope)
        return inner_fn()
    self.assertEqual(test_fn(), 2)

  def test_locals_in_original_context(self):
    def test_fn():
      l = 1 # pylint:disable=unused-variable
      with self._basic_function_scope() as test_scope:
        return py_builtins.locals_in_original_context(test_scope)
    locs = test_fn()
    self.assertEqual(locs['l'], 1)

  def test_locals_in_original_context_inner_function(self):
    def test_fn():
      l = 1 # pylint:disable=unused-variable
      with self._basic_function_scope() as test_scope:
        def inner_fn():
          # Note: a user function without a top-level function scope should
          # never be found in user code; it's only possible in generated code.
          l = 2 # pylint:disable=unused-variable
          return py_builtins.locals_in_original_context(test_scope)
        return inner_fn()
    locs = test_fn()
    self.assertEqual(locs['l'], 2)

  def test_globals_in_original_context(self):
    def test_fn():
      with self._basic_function_scope() as test_scope:
        return py_builtins.globals_in_original_context(test_scope)
    globs = test_fn()
    self.assertIs(globs['TestBase'], TestBase)

  def test_globals_in_original_context_inner_function(self):
    def test_fn():
      with self._basic_function_scope() as test_scope:
        def inner_fn():
          # Note: a user function without a top-level function scope should
          # never be found in user code; it's only possible in generated code.
          return py_builtins.globals_in_original_context(test_scope)
        return inner_fn()
    globs = test_fn()
    self.assertIs(globs['TestBase'], TestBase)
def test_super_in_original_context_unary_call(self):
test_case_self = self
class TestSubclass(TestBase):
def overridden_method(self, x):
test_case_self.fail('This should never be called.')
def test_method(self):
with test_case_self._basic_function_scope() as test_scope:
test_base_unbound = py_builtins.super_in_original_context(
super, (TestSubclass,), test_scope)
test_base = test_base_unbound.__get__(self, TestSubclass)
return test_base.overridden_method(1)
tc = TestSubclass()
self.assertEqual(tc.test_method(), 21)
def test_super_in_original_context_binary_call(self):
test_case_self = self
class TestSubclass(TestBase):
def overridden_method(self, x):
test_case_self.fail('This should never be called.')
def test_method(self):
with test_case_self._basic_function_scope() as test_scope:
test_base = py_builtins.super_in_original_context(
super, (TestSubclass, self), test_scope)
return test_base.overridden_method(1)
tc = TestSubclass()
self.assertEqual(tc.test_method(), 21)
def test_super_in_original_context_niladic_call(self):
test_case_self = self
class TestSubclass(TestBase):
def overridden_method(self, x):
test_case_self.fail('This should never be called.')
def test_method(self):
with test_case_self._basic_function_scope() as test_scope:
b = py_builtins.super_in_original_context(super, (), test_scope)
return b.overridden_method(1)
tc = TestSubclass()
self.assertEqual(tc.test_method(), 21)
def test_super_in_original_context_caller_with_locals(self):
test_case_self = self
class TestSubclass(TestBase):
def overridden_method(self, x):
test_case_self.fail('This should never be called.')
def test_method(self, x):
y = 7
with test_case_self._basic_function_scope() as test_scope:
z = 7
return py_builtins.super_in_original_context(
super, (), test_scope).overridden_method(x + y - z)
tc = TestSubclass()
self.assertEqual(tc.test_method(1), 21)
def test_super_in_original_context_inner_function(self):
test_case_self = self
class TestSubclass(TestBase):
def overridden_method(self, x):
test_case_self.fail('This should never be called.')
def test_method(self, x):
with test_case_self._basic_function_scope() as test_scope:
# Oddly, it's sufficient to use `self` in an inner function
# to gain access to __class__ in this scope.
# TODO(mdan): Is this true across implementations?
# Note: normally, it's illegal to use super() in inner functions (it
# throws an error), but the generated code may create them.
def inner_fn():
return py_builtins.super_in_original_context(
super, (), test_scope).overridden_method(x)
return inner_fn()
tc = TestSubclass()
self.assertEqual(tc.test_method(1), 21)
def test_super_in_original_context_inner_lambda(self):
test_case_self = self
class TestSubclass(TestBase):
def overridden_method(self, x):
test_case_self.fail('This should never be called.')
def test_method(self, x):
with test_case_self._basic_function_scope() as test_scope:
# Oddly, it's sufficient to use `self` in an inner function
# to gain access to __class__ in this scope.
# TODO(mdan): Is this true across implementations?
# Note: normally, it's illegal to use super() in inner functions (it
# throws an error), but the generated code may create them.
l = lambda: py_builtins.super_in_original_context( # pylint:disable=g-long-lambda
super, (), test_scope).overridden_method(x)
return l()
tc = TestSubclass()
self.assertEqual(tc.test_method(1), 21)
def test_filter(self):
self.assertListEqual(
list(py_builtins.filter_(lambda x: x == 'b', ['a', 'b', 'c'])), ['b'])
self.assertListEqual(
list(py_builtins.filter_(lambda x: x < 3, [3, 2, 1])), [2, 1])
def test_filter_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices([3, 2, 1])
dataset = py_builtins.filter_(lambda x: x < 3, dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), 2)
self.assertAllEqual(self.evaluate(iterator.get_next()), 1)
def test_any(self):
self.assertEqual(py_builtins.any_([False, True, False]), True)
self.assertEqual(py_builtins.any_([False, False, False]), False)
def test_any_dataset(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([False, False, False])
self.assertEqual(self.evaluate(py_builtins.any_(dataset_1)), True)
self.assertEqual(self.evaluate(py_builtins.any_(dataset_2)), False)
dataset_3 = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2])
with self.assertRaises(ValueError):
py_builtins.any_(dataset_3)
dataset_4 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_zipped = dataset_ops.DatasetV2.zip((dataset_4, dataset_4))
with self.assertRaises(ValueError):
py_builtins.any_(dataset_zipped)
dataset_mixed = dataset_ops.DatasetV2.zip((dataset_3, dataset_4))
with self.assertRaises(ValueError):
py_builtins.any_(dataset_mixed)
def test_all(self):
self.assertEqual(py_builtins.all_([False, True, False]), False)
self.assertEqual(py_builtins.all_([True, True, True]), True)
def test_all_dataset(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([True, True, True])
self.assertEqual(self.evaluate(py_builtins.all_(dataset_1)), False)
self.assertEqual(self.evaluate(py_builtins.all_(dataset_2)), True)
dataset_3 = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2])
with self.assertRaises(ValueError):
py_builtins.all_(dataset_3)
dataset_4 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_zipped = dataset_ops.DatasetV2.zip((dataset_4, dataset_4))
with self.assertRaises(ValueError):
py_builtins.all_(dataset_zipped)
dataset_mixed = dataset_ops.DatasetV2.zip((dataset_3, dataset_4))
with self.assertRaises(ValueError):
py_builtins.all_(dataset_mixed)
def test_sorted(self):
self.assertListEqual(py_builtins.sorted_([2, 3, 1]), [1, 2, 3])
self.assertListEqual(
py_builtins.sorted_([2, 3, 1], key=lambda x: -x), [3, 2, 1])
self.assertListEqual(
py_builtins.sorted_([2, 3, 1], reverse=True), [3, 2, 1])
self.assertListEqual(
py_builtins.sorted_([2, 3, 1], key=lambda x: -x, reverse=True),
[1, 2, 3])
self.assertAllEqual(
py_builtins.sorted_([[4, 3], [2, 1]], key=lambda x: sum(x)),
[[2, 1], [4, 3]])
def test_sorted_tensor(self):
iterable_1 = constant_op.constant([2, 3, 1])
self.assertListEqual(
list(self.evaluate(py_builtins.sorted_(iterable_1))), [1, 2, 3])
self.assertListEqual(
list(self.evaluate(py_builtins.sorted_(iterable_1, key=lambda x: -x))),
[3, 2, 1])
self.assertListEqual(
list(self.evaluate(py_builtins.sorted_(iterable_1, reverse=True))),
[3, 2, 1])
self.assertListEqual(
list(
self.evaluate(
py_builtins.sorted_(iterable_1, key=lambda x: -x,
reverse=True))), [1, 2, 3])
iterable_2 = constant_op.constant([[4, 3], [2, 1]])
with self.assertRaises(ValueError):
py_builtins.sorted_(iterable_2)
with self.assertRaises(ValueError):
py_builtins.sorted_(iterable_2, key=lambda x: -x)
self.assertAllEqual(
list(
self.evaluate(
py_builtins.sorted_(
iterable_2, key=lambda x: math_ops.reduce_sum(x)))),
[[2, 1], [4, 3]])
if __name__ == '__main__':
test.main()
|
PyBuiltinsTest
|
python
|
django__django
|
django/template/base.py
|
{
"start": 13173,
"end": 15841
}
|
class ____:
def __init__(self, template_string):
self.template_string = template_string
self.verbatim = False
def __repr__(self):
return '<%s template_string="%s...", verbatim=%s>' % (
self.__class__.__qualname__,
self.template_string[:20].replace("\n", ""),
self.verbatim,
)
def tokenize(self):
"""
Return a list of tokens from a given template_string.
"""
in_tag = False
lineno = 1
result = []
for token_string in tag_re.split(self.template_string):
if token_string:
result.append(self.create_token(token_string, None, lineno, in_tag))
lineno += token_string.count("\n")
in_tag = not in_tag
return result
def create_token(self, token_string, position, lineno, in_tag):
"""
Convert the given token string into a new Token object and return it.
If in_tag is True, we are processing something that matched a tag,
otherwise it should be treated as a literal string.
"""
if in_tag:
# The [0:2] and [2:-2] ranges below strip off *_TAG_START and
# *_TAG_END. The 2's are hard-coded for performance. Using
# len(BLOCK_TAG_START) would permit BLOCK_TAG_START to be
# different, but it's not likely that the TAG_START values will
# change anytime soon.
token_start = token_string[0:2]
if token_start == BLOCK_TAG_START:
content = token_string[2:-2].strip()
if self.verbatim:
# Then a verbatim block is being processed.
if content != self.verbatim:
return Token(TokenType.TEXT, token_string, position, lineno)
# Otherwise, the current verbatim block is ending.
self.verbatim = False
elif content[:9] in ("verbatim", "verbatim "):
# Then a verbatim block is starting.
self.verbatim = "end%s" % content
return Token(TokenType.BLOCK, content, position, lineno)
if not self.verbatim:
content = token_string[2:-2].strip()
if token_start == VARIABLE_TAG_START:
return Token(TokenType.VAR, content, position, lineno)
# BLOCK_TAG_START was handled above.
assert token_start == COMMENT_TAG_START
return Token(TokenType.COMMENT, content, position, lineno)
return Token(TokenType.TEXT, token_string, position, lineno)
|
Lexer
|
python
|
keras-team__keras
|
keras/src/quantizers/gptq.py
|
{
"start": 11166,
"end": 20175
}
|
class ____:
def __init__(self, layer, config=GPTQConfig(tokenizer=None, dataset=None)):
self.original_layer = layer
self.num_samples = 0
self.config = config
self.quantizer = GPTQQuantizer(
config, compute_dtype=layer.variable_dtype
)
# Explicitly handle each supported layer type
if isinstance(layer, Dense) or (
isinstance(layer, EinsumDense) and layer.kernel.ndim == 2
):
# For a standard Dense layer, the dimensions are straightforward.
self.kernel_shape = layer.kernel.shape
# rows: [input_features]
self.rows = self.kernel_shape[0]
# columns: [output_features]
self.columns = self.kernel_shape[1]
self.layer = layer
# Handle 3D EinsumDense layers (typically from attention blocks).
elif isinstance(layer, EinsumDense) and layer.kernel.ndim == 3:
# For EinsumDense, we determine the effective 2D dimensions.
self.kernel_shape = layer.kernel.shape
shape = list(self.kernel_shape)
d_model_dim_index = shape.index(max(shape))
if d_model_dim_index == 0: # QKV projection case
in_features, heads, head_dim = shape
self.rows, self.columns = (
in_features,
ops.multiply(heads, head_dim),
)
elif d_model_dim_index in [1, 2]: # Attention Output case
heads, head_dim, out_features = shape
self.rows, self.columns = (
ops.multiply(heads, head_dim),
out_features,
)
# Create a temporary object that holds a reshaped
# 2D version of the kernel.
self.layer = types.SimpleNamespace(
kernel=ops.reshape(layer.kernel, (self.rows, self.columns)),
)
else:
# Raise an error if the layer is not supported.
raise TypeError(f"Unsupported layer type for GPTQ: {type(layer)}")
self.hessian = ops.zeros((self.rows, self.rows), dtype="float32")
def update_hessian_with_batch(self, input_batch):
"""
Updates the running average of the Hessian matrix with a new batch.
This method computes the Hessian matrix for a given batch of input
activations and updates the accumulated Hessian (`self.hessian`) using a
numerically stable running average. This allows the Hessian to be
computed over a large dataset without loading all samples into memory
at once.
The input tensor is first reshaped into a 2D matrix [num_samples,
num_features] before the Hessian is calculated.
Args:
input_batch: A 2D or higher-dimensional tensor of input activations
from a calibration batch.
Raises:
ValueError: If the feature dimension of the input tensor
`input_batch` does not match the dimensions of the
pre-initialized Hessian matrix `self.hessian`.
"""
if input_batch is None:
raise ValueError("Input tensor cannot be None.")
if len(input_batch.shape) < 2:
raise ValueError(
"Input tensor must have rank >= 2 "
f"(got rank {len(input_batch.shape)})."
)
if ops.size(input_batch) == 0:
raise ValueError("Input tensor cannot be empty.")
if len(input_batch.shape) > 2:
# [batch, features]
input_batch = ops.reshape(input_batch, (-1, input_batch.shape[-1]))
x = ops.cast(input_batch, "float32")
num_new_samples = ops.shape(x)[0]
num_prev_samples = self.num_samples
total_samples = ops.add(num_prev_samples, num_new_samples)
if ops.shape(self.hessian)[0] != ops.shape(x)[-1]:
raise ValueError(
f"Hessian dimensions ({ops.shape(self.hessian)[0]}) do not "
f"match input features ({ops.shape(x)[-1]})."
)
# gram_matrix: [features, features]
gram_matrix = ops.matmul(ops.transpose(x), x)
# Ensures numerical stability and symmetry in case of large floating
# point activations.
gram_matrix = ops.divide(
ops.add(gram_matrix, ops.transpose(gram_matrix)), 2.0
)
# Decay previous mean and add current per-sample contribution
# (factor 2/N)
if self.num_samples > 0:
self.hessian = ops.multiply(
self.hessian, ops.divide(num_prev_samples, total_samples)
)
self.hessian = ops.add(
self.hessian,
ops.multiply(ops.divide(2.0, total_samples), gram_matrix),
)
self.num_samples = self.num_samples + ops.shape(x)[0] or 0
def quantize_and_correct_layer(
self,
blocksize=128,
):
"""
Performs GPTQ quantization and correction on the layer's weights.
This method implements the core logic of the "Optimal Brain Quant"
(OBQ) method, as applied by GPTQ, to quantize the weights of a single
layer. It iteratively quantizes blocks of weights and corrects for the
quantization error by updating the remaining weights.
The algorithm follows these main steps:
1. Initialization: It optionally reorders the weight columns based
on activation magnitudes (`activation_order=True`) to protect more
salient
weights.
2. Hessian Modification: The Hessian matrix, pre-computed from
calibration data, is dampened to ensure its invertibility and
stability.
3. Iterative Quantization: The function iterates through the
weight columns in blocks (`blocksize`). In each iteration, it:
a. Quantizes one column.
b. Calculates the quantization error.
c. Updates the remaining weights in the *current* block by
distributing the error, using the inverse Hessian.
4. Block-wise Correction: After a block is quantized, the total
error from that block is propagated to the *next* block of weights
to be processed.
5. Finalization: The quantized weights are reordered back if
`activation_order` was used, and the layer's weights are updated.
This implementation is based on the official GPTQ paper and repository.
For more details, see:
- Paper: https://arxiv.org/abs/2210.17323
- Original Code: https://github.com/IST-DASLab/gptq
Args:
blocksize: (int, optional) The size of the weight block to process
at a time. Defaults to 128.
"""
weights_matrix = ops.transpose(self.layer.kernel)
# Dampen the Hessian for Stability
hessian_diagonal = ops.diagonal(self.hessian)
dead_diagonal = ops.equal(hessian_diagonal, 0.0)
hessian_diagonal = ops.where(dead_diagonal, 1.0, hessian_diagonal)
hessian_matrix = ops.add(
self.hessian,
ops.diag(
ops.where(dead_diagonal, 1.0, ops.zeros_like(hessian_diagonal))
),
)
# Add dampening factor to the Hessian diagonal
damping_factor = ops.multiply(
self.config.hessian_damping, ops.mean(hessian_diagonal)
)
hessian_diagonal = ops.add(hessian_diagonal, damping_factor)
hessian_matrix = ops.add(
ops.subtract(
hessian_matrix, ops.diag(ops.diagonal(hessian_matrix))
),
ops.diag(hessian_diagonal),
)
# Compute the inverse Hessian, which is used for error correction
inverse_hessian = linalg.inv(hessian_matrix)
quantized, scale, zero, g_idx = gptq_quantize_matrix(
weights_matrix,
inv_hessian=inverse_hessian,
blocksize=blocksize,
group_size=self.config.group_size,
activation_order=self.config.activation_order,
order_metric=ops.diagonal(hessian_matrix),
compute_scale_zero=partial(self.quantizer.find_params, weight=True),
)
quantized = ops.cast(
quantized, self.original_layer.quantized_kernel.dtype
)
if self.config.weight_bits == 4:
# For 4-bit weights, we need to pack them into bytes
quantized, _, _ = quantizers.pack_int4(
quantized, axis=0, dtype="uint8"
)
del self.original_layer._kernel
self.original_layer.quantized_kernel.assign(quantized)
self.original_layer.kernel_scale.assign(scale)
self.original_layer.kernel_zero.assign(zero)
self.original_layer.g_idx.assign(g_idx)
self.original_layer.is_gptq_calibrated = True
def free(self):
del self.hessian
del self.layer
|
GPTQ
|
python
|
django__django
|
tests/generic_views/test_base.py
|
{
"start": 15540,
"end": 22347
}
|
class ____(LoggingAssertionMixin, SimpleTestCase):
rf = RequestFactory()
def test_no_url(self):
"Without any configuration, returns HTTP 410 GONE"
response = RedirectView.as_view()(self.rf.get("/foo/"))
self.assertEqual(response.status_code, 410)
def test_default_redirect(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.get("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_permanent_redirect(self):
"Permanent redirects are an option"
response = RedirectView.as_view(url="/bar/", permanent=True)(
self.rf.get("/foo/")
)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/bar/")
def test_temporary_redirect(self):
"Temporary redirects are an option"
response = RedirectView.as_view(url="/bar/", permanent=False)(
self.rf.get("/foo/")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_include_args(self):
"GET arguments can be included in the redirected URL"
response = RedirectView.as_view(url="/bar/")(self.rf.get("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
response = RedirectView.as_view(url="/bar/", query_string=True)(
self.rf.get("/foo/?pork=spam")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/?pork=spam")
def test_include_urlencoded_args(self):
"GET arguments can be URL-encoded when included in the redirected URL"
response = RedirectView.as_view(url="/bar/", query_string=True)(
self.rf.get("/foo/?unicode=%E2%9C%93")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/?unicode=%E2%9C%93")
def test_parameter_substitution(self):
"Redirection URLs can be parameterized"
response = RedirectView.as_view(url="/bar/%(object_id)d/")(
self.rf.get("/foo/42/"), object_id=42
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/42/")
def test_named_url_pattern(self):
"Named pattern parameter should reverse to the matching pattern"
response = RedirectView.as_view(pattern_name="artist_detail")(
self.rf.get("/foo/"), pk=1
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers["Location"], "/detail/artist/1/")
def test_named_url_pattern_using_args(self):
response = RedirectView.as_view(pattern_name="artist_detail")(
self.rf.get("/foo/"), 1
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers["Location"], "/detail/artist/1/")
def test_redirect_POST(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.post("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_HEAD(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.head("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_OPTIONS(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.options("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_PUT(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.put("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_PATCH(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.patch("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_DELETE(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url="/bar/")(self.rf.delete("/foo/"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/bar/")
def test_redirect_when_meta_contains_no_query_string(self):
"regression for #16705"
# we can't use self.rf.get because it always sets QUERY_STRING
response = RedirectView.as_view(url="/bar/")(self.rf.request(PATH_INFO="/foo/"))
self.assertEqual(response.status_code, 302)
def test_direct_instantiation(self):
"""
It should be possible to use the view without going through .as_view()
(#21564).
"""
view = RedirectView()
response = view.dispatch(self.rf.head("/foo/"))
self.assertEqual(response.status_code, 410)
def test_gone_response_logged(self):
for path, escaped in [
("/foo/", "/foo/"),
(r"/%1B[1;31mNOW IN RED!!!1B[0m/", r"/\x1b[1;31mNOW IN RED!!!1B[0m/"),
]:
with self.subTest(path=path):
request = self.rf.get(path)
with self.assertLogs("django.request", "WARNING") as handler:
RedirectView().dispatch(request)
self.assertLogRecord(
handler, f"Gone: {escaped}", logging.WARNING, 410, request
)
def test_redirect_with_querry_string_in_destination(self):
response = RedirectView.as_view(url="/bar/?pork=spam", query_string=True)(
self.rf.get("/foo")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers["Location"], "/bar/?pork=spam")
def test_redirect_with_query_string_in_destination_and_request(self):
response = RedirectView.as_view(url="/bar/?pork=spam", query_string=True)(
self.rf.get("/foo/?utm_source=social")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.headers["Location"], "/bar/?pork=spam&utm_source=social"
)
def test_redirect_with_same_query_string_param_will_append_not_replace(self):
response = RedirectView.as_view(url="/bar/?pork=spam", query_string=True)(
self.rf.get("/foo/?utm_source=social&pork=ham")
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.headers["Location"], "/bar/?pork=spam&utm_source=social&pork=ham"
)
|
RedirectViewTest
|
python
|
pytorch__pytorch
|
torch/_subclasses/_fake_tensor_utils.py
|
{
"start": 5458,
"end": 6567
}
|
class ____:
"""
Represents a SymInt in the cached output.
"""
# This is either an `int` which represents the index in the key to copy the
# SymNode from or it's the deconstructed SymNode itself.
value: Union[int, _DeconstructedSymNode]
def __init__(self, value: SymInt, key_path: Optional[int]) -> None:
if key_path is None:
self.value = _DeconstructedSymNode.from_node(value.node)
else:
self.value = key_path
def extract(self, key: _DispatchCacheKey, shape_env: ShapeEnv) -> SymInt:
if isinstance(self.value, _DeconstructedSymNode):
return SymInt(self.value.extract(shape_env))
else:
src = key.key[self.value]
assert isinstance(src, _PySymInputStub) and isinstance(src.value, SymInt)
return src.value
def __repr__(self) -> str:
return f"_SymIntOutputStub({self.value!r})"
def __eq__(self, other: object) -> bool:
raise NotImplementedError
def __hash__(self) -> int:
raise NotImplementedError
@dataclass(slots=True)
|
_SymIntOutputStub
|
python
|
numba__numba
|
numba/cpython/listobj.py
|
{
"start": 4090,
"end": 4809
}
|
class ____(_ListPayloadMixin):
"""
A helper object to access the list attributes given the pointer to the
payload type.
"""
def __init__(self, context, builder, list_type, payload_ptr):
self._context = context
self._builder = builder
self._ty = list_type
self._datamodel = context.data_model_manager[list_type.dtype]
payload_type = types.ListPayload(list_type)
ptrty = context.get_data_type(payload_type).as_pointer()
payload_ptr = builder.bitcast(payload_ptr, ptrty)
payload = context.make_data_helper(builder, payload_type,
ref=payload_ptr)
self._payload = payload
|
ListPayloadAccessor
|
python
|
kamyu104__LeetCode-Solutions
|
Python/house-robber-iv.py
|
{
"start": 822,
"end": 1467
}
|
class ____(object):
def minCapability(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def check(x):
cnt = i = 0
while i < len(nums):
if nums[i] <= x:
cnt += 1
i += 2
else:
i += 1
return cnt >= k
left, right = min(nums), max(nums)
while left <= right:
mid = left + (right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
|
Solution2
|
python
|
pyca__cryptography
|
src/cryptography/x509/certificate_transparency.py
|
{
"start": 438,
"end": 797
}
|
class ____(utils.Enum):
"""
Signature algorithms that are valid for SCTs.
These are exactly the same as SignatureAlgorithm in RFC 5246 (TLS 1.2).
See: <https://datatracker.ietf.org/doc/html/rfc5246#section-7.4.1.4.1>
"""
ANONYMOUS = 0
RSA = 1
DSA = 2
ECDSA = 3
SignedCertificateTimestamp = rust_x509.Sct
|
SignatureAlgorithm
|
python
|
getsentry__sentry
|
src/sentry/hybridcloud/services/tombstone/impl.py
|
{
"start": 446,
"end": 674
}
|
class ____(ControlTombstoneService):
def record_remote_tombstone(self, *, tombstone: RpcTombstone) -> None:
ControlTombstone.record_delete(tombstone.table_name, tombstone.identifier)
|
DatabaseBackedControlTombstoneService
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_slots/SLOT000.py
|
{
"start": 198,
"end": 237
}
|
class ____(str, SubEnum): # Ok
pass
|
Ok
|
python
|
Textualize__textual
|
docs/examples/how-to/render_compose.py
|
{
"start": 974,
"end": 1173
}
|
class ____(App):
"""Simple app to show our custom widget."""
def compose(self) -> ComposeResult:
yield Splash()
if __name__ == "__main__":
app = SplashApp()
app.run()
|
SplashApp
|
python
|
celery__celery
|
t/unit/utils/test_platforms.py
|
{
"start": 1059,
"end": 2161
}
|
class ____:
def test_long_opt(self):
assert _find_option_with_arg(
['--foo=bar'], long_opts=['--foo']) == 'bar'
def test_short_opt(self):
assert _find_option_with_arg(
['-f', 'bar'], short_opts=['-f']) == 'bar'
@t.skip.if_win32
def test_fd_by_path():
test_file = tempfile.NamedTemporaryFile()
try:
keep = fd_by_path([test_file.name])
assert keep == [test_file.file.fileno()]
with patch('os.open') as _open:
_open.side_effect = OSError()
assert not fd_by_path([test_file.name])
finally:
test_file.close()
def test_close_open_fds(patching):
_close = patching('os.close')
fdmax = patching('billiard.compat.get_fdmax')
with patch('os.closerange', create=True) as closerange:
fdmax.return_value = 3
close_open_fds()
if not closerange.called:
_close.assert_has_calls([call(2), call(1), call(0)])
_close.side_effect = OSError()
_close.side_effect.errno = errno.EBADF
close_open_fds()
|
test_find_option_with_arg
|
python
|
pytorch__pytorch
|
test/test_cpp_extensions_stream_and_event.py
|
{
"start": 1047,
"end": 3792
}
|
class ____(common.TestCase):
"""Tests Stream and Event with C++ extensions."""
module = None
def setUp(self):
super().setUp()
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def tearDown(self):
super().tearDown()
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@classmethod
def tearDownClass(cls):
torch.testing._internal.common_utils.remove_cpp_extensions_build_root()
@classmethod
def setUpClass(cls):
torch.testing._internal.common_utils.remove_cpp_extensions_build_root()
build_dir = tempfile.mkdtemp()
# Load the fake device guard impl.
src = f"{os.path.abspath(os.path.dirname(__file__))}/cpp_extensions/mtia_extension.cpp"
cls.module = torch.utils.cpp_extension.load(
name="mtia_extension",
sources=[src],
build_directory=build_dir,
extra_include_paths=[
"cpp_extensions",
"path / with spaces in it",
"path with quote'",
],
is_python_module=False,
verbose=True,
)
@skipIfTorchDynamo("Not a TorchDynamo suitable test")
def test_stream_event(self):
s = torch.Stream()
self.assertTrue(s.device_type, int(torch._C._autograd.DeviceType.MTIA))
e = torch.Event(enable_timing=True)
e1 = torch.Event(enable_timing=True)
e1.record()
self.assertTrue(e.device.type, "mtia")
# Should be nullptr by default
self.assertTrue(e.event_id == 0)
s.record_event(event=e)
print(f"recorded event 1: {e}")
self.assertTrue(e.event_id != 0)
# The enable_timing of event created by record_event() is false
e2 = s.record_event()
print(f"recorded event 2: {e2}")
self.assertTrue(e2.event_id != 0)
self.assertTrue(e2.event_id != e.event_id)
e.synchronize()
e1.synchronize()
e2.synchronize()
time_elapsed = e.elapsed_time(e1)
print(f"time elapsed between e and e1: {time_elapsed}")
with self.assertRaisesRegex(
ValueError,
"Both events must be created with argument 'enable_timing=True'",
):
time_elapsed = e.elapsed_time(e2)
old_event_id = e.event_id
e.record(stream=s)
print(f"recorded event 1: {e}")
self.assertTrue(e.event_id == old_event_id)
if __name__ == "__main__":
common.run_tests()
|
TestCppExtensionStreamAndEvent
|
python
|
wandb__wandb
|
wandb/sdk/launch/errors.py
|
{
"start": 121,
"end": 207
}
|
class ____(Error):
"""Raised when Docker daemon is not running."""
|
LaunchDockerError
|
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 10204,
"end": 14770
}
|
class ____(ShapedArray):
array_abstraction_level = 2
val: np.ndarray
def __init__(self, val):
self.val = val
self.shape = val.shape
self.dtype = val.dtype
@staticmethod
def _bool(tracer):
return bool(tracer.aval.val)
@staticmethod
def _nonzero(tracer):
return bool(tracer.aval.val)
def get_aval(x):
if isinstance(x, Tracer):
return x.aval
elif type(x) in jax_types:
return ConcreteArray(np.asarray(x))
else:
raise TypeError(x)
jax_types = {bool, int, float,
np.bool_, np.int32, np.int64, np.float32, np.float64, np.ndarray}
# -
# Notice that we actually have two `AbstractValue`s for arrays, representing
# different levels of abstraction. A `ShapedArray` represents the set of all
# possible arrays with a given shape and dtype. A `ConcreteArray` represents a
# singleton set consisting of a single array value.
#
# Now that we've set up the interpreter stack, the Trace/Tracer API for
# interpreters, and abstract values, we can come back to implement `bind`:
def bind(prim, *args, **params):
top_trace = find_top_trace(args)
tracers = [full_raise(top_trace, arg) for arg in args]
outs = top_trace.process_primitive(prim, tracers, params)
return [full_lower(out) for out in outs]
# The main action is that we call `find_top_trace` to figure out which
# interpreter should handle this primitive application. We then call that top
# trace's `process_primitive` so that the trace can apply its interpretation
# rule. The calls to `full_raise` just ensure that the inputs are boxed in the
# top trace's `Tracer` instances, and the call to `full_lower` is an optional
# optimization so that we unbox values out of `Tracer`s as much as possible.
# +
import operator as op
def find_top_trace(xs) -> Trace:
top_main = max((x._trace.main for x in xs if isinstance(x, Tracer)),
default=trace_stack[0], key=op.attrgetter('level'))
if dynamic_trace and dynamic_trace.level > top_main.level:
top_main = dynamic_trace
return top_main.trace_type(top_main)
# -
# In words, ignoring the `dynamic_trace` step until Part 3, `find_top_trace`
# returns the highest-level interpreter associated with the `Tracer`s on its
# inputs, and otherwise returns the interpreter at the bottom of the stack
# (which is always an evaluation trace, at least for now). This is a deviation
# from the description above, where we always start by running the interpreter
# at the top of the stack and then work our way down, applying every interpreter
# in the stack. Instead, we're only applying an interpreter when the input
# arguments to a primitive bind are boxed in a `Tracer` corresponding to that
# interpreter. This optimization lets us skip irrelevant transformations, but
# bakes in an assumption that transformations mostly follow data dependence
# (except for the special bottom-of-the-stack interpreter, which interprets
# everything).
#
# An alternative would be to have every interpreter in the stack interpret every
# operation. That's worth exploring! JAX is designed around data dependence in
# large part because that's so natural for automatic differentiation, and JAX's
# roots are in autodiff. But it may be over-fit.
# +
def full_lower(val: Any):
if isinstance(val, Tracer):
return val.full_lower()
else:
return val
def full_raise(trace: Trace, val: Any) -> Tracer:
if not isinstance(val, Tracer):
assert type(val) in jax_types
return trace.pure(val)
level = trace.main.level
if val._trace.main is trace.main:
return val
elif val._trace.main.level < level:
return trace.lift(val)
elif val._trace.main.level > level:
raise Exception(f"Can't lift level {val._trace.main.level} to {level}.")
else: # val._trace.level == level
raise Exception(f"Different traces at same level: {val._trace}, {trace}.")
# -
# The logic in `full_raise` serves to box values into `Tracer`s for a particular
# `Trace`, calling different methods on the `Trace` based on context:
# `Trace.pure` is called on non-`Tracer` constants, and `Trace.lift` is called
# for values that are already `Tracer`s from a lower-level interpreter. These
# two methods could share the same implementation, but by distinguishing them in
# the core logic we can provide more information to the `Trace` subclass.
#
# That's it for the JAX core! Now we can start adding interpreters.
# ### Evaluation interpreter
#
# We'll start with the simplest interpreter: the evaluation interpreter that
# will sit at the bottom of the interpreter stack.
# +
|
ConcreteArray
|
python
|
PrefectHQ__prefect
|
tests/test_context.py
|
{
"start": 1455,
"end": 5603
}
|
class ____(ContextModel):
__var__: ContextVar = ContextVar("test")
x: int
def test_context_enforces_types():
with pytest.raises(ValueError):
ExampleContext(x="hello")
def test_context_get_outside_context_is_null():
assert ExampleContext.get() is None
def test_single_context_object_cannot_be_entered_multiple_times():
context = ExampleContext(x=1)
with context:
with pytest.raises(RuntimeError, match="Context already entered"):
with context:
pass
def test_copied_context_object_can_be_reentered():
context = ExampleContext(x=1)
with context:
with context.model_copy():
assert ExampleContext.get().x == 1
def test_exiting_a_context_more_than_entering_raises():
context = ExampleContext(x=1)
with pytest.raises(RuntimeError, match="Asymmetric use of context"):
with context:
context.__exit__()
def test_context_exit_restores_previous_context():
with ExampleContext(x=1):
with ExampleContext(x=2):
with ExampleContext(x=3):
assert ExampleContext.get().x == 3
assert ExampleContext.get().x == 2
assert ExampleContext.get().x == 1
assert ExampleContext.get() is None
async def test_flow_run_context(prefect_client):
@flow
def foo():
pass
test_task_runner = ThreadPoolTaskRunner()
flow_run = await prefect_client.create_flow_run(foo)
result_store = await ResultStore().update_for_flow(foo)
with FlowRunContext(
flow=foo,
flow_run=flow_run,
client=prefect_client,
task_runner=test_task_runner,
result_store=result_store,
parameters={"x": "y"},
):
ctx = FlowRunContext.get()
assert ctx.flow is foo
assert ctx.flow_run == flow_run
assert ctx.client is prefect_client
assert ctx.task_runner is test_task_runner
assert ctx.result_store == result_store
assert isinstance(ctx.start_time, datetime.datetime)
assert ctx.parameters == {"x": "y"}
async def test_task_run_context(prefect_client, flow_run):
@task
def foo():
pass
task_run = await prefect_client.create_task_run(foo, flow_run.id, dynamic_key="")
result_store = ResultStore()
with TaskRunContext(
task=foo,
task_run=task_run,
client=prefect_client,
result_store=result_store,
parameters={"foo": "bar"},
):
ctx = TaskRunContext.get()
assert ctx.task is foo
assert ctx.task_run == task_run
assert ctx.result_store == result_store
assert isinstance(ctx.start_time, datetime.datetime)
assert ctx.parameters == {"foo": "bar"}
@pytest.fixture
def remove_existing_settings_context():
token = SettingsContext.__var__.set(None)
try:
yield
finally:
SettingsContext.__var__.reset(token)
async def test_get_run_context(prefect_client, local_filesystem):
@flow
def foo():
pass
@task
def bar():
pass
test_task_runner = ThreadPoolTaskRunner()
flow_run = await prefect_client.create_flow_run(foo)
task_run = await prefect_client.create_task_run(bar, flow_run.id, dynamic_key="")
with pytest.raises(RuntimeError):
get_run_context()
with pytest.raises(MissingContextError):
get_run_context()
with FlowRunContext(
flow=foo,
flow_run=flow_run,
client=prefect_client,
task_runner=test_task_runner,
result_store=await ResultStore().update_for_flow(foo),
parameters={"x": "y"},
) as flow_ctx:
assert get_run_context() is flow_ctx
with TaskRunContext(
task=bar,
task_run=task_run,
client=prefect_client,
result_store=await get_result_store().update_for_task(bar, _sync=False),
parameters={"foo": "bar"},
) as task_ctx:
assert get_run_context() is task_ctx, "Task context takes precedence"
assert get_run_context() is flow_ctx, "Flow context is restored and retrieved"
|
ExampleContext
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 114554,
"end": 114670
}
|
class ____(MaybeAlignPartitions):
_parameters = ["frame", "cond", "other"]
_expr_cls: AnyType = Mask
|
MaskAlign
|
python
|
pandas-dev__pandas
|
pandas/plotting/_matplotlib/core.py
|
{
"start": 72200,
"end": 75297
}
|
class ____(MPLPlot):
@property
def _kind(self) -> Literal["pie"]:
return "pie"
_layout_type = "horizontal"
def __init__(self, data: Series | DataFrame, kind=None, **kwargs) -> None:
data = data.fillna(value=0)
lt_zero = data < 0
if isinstance(data, ABCDataFrame) and lt_zero.any().any():
raise ValueError(f"{self._kind} plot doesn't allow negative values")
elif isinstance(data, ABCSeries) and lt_zero.any():
raise ValueError(f"{self._kind} plot doesn't allow negative values")
MPLPlot.__init__(self, data, kind=kind, **kwargs)
@classmethod
def _validate_log_kwd(
cls,
kwd: str,
value: bool | None | Literal["sym"],
) -> bool | None | Literal["sym"]:
super()._validate_log_kwd(kwd=kwd, value=value)
if value is not False:
warnings.warn(
f"PiePlot ignores the '{kwd}' keyword",
UserWarning,
stacklevel=find_stack_level(),
)
return False
def _validate_color_args(self, color, colormap) -> None:
# TODO: warn if color is passed and ignored?
return None
def _make_plot(self, fig: Figure) -> None:
colors = self._get_colors(num_colors=len(self.data), color_kwds="colors")
self.kwds.setdefault("colors", colors)
for i, (label, y) in enumerate(self._iter_data(data=self.data)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ""
else:
return label
idx = [pprint_thing(v) for v in self.data.index]
labels = kwds.pop("labels", idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [
blank_labeler(left, value)
for left, value in zip(labels, y, strict=True)
]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get("autopct", None) is not None:
# error: Need more than 2 values to unpack (3 expected)
patches, texts, autotexts = results # type: ignore[misc]
else:
# error: Too many values to unpack (2 expected, 3 provided)
patches, texts = results # type: ignore[misc]
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for _patch, _leglabel in zip(patches, leglabels, strict=True):
self._append_legend_handles_labels(_patch, _leglabel)
def _post_plot_logic(self, ax: Axes, data) -> None:
pass
|
PiePlot
|
python
|
zarr-developers__zarr-python
|
tests/test_dtype/test_npy/test_bytes.py
|
{
"start": 1898,
"end": 3168
}
|
class ____(BaseTestZDType):
test_cls = RawBytes
valid_dtype = (np.dtype("|V10"),)
invalid_dtype = (
np.dtype(np.int8),
np.dtype(np.float64),
np.dtype("|S10"),
)
valid_json_v2 = ({"name": "|V10", "object_codec_id": None},)
valid_json_v3 = (
{"name": "raw_bytes", "configuration": {"length_bytes": 1}},
{"name": "raw_bytes", "configuration": {"length_bytes": 8}},
)
invalid_json_v2 = (
"|V",
"|S10",
"|f8",
)
invalid_json_v3 = (
{"name": "r10"},
{"name": "r-80"},
)
scalar_v2_params = (
(RawBytes(length=1), "AA=="),
(RawBytes(length=2), "YWI="),
(RawBytes(length=4), "YWJjZA=="),
)
scalar_v3_params = (
(RawBytes(length=1), "AA=="),
(RawBytes(length=2), "YWI="),
(RawBytes(length=4), "YWJjZA=="),
)
cast_value_params = (
(RawBytes(length=1), b"\x00", np.void(b"\x00")),
(RawBytes(length=2), b"ab", np.void(b"ab")),
(RawBytes(length=4), b"abcd", np.void(b"abcd")),
)
invalid_scalar_params = ((RawBytes(length=1), 1.0),)
item_size_params = (
RawBytes(length=1),
RawBytes(length=4),
RawBytes(length=10),
)
|
TestRawBytes
|
python
|
doocs__leetcode
|
solution/1100-1199/1160.Find Words That Can Be Formed by Characters/Solution.py
|
{
"start": 0,
"end": 286
}
|
class ____:
def countCharacters(self, words: List[str], chars: str) -> int:
cnt = Counter(chars)
ans = 0
for w in words:
wc = Counter(w)
if all(cnt[c] >= v for c, v in wc.items()):
ans += len(w)
return ans
|
Solution
|
python
|
spyder-ide__spyder
|
external-deps/python-lsp-server/test/plugins/test_completion.py
|
{
"start": 1418,
"end": 20771
}
|
class ____(NamedTuple):
document: str
position: dict
label: str
expected: lsp.CompletionItemKind
# fmt: off
TYPE_CASES: dict[str, TypeCase] = {
"variable": TypeCase(
document="test = 1\ntes",
position={"line": 1, "character": 3},
label="test",
expected=lsp.CompletionItemKind.Variable,
),
"function": TypeCase(
document="def test():\n pass\ntes",
position={"line": 2, "character": 3},
label="test()",
expected=lsp.CompletionItemKind.Function,
),
"keyword": TypeCase(
document="fro",
position={"line": 0, "character": 3},
label="from",
expected=lsp.CompletionItemKind.Keyword,
),
"file": TypeCase(
document='"' + __file__[:-2].replace('"', '\\"') + '"',
position={"line": 0, "character": len(__file__) - 2},
label=Path(__file__).name + '"',
expected=lsp.CompletionItemKind.File,
),
"module": TypeCase(
document="import statis",
position={"line": 0, "character": 13},
label="statistics",
expected=lsp.CompletionItemKind.Module,
),
"class": TypeCase(
document="KeyErr",
position={"line": 0, "character": 6},
label="KeyError",
expected=lsp.CompletionItemKind.Class,
),
"property": TypeCase(
document=(
"class A:\n"
" @property\n"
" def test(self):\n"
" pass\n"
"A().tes"
),
position={"line": 4, "character": 5},
label="test",
expected=lsp.CompletionItemKind.Property,
),
}
# fmt: on
@pytest.mark.parametrize("case", list(TYPE_CASES.values()), ids=list(TYPE_CASES.keys()))
def test_jedi_completion_type(case, config, workspace):
# property support was introduced in 0.18
if case.expected == lsp.CompletionItemKind.Property and JEDI_VERSION.startswith(
"0.17"
):
return
doc = Document(DOC_URI, workspace, case.document)
items = pylsp_jedi_completions(config, doc, case.position)
items = {i["label"]: i for i in items}
assert items[case.label]["kind"] == case.expected
def test_jedi_completion(config, workspace) -> None:
# Over 'i' in os.path.isabs(...)
com_position = {"line": 1, "character": 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
labels = [i["label"] for i in items]
assert "isfile(path)" in labels
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {"line": 1, "character": 1000})
def test_jedi_completion_item_resolve(config, workspace) -> None:
# Over the blank line
com_position = {"line": 8, "character": 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({"plugins": {"jedi_completion": {"resolve_at_most": math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c["label"]: c for c in completions}
documented_hello_item = items["documented_hello()"]
assert "documentation" not in documented_hello_item
assert "detail" not in documented_hello_item
resolved_documented_hello = pylsp_jedi_completion_item_resolve(
doc._config, completion_item=documented_hello_item, document=doc
)
expected_doc = {
"kind": "markdown",
"value": "```python\ndocumented_hello()\n```\n\n\nSends a polite greeting",
}
assert resolved_documented_hello["documentation"] == expected_doc
def test_jedi_completion_with_fuzzy_enabled(config, workspace) -> None:
# Over 'i' in os.path.isabs(...)
config.update({"plugins": {"jedi_completion": {"fuzzy": True}}})
com_position = {"line": 1, "character": 15}
doc = Document(DOC_URI, workspace, DOC)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
expected = "isabs(s)"
assert items[0]["label"] == expected
# Test we don't throw with big character
pylsp_jedi_completions(config, doc, {"line": 1, "character": 1000})
def test_jedi_completion_resolve_at_most(config, workspace) -> None:
# Over 'i' in os.path.isabs(...)
com_position = {"line": 1, "character": 15}
doc = Document(DOC_URI, workspace, DOC)
# Do not resolve any labels
config.update({"plugins": {"jedi_completion": {"resolve_at_most": 0}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i["label"] for i in items}
assert "isabs" in labels
# Resolve all items
config.update({"plugins": {"jedi_completion": {"resolve_at_most": math.inf}}})
items = pylsp_jedi_completions(config, doc, com_position)
labels = {i["label"] for i in items}
assert "isfile(path)" in labels
def test_rope_completion(config, workspace) -> None:
# Over 'i' in os.path.isabs(...)
com_position = {"line": 1, "character": 15}
workspace.put_document(DOC_URI, source=DOC)
doc = workspace.get_document(DOC_URI)
items = pylsp_rope_completions(config, workspace, doc, com_position)
assert items
assert items[0]["label"] == "isabs"
def test_jedi_completion_ordering(config, workspace) -> None:
# Over the blank line
com_position = {"line": 8, "character": 0}
doc = Document(DOC_URI, workspace, DOC)
config.update({"plugins": {"jedi_completion": {"resolve_at_most": math.inf}}})
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c["label"]: c["sortText"] for c in completions}
# And that 'hidden' functions come after unhidden ones
assert items["hello()"] < items["_a_hello()"]
def test_jedi_property_completion(config, workspace) -> None:
# Over the 'w' in 'print Hello().world'
com_position = {"line": 18, "character": 15}
doc = Document(DOC_URI, workspace, DOC)
completions = pylsp_jedi_completions(config, doc, com_position)
items = {c["label"]: c["sortText"] for c in completions}
# Ensure we can complete the 'world' property
assert "world" in list(items.keys())[0]
def test_jedi_method_completion(config, workspace) -> None:
# Over the 'y' in 'print Hello().every'
com_position = {"line": 20, "character": 19}
doc = Document(DOC_URI, workspace, DOC)
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update({"plugins": {"jedi_completion": {"include_params": True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [
completion
for completion in completions
if completion["label"] == "everyone(a, b, c, d)"
][0]
# Ensure we only generate snippets for positional args
assert everyone_method["insertTextFormat"] == lsp.InsertTextFormat.Snippet
assert everyone_method["insertText"] == "everyone(${1:a}, ${2:b})$0"
# Disable param snippets
config.update({"plugins": {"jedi_completion": {"include_params": False}}})
completions = pylsp_jedi_completions(config, doc, com_position)
everyone_method = [
completion
for completion in completions
if completion["label"] == "everyone(a, b, c, d)"
][0]
assert "insertTextFormat" not in everyone_method
assert everyone_method["insertText"] == "everyone"
@pytest.mark.skipif(
PY2 or (sys.platform.startswith("linux") and os.environ.get("CI") is not None),
reason="Test in Python 3 and not on CIs on Linux because wheels don't work on them.",
)
def test_pyqt_completion(config, workspace) -> None:
# Over 'QA' in 'from PyQt6.QtWidgets import QApplication'
doc_pyqt = "from PyQt6.QtWidgets import QA"
com_position = {"line": 0, "character": len(doc_pyqt)}
doc = Document(DOC_URI, workspace, doc_pyqt)
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions is not None
def test_numpy_completions(config, workspace) -> None:
doc_numpy = "import numpy as np; np."
com_position = {"line": 0, "character": len(doc_numpy)}
doc = Document(DOC_URI, workspace, doc_numpy)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any("array" in i["label"] for i in items)
def test_pandas_completions(config, workspace) -> None:
doc_pandas = "import pandas as pd; pd."
com_position = {"line": 0, "character": len(doc_pandas)}
doc = Document(DOC_URI, workspace, doc_pandas)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any("DataFrame" in i["label"] for i in items)
def test_matplotlib_completions(config, workspace) -> None:
doc_mpl = "import matplotlib.pyplot as plt; plt."
com_position = {"line": 0, "character": len(doc_mpl)}
doc = Document(DOC_URI, workspace, doc_mpl)
items = pylsp_jedi_completions(config, doc, com_position)
assert items
assert any("plot" in i["label"] for i in items)
def test_snippets_completion(config, workspace) -> None:
doc_snippets = "from collections import defaultdict \na=defaultdict"
com_position = {"line": 0, "character": 35}
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update({"plugins": {"jedi_completion": {"include_params": True}}})
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]["insertText"] == "defaultdict"
com_position = {"line": 1, "character": len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]["insertText"] == "defaultdict($0)"
assert completions[0]["insertTextFormat"] == lsp.InsertTextFormat.Snippet
def test_snippets_completion_at_most(config, workspace) -> None:
doc_snippets = "from collections import defaultdict \na=defaultdict"
doc = Document(DOC_URI, workspace, doc_snippets)
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update({"plugins": {"jedi_completion": {"include_params": True}}})
config.update({"plugins": {"jedi_completion": {"resolve_at_most": 0}}})
com_position = {"line": 1, "character": len(doc_snippets)}
completions = pylsp_jedi_completions(config, doc, com_position)
assert completions[0]["insertText"] == "defaultdict"
assert not completions[0].get("insertTextFormat", None)
def test_completion_with_class_objects(config, workspace) -> None:
doc_text = "class FOOBAR(Object): pass\nFOOB"
com_position = {"line": 1, "character": 4}
doc = Document(DOC_URI, workspace, doc_text)
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update(
{
"plugins": {
"jedi_completion": {
"include_params": True,
"include_class_objects": True,
}
}
}
)
completions = pylsp_jedi_completions(config, doc, com_position)
assert len(completions) == 2
assert completions[0]["label"] == "FOOBAR"
assert completions[0]["kind"] == lsp.CompletionItemKind.Class
assert completions[1]["label"] == "FOOBAR object"
assert completions[1]["kind"] == lsp.CompletionItemKind.TypeParameter
def test_completion_with_function_objects(config, workspace) -> None:
doc_text = "def foobar(): pass\nfoob"
com_position = {"line": 1, "character": 4}
doc = Document(DOC_URI, workspace, doc_text)
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update(
{
"plugins": {
"jedi_completion": {
"include_params": True,
"include_function_objects": True,
}
}
}
)
completions = pylsp_jedi_completions(config, doc, com_position)
assert len(completions) == 2
assert completions[0]["label"] == "foobar()"
assert completions[0]["kind"] == lsp.CompletionItemKind.Function
assert completions[1]["label"] == "foobar() object"
assert completions[1]["kind"] == lsp.CompletionItemKind.TypeParameter
def test_snippet_parsing(config, workspace) -> None:
doc = "divmod"
completion_position = {"line": 0, "character": 6}
doc = Document(DOC_URI, workspace, doc)
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update({"plugins": {"jedi_completion": {"include_params": True}}})
completions = pylsp_jedi_completions(config, doc, completion_position)
out = "divmod(${1:x}, ${2:y})$0"
if JEDI_VERSION == "0.18.0":
out = "divmod(${1:a}, ${2:b})$0"
assert completions[0]["insertText"] == out
def test_multiline_import_snippets(config, workspace) -> None:
document = "from datetime import(\n date,\n datetime)\na=date"
doc = Document(DOC_URI, workspace, document)
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update({"plugins": {"jedi_completion": {"include_params": True}}})
position = {"line": 1, "character": 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]["insertText"] == "date"
position = {"line": 2, "character": 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]["insertText"] == "datetime"
def test_multiline_snippets(config, workspace) -> None:
document = "from datetime import\\\n date,\\\n datetime \na=date"
doc = Document(DOC_URI, workspace, document)
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update({"plugins": {"jedi_completion": {"include_params": True}}})
position = {"line": 1, "character": 5}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]["insertText"] == "date"
position = {"line": 2, "character": 9}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]["insertText"] == "datetime"
def test_multistatement_snippet(config, workspace) -> None:
config.capabilities["textDocument"] = {
"completion": {"completionItem": {"snippetSupport": True}}
}
config.update({"plugins": {"jedi_completion": {"include_params": True}}})
document = "a = 1; from datetime import date"
doc = Document(DOC_URI, workspace, document)
position = {"line": 0, "character": len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]["insertText"] == "date"
document = "from math import fmod; a = fmod"
doc = Document(DOC_URI, workspace, document)
position = {"line": 0, "character": len(document)}
completions = pylsp_jedi_completions(config, doc, position)
assert completions[0]["insertText"] == "fmod(${1:x}, ${2:y})$0"
def test_jedi_completion_extra_paths(tmpdir, workspace) -> None:
# Create a tempfile with some content and pass to extra_paths
temp_doc_content = """
def spam():
pass
"""
p = tmpdir.mkdir("extra_path")
extra_paths = [str(p)]
p = p.join("foo.py")
p.write(temp_doc_content)
# Content of doc to test completion
doc_content = """import foo
foo.s"""
doc = Document(DOC_URI, workspace, doc_content)
# After 'foo.s' without extra paths
com_position = {"line": 1, "character": 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra paths
settings = {"pylsp": {"plugins": {"jedi": {"extra_paths": extra_paths}}}}
doc.update_config(settings)
# After 'foo.s' with extra paths
com_position = {"line": 1, "character": 5}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]["label"] == "spam()"
@pytest.mark.skipif(
PY2 or not LINUX or not CI, reason="tested on linux and python 3 only"
)
def test_jedi_completion_environment(workspace) -> None:
# Content of doc to test completion
doc_content = """import logh
"""
doc = Document(DOC_URI, workspace, doc_content)
# After 'import logh' with default environment
com_position = {"line": 0, "character": 11}
assert os.path.isdir("/tmp/pyenv/")
settings = {"pylsp": {"plugins": {"jedi": {"environment": None}}}}
doc.update_config(settings)
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions is None
# Update config extra environment
env_path = "/tmp/pyenv/bin/python"
settings = {"pylsp": {"plugins": {"jedi": {"environment": env_path}}}}
doc.update_config(settings)
# After 'import logh' with new environment
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]["label"] == "loghub"
resolved = pylsp_jedi_completion_item_resolve(doc._config, completions[0], doc)
assert "changelog generator" in resolved["documentation"]["value"].lower()
def test_document_path_completions(tmpdir, workspace_other_root_path) -> None:
# Create a dummy module out of the workspace's root_path and try to get
# completions for it in another file placed next to it.
module_content = """
def foo():
pass
"""
p = tmpdir.join("mymodule.py")
p.write(module_content)
# Content of doc to test completion
doc_content = """import mymodule
mymodule.f"""
doc_path = str(tmpdir) + os.path.sep + "myfile.py"
doc_uri = uris.from_fs_path(doc_path)
doc = Document(doc_uri, workspace_other_root_path, doc_content)
com_position = {"line": 1, "character": 10}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]["label"] == "foo()"
def test_file_completions(workspace, tmpdir) -> None:
# Create directory and a file to get completions for them.
# Note: `tmpdir`` is the root dir of the `workspace` fixture. That's why we use
# it here.
tmpdir.mkdir("bar")
file = tmpdir.join("foo.txt")
file.write("baz")
# Content of doc to test completion
doc_content = '"'
doc = Document(DOC_URI, workspace, doc_content)
# Request for completions
com_position = {"line": 0, "character": 1}
completions = pylsp_jedi_completions(doc._config, doc, com_position)
# Check completions
assert len(completions) == 2
assert [c["kind"] == lsp.CompletionItemKind.File for c in completions]
assert completions[0]["insertText"] == (
("bar" + "\\") if os.name == "nt" else ("bar" + "/")
)
assert completions[1]["insertText"] == 'foo.txt"'
# When snippets are supported, ensure that path separators are escaped.
support_snippet = {
"textDocument": {"completion": {"completionItem": {"snippetSupport": True}}}
}
doc._config.capabilities.update(support_snippet)
completions = pylsp_jedi_completions(doc._config, doc, com_position)
assert completions[0]["insertText"] == (
("bar" + "\\\\") if os.name == "nt" else ("bar" + "\\/")
)
assert completions[1]["insertText"] == 'foo.txt"'
|
TypeCase
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-shared/dagster_shared/yaml_utils/source_position.py
|
{
"start": 6056,
"end": 6354
}
|
class ____(NamedTuple):
"""A tree-like object (like a JSON-structured dict) and an accompanying SourcePositionTree.
Each tree node in the SourcePositionTree is expected to correspond to in value.
"""
value: Any
source_position_tree: SourcePositionTree
|
ValueAndSourcePositionTree
|
python
|
run-llama__llama_index
|
llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-elasticsearch/llama_index/storage/kvstore/elasticsearch/base.py
|
{
"start": 2132,
"end": 9780
}
|
class ____(BaseKVStore):
"""
Elasticsearch Key-Value store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
def __init__(
self,
index_name: str,
es_client: Optional[Any],
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
) -> None:
nest_asyncio.apply()
"""Init a ElasticsearchKVStore."""
try:
from elasticsearch import AsyncElasticsearch
except ImportError:
raise ImportError(IMPORT_ERROR_MSG)
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client: AsyncElasticsearch = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
return "llama_index-py-vs"
async def _create_index_if_not_exists(self, index_name: str) -> None:
"""
Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
index_settings = {"mappings": {"_source": {"enabled": True}}}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def put(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
self.put_all([(key, val)], collection=collection)
async def aput(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
await self.aput_all([(key, val)], collection=collection)
    def put_all(
        self,
        kv_pairs: List[Tuple[str, dict]],
        collection: str = DEFAULT_COLLECTION,
        batch_size: int = DEFAULT_BATCH_SIZE,
    ) -> None:
        """
        Put multiple key-value pairs into the store (synchronous wrapper).

        Args:
            kv_pairs (List[Tuple[str, dict]]): (key, value) pairs to insert
            collection (str): collection (index) name
            batch_size (int): number of documents per bulk request
        """
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a running
        # loop on 3.10+; kept as-is since the async client may be bound to this
        # loop (nest_asyncio is applied in __init__) — confirm before changing.
        return asyncio.get_event_loop().run_until_complete(
            self.aput_all(kv_pairs, collection, batch_size)
        )
    async def aput_all(
        self,
        kv_pairs: List[Tuple[str, dict]],
        collection: str = DEFAULT_COLLECTION,
        batch_size: int = DEFAULT_BATCH_SIZE,
    ) -> None:
        """
        Put multiple key-value pairs into the store via bulk indexing.

        Args:
            kv_pairs (List[Tuple[str, dict]]): (key, value) pairs to insert
            collection (str): collection (index) name; created if missing
            batch_size (int): number of documents per bulk request
        """
        await self._create_index_if_not_exists(collection)
        # Prepare documents with '_id' set to the key for batch insertion
        docs = [{"_id": key, **value} for key, value in kv_pairs]
        # Insert documents in batches
        for batch in (
            docs[i : i + batch_size] for i in range(0, len(docs), batch_size)
        ):
            requests = []
            for doc in batch:
                # Pull the id out so the value fields become the doc body and
                # '_id' is passed as bulk metadata instead of document content.
                doc_id = doc["_id"]
                doc.pop("_id")
                logger.debug(doc)
                request = {
                    "_op_type": "index",
                    "_index": collection,
                    **doc,
                    "_id": doc_id,
                }
                requests.append(request)
            # refresh=True makes the batch immediately visible to searches.
            await async_bulk(self.client, requests, chunk_size=batch_size, refresh=True)
    def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
        """
        Get a value from the store.

        Args:
            key (str): key
            collection (str): collection name

        Returns:
            The stored document, or ``None`` if the key does not exist.
        """
        # Synchronous wrapper around the async implementation.
        return asyncio.get_event_loop().run_until_complete(self.aget(key, collection))
    async def aget(
        self, key: str, collection: str = DEFAULT_COLLECTION
    ) -> Optional[dict]:
        """
        Get a value from the store.

        Args:
            key (str): key
            collection (str): collection name

        Returns:
            The document's ``_source``, or ``None`` if the key is absent.
        """
        await self._create_index_if_not_exists(collection)
        try:
            response = await self._client.get(index=collection, id=key, source=True)
            return response.body["_source"]
        except elasticsearch.NotFoundError:
            # Missing key is not an error for a KV store — report "no value".
            return None
    def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
        """
        Get all values from the store.

        Args:
            collection (str): collection name

        Returns:
            Mapping of document id to stored document.
        """
        # Synchronous wrapper around the async implementation.
        return asyncio.get_event_loop().run_until_complete(self.aget_all(collection))
    async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
        """
        Get all values from the store.

        Args:
            collection (str): collection name

        Returns:
            Mapping of document id to stored document.
        """
        await self._create_index_if_not_exists(collection)
        result = {}
        q = {"query": {"match_all": {}}}
        # async_scan streams every matching document (scroll API), so the whole
        # index is fetched without a single oversized response.
        async for doc in async_scan(client=self._client, index=collection, query=q):
            doc_id = doc["_id"]
            content = doc["_source"]
            result[doc_id] = content
        return result
    def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
        """
        Delete a value from the store.

        Args:
            key (str): key
            collection (str): collection name

        Returns:
            True iff a document was actually deleted.
        """
        # Synchronous wrapper around the async implementation.
        return asyncio.get_event_loop().run_until_complete(
            self.adelete(key, collection)
        )
    async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
        """
        Delete a value from the store.

        Args:
            key (str): key
            collection (str): collection name

        Returns:
            True iff a document was actually deleted.
        """
        await self._create_index_if_not_exists(collection)
        try:
            response = await self._client.delete(index=collection, id=key)
            return response.body["result"] == "deleted"
        except elasticsearch.NotFoundError:
            # Key was never present; deleting nothing is reported as False.
            return False
|
ElasticsearchKVStore
|
python
|
pytorch__pytorch
|
test/distributed/test_local_tensor.py
|
{
"start": 20130,
"end": 21119
}
|
class ____(LocalTensorTestBase):
    """DTensor op tests run under LocalTensorMode with a simulated 8-rank world."""

    # Number of simulated ranks used by LocalTensorMode / the device mesh.
    world_size = 8

    def test_dtensor_addmm(self):
        """addmm on a Shard(0) x Replicate distribution must match plain addmm."""
        with LocalTensorMode(self.world_size):
            device_mesh = self.build_device_mesh()
            shard_spec = [Shard(0)]
            replica_spec = [Replicate()]
            tensor_to_shard = torch.randn(12, 8)
            mat1 = distribute_tensor(tensor_to_shard, device_mesh, shard_spec)
            tensor_to_replicate = torch.randn(8, 4)
            mat2 = distribute_tensor(tensor_to_replicate, device_mesh, replica_spec)
            input_tensor = torch.randn(4)
            input = distribute_tensor(input_tensor, device_mesh, replica_spec)
            dist_res = torch.addmm(input, mat1, mat2)
            # Reference result computed entirely on the undistributed tensors.
            local_res = torch.addmm(input_tensor, tensor_to_shard, tensor_to_replicate)
            full_tensor = dist_res.full_tensor()
            self.assertEqual(full_tensor, local_res)
from torch.distributed._local_tensor._c10d import local_p2p_op, wait_all
|
TestLocalTensorWorld8
|
python
|
PrefectHQ__prefect
|
src/prefect/server/events/actions.py
|
{
"start": 55957,
"end": 57357
}
|
class ____(WorkQueueAction, ExternalDataAction):
    """Abstract action that issues one orchestration command against a work queue."""

    # Log-line template describing the command; set by concrete subclasses.
    _action_description: ClassVar[str]

    async def act(self, triggered_action: "TriggeredAction") -> None:
        """Resolve the target work queue, record it as a related resource,
        run the subclass's command, and fail on a non-2xx response."""
        work_queue_id = await self.work_queue_id_to_use(triggered_action)
        self._resulting_related_resources += [
            RelatedResource.model_validate(
                {
                    "prefect.resource.id": f"prefect.work-queue.{work_queue_id}",
                    "prefect.resource.role": "target",
                }
            )
        ]
        logger.info(
            self._action_description,
            extra={
                "work_queue_id": work_queue_id,
                **self.logging_context(triggered_action),
            },
        )
        async with await self.orchestration_client(triggered_action) as orchestration:
            response = await self.command(
                orchestration, work_queue_id, triggered_action
            )
            self._result_details["status_code"] = response.status_code
            # Anything outside 2xx (including redirects) is treated as failure.
            if response.status_code >= 300:
                raise ActionFailed(self.reason_from_response(response))

    @abc.abstractmethod
    async def command(
        self,
        orchestration: "OrchestrationClient",
        work_queue_id: UUID,
        triggered_action: "TriggeredAction",
    ) -> Response:
        """Issue the command to the Work Queue"""
|
WorkQueueCommandAction
|
python
|
davidhalter__jedi
|
test/refactor/extract_function.py
|
{
"start": 3298,
"end": 3466
}
|
class ____(int):
@staticmethod
def f(x):
#? 16 text {'new_name': 'ab'}
return ab()
# -------------------------------------------------- in-class-1
|
X
|
python
|
pytorch__pytorch
|
torch/nn/parallel/_functions.py
|
{
"start": 1233,
"end": 1778
}
|
class ____(Function):
    """Autograd Function that reduce-adds coalesced per-device gradient groups
    onto a single destination device."""

    @staticmethod
    def forward(ctx, destination, num_inputs, *grads):
        # *grads is a flat sequence of per-device groups, each num_inputs long;
        # record each group's source device for the backward pass.
        ctx.target_gpus = [
            grads[i].get_device() for i in range(0, len(grads), num_inputs)
        ]
        # Re-chunk the flat sequence back into per-device groups.
        grads_ = [grads[i : i + num_inputs] for i in range(0, len(grads), num_inputs)]
        return comm.reduce_add_coalesced(grads_, destination)

    @staticmethod
    def backward(ctx, *grad_outputs):
        # The two Nones correspond to the non-tensor forward inputs
        # (destination, num_inputs); the grads are broadcast back to sources.
        return (
            None,
            None,
        ) + Broadcast.apply(ctx.target_gpus, *grad_outputs)
|
ReduceAddCoalesced
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets_tests/snippet_checks/utils.py
|
{
"start": 9793,
"end": 17964
}
|
class ____:
    """Runs commands / checks files against snapshot ("snippet") files,
    optionally regenerating the snapshots when updating docs."""

    def __init__(
        self,
        snapshot_base_dir: Path,
        should_update_snippets: bool,
        global_snippet_replace_regexes: Sequence[tuple[str, str]],
    ) -> None:
        self._should_update_snippets = should_update_snippets
        # Monotonic counter used to number generated snippets.
        self._snip_number = 0
        self._snapshot_base_dir = snapshot_base_dir
        # Regex substitutions applied to every snippet (e.g. to scrub paths).
        self._global_snippet_replace_regexes = global_snippet_replace_regexes

    def get_next_snip_number(self) -> int:
        """Return the next 1-based snippet number, advancing the counter."""
        self._snip_number += 1
        return self._snip_number

    def run_command_and_snippet_output(
        self,
        cmd: Union[str, Sequence[str]],
        snippet_path: Optional[Union[Path, str]] = None,
        snippet_replace_regex: Optional[Sequence[tuple[str, str]]] = None,
        custom_comparison_fn: Optional[Callable[[str, str], bool]] = None,
        ignore_output: bool = False,
        expect_error: bool = False,
        print_cmd: Optional[str] = None,
        input_str: Optional[str] = None,
    ) -> str:
        """Run the given command and check that the output matches the contents of the snippet
        at `snippet_path`. If `update_snippets` is `True`, updates the snippet file with the
        output of the command.
        Args:
            cmd (Union[str, Sequence[str]): The command to run.
            snippet_path (Optional[Union[Path, str]]): Relative path to the snippet file to check/update.
            update_snippets (Optional[bool]): Whether to update the snippet file with the output.
            snippet_replace_regex (Optional[Sequence[tuple[str, str]]]): A list of regex
                substitution pairs to apply to the generated snippet file before checking it against the
                existing version. Note these will apply to both the command and the output of the
                command. Useful to remove dynamic content, e.g. the temporary directory path or
                timestamps.
            custom_comparison_fn (Optional[Callable]): A function that takes the output of the
                command and the snippet contents and returns whether they match. Useful for some
                commands (e.g. tree) where the output is frustratingly platform-dependent.
            ignore_output (bool): Whether to ignore the output of the command when updating the snippet.
                Useful when the output is too verbose or not meaningful.
        """
        output = _run_command(cmd, expect_error=expect_error, input_str=input_str)
        if snippet_path:
            # The snippet shows the command as typed (print_cmd overrides it).
            print_cmd = print_cmd if print_cmd else str(cmd)
            if ignore_output:
                contents = print_cmd
            else:
                contents = f"{print_cmd}\n\n{output}"
            _assert_matches_or_update_snippet(
                contents=contents,
                snippet_path=self._snapshot_base_dir / snippet_path,
                update_snippets=self._should_update_snippets,
                snippet_replace_regex=[
                    *self._global_snippet_replace_regexes,
                    *(snippet_replace_regex or []),
                ],
                custom_comparison_fn=custom_comparison_fn,
            )
        return output

    def check_file(
        self,
        file_path: Union[Path, str],
        snippet_path: Optional[Union[Path, str]] = None,
        snippet_replace_regex: Optional[Sequence[tuple[str, str]]] = None,
    ):
        """Check that the contents of the file at `file_path` match the contents of the snippet
        at `snippet_path`. If `update_snippets` is `True`, updates the snippet file with the
        contents of the file.
        Used for steps where we want to show the user the contents of a file (e.g. one that's
        generated by the framework, or by output).
        Args:
            file_path (Union[Path, str]): The path to the file to check.
            snippet_path (Optional[Union[Path, str]]): Relative path to the snippet file to check/update.
            update_snippets (Optional[bool]): Whether to update the snippet file with the file contents.
            snippet_replace_regex (Optional[Sequence[tuple[str, str]]]): A list of regex
                substitution pairs to apply to the file contents before checking it against the snippet.
                Useful to remove dynamic content, e.g. the temporary directory path or timestamps.
        """
        file_path = Path(file_path)
        assert file_path.exists(), f"Expected file {file_path} to exist"
        contents = file_path.read_text()
        if snippet_path:
            _assert_matches_or_update_snippet(
                contents=contents,
                snippet_path=self._snapshot_base_dir / snippet_path,
                update_snippets=self._should_update_snippets,
                snippet_replace_regex=[
                    *self._global_snippet_replace_regexes,
                    *(snippet_replace_regex or []),
                ],
                custom_comparison_fn=None,
            )

    def create_file(
        self,
        file_path: Union[Path, str],
        contents: str,
        snippet_path: Optional[Union[Path, str]] = None,
        snippet_replace_regex: Optional[Sequence[tuple[str, str]]] = None,
    ):
        """Create a file with the given contents. If `snippet_path` is provided, outputs
        the contents to the snippet file too.
        Used for steps where the user is expected to create a file.
        Args:
            file_path (Union[Path, str]): The path to the file to create.
            contents (str): The contents to write to the file.
            snippet_path (Optional[Union[Path, str]]): Relative path to the snippet file to update.
        """
        file_path = Path(file_path)
        file_path.parent.mkdir(parents=True, exist_ok=True)
        file_path.write_text(contents)
        if snippet_path:
            _assert_matches_or_update_snippet(
                contents=contents,
                snippet_path=self._snapshot_base_dir / snippet_path,
                # NOTE(review): unconditionally True here (unlike check_file) —
                # presumably intentional because the created file IS the source
                # of truth; confirm against callers.
                update_snippets=True,
                snippet_replace_regex=[
                    *self._global_snippet_replace_regexes,
                    *(snippet_replace_regex or []),
                ],
                custom_comparison_fn=None,
            )
@contextmanager
def isolated_snippet_generation_environment(
    should_update_snippets: bool,
    snapshot_base_dir: Path,
    global_snippet_replace_regexes: Optional[Sequence[tuple[str, str]]] = None,
    clear_snapshot_dir_before_update: bool = True,
) -> Iterator[SnippetGenerationContext]:
    """Yield a SnippetGenerationContext inside an isolated working dir with
    throwaway dg / dagster-cloud config, optionally wiping the snapshot dir
    first when snippets are being regenerated."""
    with (
        _get_snippet_working_dir() as tempdir,
        pushd(tempdir),
        TemporaryDirectory() as dg_cli_config_folder,
        TemporaryDirectory() as dagster_cloud_config_folder,
        environ(
            {
                **SNIPPET_ENV,
                "DG_CLI_CONFIG": str(Path(dg_cli_config_folder) / "dg.toml"),
                "DAGSTER_CLOUD_CLI_CONFIG": str(
                    Path(dagster_cloud_config_folder) / "config.yaml"
                ),
            }
        ),
    ):
        # Disable telemetry so snippet runs don't emit tracking events.
        dg_config_path = Path(dg_cli_config_folder) / "dg.toml"
        dg_config_path.write_text(
            """
[cli.telemetry]
enabled = false
"""
        )
        # When regenerating, start from a clean snapshot dir so stale
        # snippets from removed steps don't linger.
        if (
            should_update_snippets
            and snapshot_base_dir.exists()
            and clear_snapshot_dir_before_update
        ):
            shutil.rmtree(snapshot_base_dir)
            snapshot_base_dir.mkdir(parents=True, exist_ok=True)
        yield SnippetGenerationContext(
            snapshot_base_dir=snapshot_base_dir,
            should_update_snippets=should_update_snippets,
            global_snippet_replace_regexes=global_snippet_replace_regexes or [],
        )
def screenshot_page(
    get_webdriver: "Callable[[], webdriver.Chrome]",
    url: str,
    path: Path,
    update_screenshots: bool,
    width: Optional[int] = 1024,
    height: Optional[int] = 768,
) -> None:
    """Capture a screenshot of ``url`` into ``path`` at the given window size.

    No-op unless ``update_screenshots`` is True, so normal test runs skip the
    (slow) browser work.

    Args:
        get_webdriver: Factory returning a ready Chrome driver.
        url: Page to load.
        path: Destination file for the PNG screenshot.
        update_screenshots: Gate; when False nothing happens.
        width/height: Browser window size in pixels.
    """
    if not update_screenshots:
        return
    # Renamed from ``webdriver`` — the original local shadowed the selenium
    # ``webdriver`` module referenced in the annotation above.
    driver = get_webdriver()
    driver.set_window_size(width, height)
    driver.get(url)
    # save_screenshot expects a string path; older selenium releases do not
    # accept pathlib.Path objects.
    driver.save_screenshot(str(path))
|
SnippetGenerationContext
|
python
|
aio-libs__aiohttp
|
aiohttp/client_exceptions.py
|
{
"start": 7018,
"end": 7992
}
|
class ____(ClientError, ValueError):
    """Invalid URL.

    Raised when the URL used for fetching is malformed, e.g. it is missing
    the host part.
    """

    # Derive from ValueError for backward compatibility
    def __init__(self, url: StrOrURL, description: str | None = None) -> None:
        # The type of url is not yarl.URL because the exception can be raised
        # on URL(url) call
        self._url = url
        self._description = description
        # Pass the description through to the base exception only when set,
        # so ``args`` stays a 1-tuple in the common case.
        args = (url, description) if description else (url,)
        super().__init__(*args)

    @property
    def url(self) -> StrOrURL:
        """The offending URL as given by the caller."""
        return self._url

    @property
    def description(self) -> "str | None":
        """Optional human-readable detail about why the URL is invalid."""
        return self._description

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} {self}>"

    def __str__(self) -> str:
        if not self._description:
            return str(self._url)
        return f"{self._url} - {self._description}"
|
InvalidURL
|
python
|
networkx__networkx
|
networkx/exception.py
|
{
"start": 2446,
"end": 2560
}
|
class ____(NetworkXException):
    """Exception raised if a requested node is not present in the graph."""
|
NodeNotFound
|
python
|
huggingface__transformers
|
src/transformers/models/csm/modeling_csm.py
|
{
"start": 16534,
"end": 18438
}
|
class ____(GradientCheckpointingLayer):
    """Pre-norm transformer decoder layer: RMSNorm -> self-attention -> residual,
    then RMSNorm -> MLP -> residual."""

    def __init__(self, config: CsmConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        # layer_idx lets the attention module address its slice of the KV cache.
        self.self_attn = CsmAttention(config=config, layer_idx=layer_idx)
        self.mlp = CsmMLP(config)
        self.input_layernorm = CsmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = CsmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        """Apply one decoder layer; returns hidden states of the same shape."""
        residual = hidden_states
        # Normalize BEFORE attention (pre-norm), residual added after.
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
@auto_docstring(
custom_intro="""
The bare Csm Model outputting raw hidden-states without any specific head on top.
"""
)
@auto_docstring
|
CsmDecoderLayer
|
python
|
django__django
|
tests/serializers/models/data.py
|
{
"start": 2983,
"end": 3165
}
|
class ____(models.Model):
    """This is a model that can be used as
    something for other models to point at"""

    # unique=True — presumably so serializer tests can use it as a natural key;
    # confirm against the tests that reference this model.
    data = models.CharField(unique=True, max_length=30)
|
UniqueAnchor
|
python
|
dask__distributed
|
distributed/variable.py
|
{
"start": 4587,
"end": 8956
}
|
class ____:
    """Distributed Global Variable
    This allows multiple clients to share futures and data between each other
    with a single mutable variable. All metadata is sequentialized through the
    scheduler. Race conditions can occur.
    Values must be either Futures or msgpack-encodable data (ints, lists,
    strings, etc..) All data will be kept and sent through the scheduler, so
    it is wise not to send too much. If you want to share a large amount of
    data then ``scatter`` it and share the future instead.
    Parameters
    ----------
    name: string (optional)
        Name used by other clients and the scheduler to identify the variable.
        If not given, a random name will be generated.
    client: Client (optional)
        Client used for communication with the scheduler.
        If not given, the default global client will be used.
    Examples
    --------
    >>> from dask.distributed import Client, Variable # doctest: +SKIP
    >>> client = Client()  # doctest: +SKIP
    >>> x = Variable('x')  # doctest: +SKIP
    >>> x.set(123)  # docttest: +SKIP
    >>> x.get()  # docttest: +SKIP
    123
    >>> future = client.submit(f, x)  # doctest: +SKIP
    >>> x.set(future)  # doctest: +SKIP
    See Also
    --------
    Queue: shared multi-producer/multi-consumer queue between clients
    """

    def __init__(self, name=None, client=None):
        self._client = client
        self.name = name or "variable-" + uuid.uuid4().hex

    @property
    def client(self):
        # Lazily resolve the default global client; a missing client is left
        # as None (ValueError swallowed) and surfaced by _verify_running.
        if not self._client:
            try:
                self._client = get_client()
            except ValueError:
                pass
        return self._client

    def _verify_running(self):
        # Guard for objects deserialized outside a Client/Worker context.
        if not self.client:
            raise RuntimeError(
                f"{type(self)} object not properly initialized. This can happen"
                " if the object is being deserialized outside of the context of"
                " a Client or Worker."
            )

    async def _set(self, value, timeout):
        # Futures are shared by key; plain data is shipped through the scheduler.
        if isinstance(value, Future):
            await self.client.scheduler.variable_set(
                key=value.key, name=self.name, timeout=timeout
            )
        else:
            await self.client.scheduler.variable_set(
                data=value, name=self.name, timeout=timeout
            )

    def set(self, value, timeout="30 s", **kwargs):
        """Set the value of this variable
        Parameters
        ----------
        value : Future or object
            Must be either a Future or a msgpack-encodable value
        """
        self._verify_running()
        return self.client.sync(self._set, value, timeout=timeout, **kwargs)

    async def _get(self, timeout=None):
        d = await self.client.scheduler.variable_get(
            timeout=timeout, name=self.name, client=self.client.id
        )
        if d["type"] == "Future":
            # Rebuild a local Future handle and propagate any error state.
            value = Future(d["value"], self.client, state=d["state"])
            if d["state"] == "erred":
                value._state.set_error(d["exception"], d["traceback"])
            # Acknowledge receipt so the scheduler can manage refcounts.
            self.client._send_to_scheduler(
                {
                    "op": "variable-future-received-confirm",
                    "name": self.name,
                    "key": d["value"],
                    "token": d["token"],
                }
            )
        else:
            value = d["value"]
        return value

    def get(self, timeout=None, **kwargs):
        """Get the value of this variable
        Parameters
        ----------
        timeout : number or string or timedelta, optional
            Time in seconds to wait before timing out.
            Instead of number of seconds, it is also possible to specify
            a timedelta in string format, e.g. "200ms".
        """
        self._verify_running()
        timeout = parse_timedelta(timeout)
        return self.client.sync(self._get, timeout=timeout, **kwargs)

    def delete(self):
        """Delete this variable
        Caution, this affects all clients currently pointing to this variable.
        """
        self._verify_running()
        if self.client.status == "running":  # TODO: can leave zombie futures
            self.client._send_to_scheduler({"op": "variable_delete", "name": self.name})

    def __reduce__(self):
        # NOTE(review): references the name ``Variable``, which is undefined
        # under this class's current name — pickling would raise NameError;
        # confirm whether the class was renamed without updating __reduce__.
        return Variable, (self.name,)
|
Variable
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/check_ops_test.py
|
{
"start": 16201,
"end": 21225
}
|
class ____(test.TestCase):
    """Tests for check_ops.assert_near: default (10*eps) and custom rtol/atol,
    empty inputs, complex values, and eager-mode return value."""

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_when_equal(self):
        x = constant_op.constant(1., name="x")
        y = constant_op.constant(1., name="y")
        with ops.control_dependencies(
            [check_ops.assert_near(x, y, message="failure message")]):
            out = array_ops.identity(x)
        self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_when_close_enough_32_bit_due_to_default_rtol(self):
        eps = np.finfo(np.float32).eps
        # Default rtol/atol is 10*eps
        x = constant_op.constant(1., name="x")
        y = constant_op.constant(1. + 2 * eps, name="y", dtype=np.float32)
        with ops.control_dependencies(
            [check_ops.assert_near(x, y, atol=0., message="failure message")]):
            out = array_ops.identity(x)
        self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_when_close_enough_32_bit_due_to_default_atol(self):
        eps = np.finfo(np.float32).eps
        # Default rtol/atol is 10*eps
        x = constant_op.constant(0., name="x")
        y = constant_op.constant(0. + 2 * eps, name="y", dtype=np.float32)
        with ops.control_dependencies(
            [check_ops.assert_near(x, y, rtol=0., message="failure message")]):
            out = array_ops.identity(x)
        self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_when_close_enough_64_bit_due_to_default_rtol(self):
        eps = np.finfo(np.float64).eps
        # Default rtol/atol is 10*eps
        x = constant_op.constant(1., name="x", dtype=np.float64)
        y = constant_op.constant(1. + 2 * eps, name="y", dtype=np.float64)
        with ops.control_dependencies(
            [check_ops.assert_near(x, y, atol=0., message="failure message")]):
            out = array_ops.identity(x)
        self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_when_close_enough_64_bit_due_to_default_atol(self):
        eps = np.finfo(np.float64).eps
        # Default rtol/atol is 10*eps
        x = constant_op.constant(0., name="x", dtype=np.float64)
        y = constant_op.constant(0. + 2 * eps, name="y", dtype=np.float64)
        with ops.control_dependencies(
            [check_ops.assert_near(x, y, rtol=0., message="failure message")]):
            out = array_ops.identity(x)
        self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_when_close_enough_due_to_custom_rtol(self):
        x = constant_op.constant(1., name="x")
        y = constant_op.constant(1.1, name="y")
        with ops.control_dependencies(
            [check_ops.assert_near(x, y, atol=0., rtol=0.5,
                                   message="failure message")]):
            out = array_ops.identity(x)
        self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_when_close_enough_due_to_custom_atol(self):
        x = constant_op.constant(0., name="x")
        y = constant_op.constant(0.1, name="y", dtype=np.float32)
        with ops.control_dependencies(
            [check_ops.assert_near(x, y, atol=0.5, rtol=0.,
                                   message="failure message")]):
            out = array_ops.identity(x)
        self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_when_both_empty(self):
        larry = constant_op.constant([])
        curly = constant_op.constant([])
        with ops.control_dependencies([check_ops.assert_near(larry, curly)]):
            out = array_ops.identity(larry)
        self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_raises_when_atol_violated(self):
        x = constant_op.constant(10., name="x")
        y = constant_op.constant(10.2, name="y")
        with self.assertRaisesOpError(  # pylint:disable=g-error-prone-assert-raises
            "x and y not equal to tolerance"):
            with ops.control_dependencies(
                [check_ops.assert_near(x, y, atol=0.1,
                                       message="failure message")]):
                out = array_ops.identity(x)
            self.evaluate(out)

    @test_util.run_in_graph_and_eager_modes
    def test_raises_when_default_rtol_violated(self):
        x = constant_op.constant(0.1, name="x")
        y = constant_op.constant(0.0, name="y")
        with self.assertRaisesOpError(  # pylint:disable=g-error-prone-assert-raises
            "x and y not equal to tolerance"):
            with ops.control_dependencies(
                [check_ops.assert_near(x, y, message="failure message")]):
                out = array_ops.identity(x)
            self.evaluate(out)

    def test_returns_none_with_eager(self):
        # In eager mode assertions execute immediately; no op is returned.
        with context.eager_mode():
            t1 = constant_op.constant([1., 2.])
            t2 = constant_op.constant([1., 2.])
            x = check_ops.assert_near(t1, t2)
            assert x is None

    @test_util.run_in_graph_and_eager_modes
    def test_doesnt_raise_complex(self):
        x = constant_op.constant(1. + 0.1j, name="x")
        y = constant_op.constant(1.1 + 0.1j, name="y")
        with ops.control_dependencies([
            check_ops.assert_near(
                x, y, atol=0., rtol=0.5, message="failure message")
        ]):
            out = array_ops.identity(x)
        self.evaluate(out)
|
AssertAllCloseTest
|
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/iter/combining.py
|
{
"start": 10869,
"end": 15224
}
|
class ____(IterDataPipe):
    r"""
    Iterable Datapipe that is a child of a main DataPipe.
    The instance of this class will pass its instance_id to get the next value from its main DataPipe.
    Note:
        ChildDataPipe, like all other IterDataPipe, follows the single iterator per IterDataPipe constraint.
        Since ChildDataPipes share a common buffer, when an iterator is created for one of the ChildDataPipes,
        the previous iterators  for all ChildDataPipes must be invalidated, with the exception when a ChildDataPipe
        hasn't had an iterator created from it since the last invalidation. See the example below.
    Example:
        >>> # xdoctest: +REQUIRES(module:torchdata)
        >>> # Singler Iterator per IteraDataPipe Invalidation
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> source_dp = IterableWrapper(range(10))
        >>> cdp1, cdp2 = source_dp.fork(num_instances=2)
        >>> it1, it2 = iter(cdp1), iter(cdp2)
        >>> it3 = iter(cdp1)
        >>> # The line above invalidates `it1` and `it2`, and resets `ForkerIterDataPipe`.
        >>> it4 = iter(cdp2)
        >>> # The line above doesn't invalidate `it3`, because an iterator for `cdp2` hasn't been created since
        >>> # the last invalidation.
    Args:
        main_datapipe: Main DataPipe with a method 'get_next_element_by_instance(instance_id)'
        instance_id: integer identifier of this instance
    """

    # Marker consulted by the DataPipe iterator machinery (see _typing.py hooks).
    _is_child_datapipe: bool = True

    def __init__(self, main_datapipe: IterDataPipe, instance_id: int) -> None:
        if not isinstance(main_datapipe, _ContainerTemplate):
            raise AssertionError("main_datapipe must implement _ContainerTemplate")
        # pyrefly: ignore  [bad-assignment]
        self.main_datapipe: IterDataPipe = main_datapipe
        self.instance_id = instance_id

    def __iter__(self):
        # Note that the logic behind setting iterator ID and `reset` are handled within `hook_iterator`
        # We want to separate the code for reset and yield, so that 'reset' executes before __next__ is called
        return self.main_datapipe.get_next_element_by_instance(self.instance_id)

    def __len__(self) -> int:
        return self.main_datapipe.get_length_by_instance(self.instance_id)

    # This method is called by `hook_iterator` in `_typing.py`.
    def _set_main_datapipe_valid_iterator_id(self) -> int:
        r"""
        Update the valid iterator ID for both this DataPipe object and `main_datapipe`.
        `main_datapipe.reset()` is called when the ID is incremented to a new generation.
        """
        # 1. First time any child iterator is created
        if self.main_datapipe._valid_iterator_id is None:
            self.main_datapipe._valid_iterator_id = 0  # type: ignore[attr-defined]
        # 2. This instance was already in the same generation as `main_datapipe`,
        #    we need to increment the ID further by 1
        elif self.main_datapipe._valid_iterator_id == self._valid_iterator_id:  # type: ignore[has-type]
            self.main_datapipe._valid_iterator_id += 1  # type: ignore[attr-defined]
            # Whenever a new generation of iterator is created, the `main_datapipe` must reset
            if not self.main_datapipe.is_every_instance_exhausted():
                warnings.warn(
                    "Some child DataPipes are not exhausted when __iter__ is called. We are resetting "
                    "the buffer and each child DataPipe will read from the start again.",
                    UserWarning,
                    stacklevel=2,
                )
            self.main_datapipe.reset()
        # 3. Otherwise, the iterator is behind the others, so it will just need to catch up by setting
        #    the instance's iterator to match that of `main_datapipe`
        self._valid_iterator_id = self.main_datapipe._valid_iterator_id
        return self._valid_iterator_id

    # This method is called by `hook_iterator` in `_typing.py`.
    def _check_valid_iterator_id(self, iterator_id) -> bool:
        r"""Check the valid iterator ID against that of DataPipe object and that of `main_datapipe`."""
        return (
            iterator_id == self._valid_iterator_id
            and iterator_id == self.main_datapipe._valid_iterator_id
        )
@functional_datapipe("demux")
|
_ChildDataPipe
|
python
|
coleifer__peewee
|
tests/postgres.py
|
{
"start": 1148,
"end": 1300
}
|
class ____(TestModel):
    """Test model: array of timestamps keyed by a natural (char) primary key."""

    key = CharField(max_length=100, primary_key=True)
    # convert_values=True — presumably applies TimestampField's value
    # conversion to each array element; confirm against peewee's ArrayField.
    timestamps = ArrayField(TimestampField, convert_values=True)
|
ArrayTSModel
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 756,
"end": 1025
}
|
class ____(sgqlc.types.Enum):
    """Properties by which Audit Log connections can be ordered.
    Enumeration Choices:
    * `CREATED_AT`: Order audit log entries by timestamp
    """

    # Bind this enum to the generated GitHub GraphQL schema.
    __schema__ = github_schema
    __choices__ = ("CREATED_AT",)
|
AuditLogOrderField
|
python
|
getsentry__sentry
|
tests/sentry/uptime/subscriptions/test_tasks.py
|
{
"start": 15327,
"end": 15701
}
|
class ____(ConfigPusherTestMixin):
    """Tests for send_uptime_config_deletion."""

    def test_with_region(self) -> None:
        """Deleting with a region slug should write a redis 'delete' entry."""
        subscription_id = uuid4().hex
        region_slug = "default"
        send_uptime_config_deletion(region_slug, subscription_id)
        self.assert_redis_config(
            region_slug, UptimeSubscription(subscription_id=subscription_id), "delete", None
        )
|
SendUptimeConfigDeletionTest
|
python
|
great-expectations__great_expectations
|
great_expectations/metrics/batch/batch.py
|
{
"start": 241,
"end": 398
}
|
class ____(Metric[_MetricResult], kw_only=True):
    """Base metric parameterized over a batch, with optional row filtering."""

    # Optional row-filter expression — presumably restricts which rows the
    # metric computes over; confirm against Metric's condition handling.
    row_condition: Optional[StrictStr] = None
    # Parser dialect used to interpret row_condition.
    condition_parser: Optional[ConditionParser] = None
|
BatchMetric
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/metrics.py
|
{
"start": 111736,
"end": 113354
}
|
class ____(MeanMetricWrapper):
  """Computes the crossentropy metric between the labels and predictions.
  This is the crossentropy metric class to be used when there are only two
  label classes (0 and 1).
  Args:
    name: (Optional) string name of the metric instance.
    dtype: (Optional) data type of the metric result.
    from_logits: (Optional )Whether output is expected to be a logits tensor.
      By default, we consider that output encodes a probability distribution.
    label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
      smoothed, meaning the confidence on label values are relaxed.
      e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for
      label `0` and `0.9` for label `1`".
  Standalone usage:
  >>> m = tf.keras.metrics.BinaryCrossentropy()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
  >>> m.result().numpy()
  0.81492424
  >>> m.reset_state()
  >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
  ...                sample_weight=[1, 0])
  >>> m.result().numpy()
  0.9162905
  Usage with `compile()` API:
  ```python
  model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.BinaryCrossentropy()])
  ```
  """

  def __init__(self,
               name='binary_crossentropy',
               dtype=None,
               from_logits=False,
               label_smoothing=0):
    # Fix: the original called ``super(BinaryCrossentropy, self).__init__``,
    # hard-coding a class name that is undefined under this class's actual
    # name and would raise NameError; zero-argument super() is equivalent
    # and is the Python 3 idiom.
    super().__init__(
        binary_crossentropy,
        name,
        dtype=dtype,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
|
BinaryCrossentropy
|
python
|
astropy__astropy
|
astropy/convolution/tests/test_convolve_kernels.py
|
{
"start": 1566,
"end": 4403
}
|
class ____:
    """Cross-checks convolve vs convolve_fft on 2-D kernels: they must agree
    to ~12 decimal places for delta-function and random inputs."""

    @pytest.mark.parametrize("kernel", KERNELS)
    def test_centered_makekernel(self, kernel):
        """
        Test smoothing of an image with a single positive pixel
        """
        shape = kernel.array.shape
        x = np.zeros(shape)
        # Place a unit impulse at the center of the image.
        xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
        x[xslice] = 1.0
        c2 = convolve_fft(x, kernel, boundary="fill")
        c1 = convolve(x, kernel, boundary="fill")
        assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize("kernel", KERNELS)
    def test_random_makekernel(self, kernel):
        """
        Test smoothing of an image made of random noise
        """
        shape = kernel.array.shape
        x = np.random.randn(*shape)
        c2 = convolve_fft(x, kernel, boundary="fill")
        c1 = convolve(x, kernel, boundary="fill")
        # not clear why, but these differ by a couple ulps...
        assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize("shape", SHAPES_ODD)
    @pytest.mark.parametrize("width", WIDTHS)
    def test_uniform_smallkernel(self, shape, width):
        """
        Test smoothing of an image with a single positive pixel
        Uses a simple, small kernel
        """
        if width % 2 == 0:
            # convolve does not accept odd-shape kernels
            return
        kernel = np.ones([width, width])
        x = np.zeros(shape)
        xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
        x[xslice] = 1.0
        c2 = convolve_fft(x, kernel, boundary="fill")
        c1 = convolve(x, kernel, boundary="fill")
        assert_almost_equal(c1, c2, decimal=12)

    @pytest.mark.parametrize("shape", SHAPES_ODD)
    @pytest.mark.parametrize("width", [1, 3, 5])
    def test_smallkernel_Box2DKernel(self, shape, width):
        """
        Test smoothing of an image with a single positive pixel
        Compares a small uniform kernel to the Box2DKernel
        """
        # Normalized uniform kernel must match the oversampled Box2DKernel.
        kernel1 = np.ones([width, width]) / float(width) ** 2
        kernel2 = Box2DKernel(width, mode="oversample", factor=10)
        x = np.zeros(shape)
        xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
        x[xslice] = 1.0
        c2 = convolve_fft(x, kernel2, boundary="fill")
        c1 = convolve_fft(x, kernel1, boundary="fill")
        assert_almost_equal(c1, c2, decimal=12)
        c2 = convolve(x, kernel2, boundary="fill")
        c1 = convolve(x, kernel1, boundary="fill")
        assert_almost_equal(c1, c2, decimal=12)
def test_gaussian_2d_kernel_quantity():
# Make sure that the angle can be a quantity
kernel1 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=45 * u.deg)
kernel2 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=np.pi / 4)
assert_allclose(kernel1.array, kernel2.array)
|
Test2DConvolutions
|
python
|
pennersr__django-allauth
|
allauth/account/forms.py
|
{
"start": 20463,
"end": 20626
}
|
class ____(forms.Form):
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(UserForm, self).__init__(*args, **kwargs)
|
UserForm
|
python
|
doocs__leetcode
|
solution/2800-2899/2898.Maximum Linear Stock Score/Solution.py
|
{
"start": 0,
"end": 190
}
|
class ____:
def maxScore(self, prices: List[int]) -> int:
cnt = Counter()
for i, x in enumerate(prices):
cnt[x - i] += x
return max(cnt.values())
|
Solution
|
python
|
huggingface__transformers
|
tests/models/patchtsmixer/test_modeling_patchtsmixer.py
|
{
"start": 2057,
"end": 7349
}
|
class ____:
def __init__(
self,
context_length: int = 32,
patch_length: int = 8,
num_input_channels: int = 3,
patch_stride: int = 8,
# d_model: int = 128,
hidden_size: int = 8,
# num_layers: int = 8,
num_hidden_layers: int = 2,
expansion_factor: int = 2,
dropout: float = 0.5,
mode: str = "common_channel",
gated_attn: bool = True,
norm_mlp="LayerNorm",
swin_hier: int = 0,
# masking related
mask_type: str = "forecast",
random_mask_ratio=0.5,
mask_patches: list = [2, 3],
forecast_mask_ratios: list = [1, 1],
mask_value=0,
masked_loss: bool = False,
mask_mode: str = "mask_before_encoder",
channel_consistent_masking: bool = True,
scaling: str | bool | None = "std",
# Head related
head_dropout: float = 0.2,
# forecast related
prediction_length: int = 16,
out_channels: int | None = None,
# Classification/regression related
# num_labels: int = 3,
num_targets: int = 3,
output_range: list | None = None,
head_aggregation: str | None = None,
# Trainer related
batch_size=13,
is_training=True,
seed_number=42,
post_init=True,
num_parallel_samples=4,
):
self.num_input_channels = num_input_channels
self.context_length = context_length
self.patch_length = patch_length
self.patch_stride = patch_stride
# self.d_model = d_model
self.hidden_size = hidden_size
self.expansion_factor = expansion_factor
# self.num_layers = num_layers
self.num_hidden_layers = num_hidden_layers
self.dropout = dropout
self.mode = mode
self.gated_attn = gated_attn
self.norm_mlp = norm_mlp
self.swin_hier = swin_hier
self.scaling = scaling
self.head_dropout = head_dropout
# masking related
self.mask_type = mask_type
self.random_mask_ratio = random_mask_ratio
self.mask_patches = mask_patches
self.forecast_mask_ratios = forecast_mask_ratios
self.mask_value = mask_value
self.channel_consistent_masking = channel_consistent_masking
self.mask_mode = mask_mode
self.masked_loss = masked_loss
# patching related
self.patch_last = True
# forecast related
self.prediction_length = prediction_length
self.out_channels = out_channels
# classification/regression related
# self.num_labels = num_labels
self.num_targets = num_targets
self.output_range = output_range
self.head_aggregation = head_aggregation
# Trainer related
self.batch_size = batch_size
self.is_training = is_training
self.seed_number = seed_number
self.post_init = post_init
self.num_parallel_samples = num_parallel_samples
def get_config(self):
config_ = PatchTSMixerConfig(
num_input_channels=self.num_input_channels,
context_length=self.context_length,
patch_length=self.patch_length,
patch_stride=self.patch_stride,
# d_model = self.d_model,
d_model=self.hidden_size,
expansion_factor=self.expansion_factor,
# num_layers = self.num_layers,
num_layers=self.num_hidden_layers,
dropout=self.dropout,
mode=self.mode,
gated_attn=self.gated_attn,
norm_mlp=self.norm_mlp,
swin_hier=self.swin_hier,
scaling=self.scaling,
head_dropout=self.head_dropout,
mask_type=self.mask_type,
random_mask_ratio=self.random_mask_ratio,
mask_patches=self.mask_patches,
forecast_mask_ratios=self.forecast_mask_ratios,
mask_value=self.mask_value,
channel_consistent_masking=self.channel_consistent_masking,
mask_mode=self.mask_mode,
masked_loss=self.masked_loss,
prediction_length=self.prediction_length,
out_channels=self.out_channels,
# num_labels=self.num_labels,
num_targets=self.num_targets,
output_range=self.output_range,
head_aggregation=self.head_aggregation,
post_init=self.post_init,
)
self.num_patches = config_.num_patches
return config_
def prepare_patchtsmixer_inputs_dict(self, config):
_past_length = config.context_length
# bs, n_vars, num_patch, patch_length
# [bs x context_length x n_vars]
past_values = floats_tensor([self.batch_size, _past_length, self.num_input_channels])
inputs_dict = {
"past_values": past_values,
}
return inputs_dict
def prepare_config_and_inputs(self):
config = self.get_config()
inputs_dict = self.prepare_patchtsmixer_inputs_dict(config)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
@require_torch
|
PatchTSMixerModelTester
|
python
|
ijl__orjson
|
test/test_ujson.py
|
{
"start": 93,
"end": 10730
}
|
class ____:
def test_doubleLongIssue(self):
sut = {"a": -4342969734183514}
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
assert sut == decoded
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
assert sut == decoded
def test_doubleLongDecimalIssue(self):
sut = {"a": -12345678901234.56789012}
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
assert sut == decoded
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
assert sut == decoded
def test_encodeDecodeLongDecimal(self):
sut = {"a": -528656961.4399388}
encoded = orjson.dumps(sut)
orjson.loads(encoded)
def test_decimalDecodeTest(self):
sut = {"a": 4.56}
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
pytest.approx(sut["a"], decoded["a"])
def test_encodeDictWithUnicodeKeys(self):
val = {
"key1": "value1",
"key1": "value1",
"key1": "value1",
"key1": "value1",
"key1": "value1",
"key1": "value1",
}
orjson.dumps(val)
val = {
"بن": "value1",
"بن": "value1",
"بن": "value1",
"بن": "value1",
"بن": "value1",
"بن": "value1",
"بن": "value1",
}
orjson.dumps(val)
def test_encodeArrayOfNestedArrays(self):
val = [[[[]]]] * 20 # type: ignore
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
def test_encodeArrayOfDoubles(self):
val = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
def test_encodeStringConversion2(self):
val = "A string \\ / \b \f \n \r \t"
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == b'"A string \\\\ / \\b \\f \\n \\r \\t"'
assert val == orjson.loads(output)
def test_decodeUnicodeConversion(self):
pass
def test_encodeUnicodeConversion1(self):
val = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_encodeControlEscaping(self):
val = "\x19"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert val == dec
assert enc == orjson.dumps(val)
def test_encodeUnicodeConversion2(self):
val = "\xe6\x97\xa5\xd1\x88"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_encodeUnicodeSurrogatePair(self):
val = "\xf0\x90\x8d\x86"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_encodeUnicode4BytesUTF8(self):
val = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_encodeUnicode4BytesUTF8Highest(self):
val = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def testEncodeUnicodeBMP(self):
s = "\U0001f42e\U0001f42e\U0001f42d\U0001f42d" # 🐮🐮🐭🐭
orjson.dumps(s)
json.dumps(s)
assert json.loads(json.dumps(s)) == s
assert orjson.loads(orjson.dumps(s)) == s
def testEncodeSymbols(self):
s = "\u273f\u2661\u273f" # ✿♡✿
encoded = orjson.dumps(s)
encoded_json = json.dumps(s)
decoded = orjson.loads(encoded)
assert s == decoded
encoded = orjson.dumps(s)
# json outputs an unicode object
encoded_json = json.dumps(s, ensure_ascii=False)
assert encoded == encoded_json.encode("utf-8")
decoded = orjson.loads(encoded)
assert s == decoded
def test_encodeArrayInArray(self):
val = [[[[]]]] # type: ignore
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeIntConversion(self):
val = 31337
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeIntNegConversion(self):
val = -31337
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeLongNegConversion(self):
val = -9223372036854775808
output = orjson.dumps(val)
orjson.loads(output)
orjson.loads(output)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeListConversion(self):
val = [1, 2, 3, 4]
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
def test_encodeDictConversion(self):
val = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
def test_encodeNoneConversion(self):
val = None
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeTrueConversion(self):
val = True
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeFalseConversion(self):
val = False
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeToUTF8(self):
val = b"\xe6\x97\xa5\xd1\x88".decode("utf-8")
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_decodeFromUnicode(self):
val = '{"obj": 31337}'
dec1 = orjson.loads(val)
dec2 = orjson.loads(str(val))
assert dec1 == dec2
def test_decodeJibberish(self):
val = "fdsa sda v9sa fdsa"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenArrayStart(self):
val = "["
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenObjectStart(self):
val = "{"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenArrayEnd(self):
val = "]"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenObjectEnd(self):
val = "}"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeObjectDepthTooBig(self):
val = "{" * (1024 * 1024)
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeStringUnterminated(self):
val = '"TESTING'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeStringUntermEscapeSequence(self):
val = '"TESTING\\"'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeStringBadEscape(self):
val = '"TESTING\\"'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeTrueBroken(self):
val = "tru"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeFalseBroken(self):
val = "fa"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeNullBroken(self):
val = "n"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenDictKeyTypeLeakTest(self):
val = '{{1337:""}}'
for _ in range(1000):
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenDictLeakTest(self):
val = '{{"key":"}'
for _ in range(1000):
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenListLeakTest(self):
val = "[[[true"
for _ in range(1000):
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeDictWithNoKey(self):
val = "{{{{31337}}}}"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeDictWithNoColonOrValue(self):
val = '{{{{"key"}}}}'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeDictWithNoValue(self):
val = '{{{{"key":}}}}'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeNumericIntPos(self):
val = "31337"
assert 31337 == orjson.loads(val)
def test_decodeNumericIntNeg(self):
assert -31337 == orjson.loads("-31337")
def test_encodeNullCharacter(self):
val = "31337 \x00 1337"
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
val = "\x00"
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
assert b'" \\u0000\\r\\n "' == orjson.dumps(" \u0000\r\n ")
def test_decodeNullCharacter(self):
val = '"31337 \\u0000 31337"'
assert orjson.loads(val) == json.loads(val)
def test_decodeEscape(self):
base = "\u00e5".encode("utf-8")
quote = b'"'
val = quote + base + quote
assert json.loads(val) == orjson.loads(val)
def test_decodeBigEscape(self):
for _ in range(10):
base = "\u00e5".encode("utf-8")
quote = b'"'
val = quote + (base * 1024 * 1024 * 2) + quote
assert json.loads(val) == orjson.loads(val)
|
TestUltraJSON
|
python
|
huggingface__transformers
|
src/transformers/models/metaclip_2/modular_metaclip_2.py
|
{
"start": 953,
"end": 4616
}
|
class ____(CLIPTextConfig):
r"""
This is the configuration class to store the configuration of a [`MetaClip2TextModel`]. It is used to instantiate
a MetaClip2 text encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MetaClip2
[facebook/metaclip-2-worldwide-huge-quickgelu](https://huggingface.co/facebook/metaclip-2-worldwide-huge-quickgelu) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the MetaClip2 text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`MetaClip2TextModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
projection_dim (`int`, *optional*, defaults to 512):
Dimensionality of text and vision projection layers.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 49406):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 49407):
End of stream token id.
Example:
```python
>>> from transformers import MetaClip2TextConfig, MetaClip2TextModel
>>> # Initializing a MetaClip2TextConfig with facebook/metaclip-2-worldwide-huge-quickgelu style configuration
>>> configuration = MetaClip2TextConfig()
>>> # Initializing a MetaClip2TextModel (with random weights) from the facebook/metaclip-2-worldwide-huge-quickgelu style configuration
>>> model = MetaClip2TextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
|
MetaClip2TextConfig
|
python
|
getsentry__sentry
|
src/sentry/utils/imports.py
|
{
"start": 25,
"end": 690
}
|
class ____(dict[str, object]):
def __missing__(self, key: str) -> object:
if "." not in key:
return __import__(key)
module_name, class_name = key.rsplit(".", 1)
module = __import__(module_name, {}, {}, [class_name])
handler = getattr(module, class_name)
# We cache a NoneType for missing imports to avoid repeated lookups
self[key] = handler
return handler
_cache = ModuleProxyCache()
def import_string(path: str) -> Any:
"""
Path must be module.path.ClassName
>>> cls = import_string('sentry.models.Group')
"""
result = _cache[path]
return result
|
ModuleProxyCache
|
python
|
justquick__django-activity-stream
|
actstream/tests/test_drf.py
|
{
"start": 1299,
"end": 5166
}
|
class ____(BaseDRFTestCase):
def test_actstream(self):
actions = self.get(reverse('action-list'))
assert len(actions) == 11
follows = self.get(reverse('follow-list'))
assert len(follows) == 6
@skipUnless(DRF_SETTINGS['HYPERLINK_FIELDS'], 'Related hyperlinks disabled')
def test_hyperlink_fields(self):
actions = self.get(reverse('action-list'))
action = self.get(reverse('action-detail', args=[actions[0]["id"]]))
assert action['timestamp'].startswith('2000-01-01T00:00:00')
assert action['actor'].startswith('http')
@skipUnless(DRF_SETTINGS['EXPAND_FIELDS'], 'Related expanded fields disabled')
def test_expand_fields(self):
actions = self.get(reverse('action-list'))
action = self.get(reverse('action-detail', args=[actions[0]["id"]]))
assert action['timestamp'].startswith('2000-01-01T00:00:00')
self.assertIsInstance(action['target'], dict)
assert action['target']['username'] == 'Three'
def test_urls(self):
self._check_urls('actions', 'follows')
def test_permissions(self):
users = self.get(reverse('myuser-list'))
assert str(users['detail']) == 'Authentication credentials were not provided.'
users = self.get(reverse('myuser-list'), auth=True)
assert len(users) == 4
def test_model_fields(self):
sites = self.get(reverse('site-list'))
self.assertSetEqual(sites[0].keys(), ['id', 'domain'])
def test_viewset(self):
resp = self.client.head(reverse('group-foo'))
assert resp.status_code == 420
assert resp.data == ['chill']
def test_my_actions(self):
actions = self.get(reverse('action-my-actions'), auth=True)
assert len(actions) == 3
assert actions[0]['verb'] == 'joined'
def test_model(self):
actions = self.get(reverse('action-model-stream', args=[self.group_ct.id]), auth=True)
assert len(actions) == 7
assert actions[0]['verb'] == 'joined'
def test_target(self):
actions = self.get(reverse('action-target-stream', args=[self.group_ct.id, self.another_group.id]), auth=True)
assert len(actions) == 2
assert actions[0]['target']['name'] == actions[1]['target']['name'] == 'NiceGroup'
def test_action_object(self):
signals.action.send(self.user1, verb='created comment',
action_object=self.comment, target=self.group,
timestamp=self.testdate)[0][1]
url = reverse('action-action-object-stream', args=[self.site_ct.id, self.comment.id])
actions = self.get(url, auth=True)
assert len(actions) == 1
assert actions[0]['verb'] == 'created comment'
def test_any(self):
url = reverse('action-any-stream', args=[self.user_ct.id, self.auth_user.id])
actions = self.get(url, auth=True)
assert len(actions) == 4
assert actions[0]['verb'] == 'joined'
def test_following(self):
actions = self.get(reverse('action-following'), auth=True)
assert len(actions) == 2
assert actions[0]['actor']['username'] == actions[1]['actor']['username'] == 'Two'
def test_action_send(self):
body = {
'verb': 'mentioned',
'description': 'talked about a group',
'target_content_type_id': self.group_ct.id,
'target_object_id': self.group.id
}
post = self.auth_client.post(reverse('action-send'), body)
assert post.status_code == 201
action = Action.objects.first()
assert action.description == body['description']
assert action.verb == body['verb']
assert action.actor == self.user1
assert action.target == self.group
@skipUnless(USE_DRF, 'Django rest framework disabled')
|
DRFActionTestCase
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/instrumentation/events/llm.py
|
{
"start": 4491,
"end": 5121
}
|
class ____(BaseEvent):
"""
LLMChatInProgressEvent.
Args:
messages (List[ChatMessage]): List of chat messages.
response (ChatResponse): Chat response currently being streamed.
"""
messages: List[ChatMessage]
response: ChatResponse
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMChatInProgressEvent"
def model_dump(self, **kwargs: Any) -> Dict[str, Any]:
if isinstance(self.response.raw, BaseModel):
self.response.raw = self.response.raw.model_dump()
return super().model_dump(**kwargs)
|
LLMChatInProgressEvent
|
python
|
Textualize__textual
|
src/textual/css/tokenize.py
|
{
"start": 5563,
"end": 6965
}
|
class ____:
EXPECT: ClassVar[Expect] = expect_root_scope
STATE_MAP: ClassVar[dict[str, Expect]] = {}
STATE_PUSH: ClassVar[dict[str, Expect]] = {}
STATE_POP: ClassVar[dict[str, str]] = {}
def __init__(self) -> None:
self._expect: Expect = self.EXPECT
super().__init__()
def expect(self, expect: Expect) -> None:
self._expect = expect
def __call__(self, code: str, read_from: CSSLocation) -> Iterable[Token]:
tokenizer = Tokenizer(code, read_from=read_from)
get_token = tokenizer.get_token
get_state = self.STATE_MAP.get
state_stack: list[Expect] = []
while True:
expect = self._expect
token = get_token(expect)
name = token.name
if name in self.STATE_MAP:
self._expect = get_state(token.name, expect)
elif name in self.STATE_PUSH:
self._expect = self.STATE_PUSH[name]
state_stack.append(expect)
elif name in self.STATE_POP:
if state_stack:
self._expect = state_stack.pop()
else:
self._expect = self.EXPECT
token = token._replace(name="end_tag")
yield token
continue
yield token
if name == "eof":
break
|
TokenizerState
|
python
|
fastai__fastai
|
fastai/layers.py
|
{
"start": 6957,
"end": 9492
}
|
class ____(nn.Sequential):
"Module grouping `BatchNorm1d`, `Dropout` and `Linear` layers"
def __init__(self, n_in, n_out, bn=True, p=0., act=None, lin_first=False):
layers = [BatchNorm(n_out if lin_first else n_in, ndim=1)] if bn else []
if p != 0: layers.append(nn.Dropout(p))
lin = [nn.Linear(n_in, n_out, bias=not bn)]
if act is not None: lin.append(act)
layers = lin+layers if lin_first else layers+lin
super().__init__(*layers)
# %% ../nbs/01_layers.ipynb 51
def sigmoid(input, eps=1e-7):
"Same as `torch.sigmoid`, plus clamping to `(eps,1-eps)"
return input.sigmoid().clamp(eps,1-eps)
# %% ../nbs/01_layers.ipynb 52
def sigmoid_(input, eps=1e-7):
"Same as `torch.sigmoid_`, plus clamping to `(eps,1-eps)"
return input.sigmoid_().clamp_(eps,1-eps)
# %% ../nbs/01_layers.ipynb 53
from torch.nn.init import kaiming_uniform_,uniform_,xavier_uniform_,normal_
# %% ../nbs/01_layers.ipynb 54
def vleaky_relu(input, inplace=True):
"`F.leaky_relu` with 0.3 slope"
return F.leaky_relu(input, negative_slope=0.3, inplace=inplace)
# %% ../nbs/01_layers.ipynb 55
for o in F.relu,nn.ReLU,F.relu6,nn.ReLU6,F.leaky_relu,nn.LeakyReLU:
o.__default_init__ = kaiming_uniform_
# %% ../nbs/01_layers.ipynb 56
for o in F.sigmoid,nn.Sigmoid,F.tanh,nn.Tanh,sigmoid,sigmoid_:
o.__default_init__ = xavier_uniform_
# %% ../nbs/01_layers.ipynb 57
def init_default(m, func=nn.init.kaiming_normal_):
"Initialize `m` weights with `func` and set `bias` to 0."
if func and hasattr(m, 'weight'): func(m.weight)
with torch.no_grad(): nested_callable(m, 'bias.fill_')(0.)
return m
# %% ../nbs/01_layers.ipynb 58
def init_linear(m, act_func=None, init='auto', bias_std=0.01):
if getattr(m,'bias',None) is not None and bias_std is not None:
if bias_std != 0: normal_(m.bias, 0, bias_std)
else: m.bias.data.zero_()
if init=='auto':
if act_func in (F.relu_,F.leaky_relu_): init = kaiming_uniform_
else: init = nested_callable(act_func, '__class__.__default_init__')
if init == noop: init = getcallable(act_func, '__default_init__')
if callable(init): init(m.weight)
# %% ../nbs/01_layers.ipynb 60
def _conv_func(ndim=2, transpose=False):
"Return the proper conv `ndim` function, potentially `transposed`."
assert 1 <= ndim <=3
return getattr(nn, f'Conv{"Transpose" if transpose else ""}{ndim}d')
# %% ../nbs/01_layers.ipynb 62
defaults.activation=nn.ReLU
# %% ../nbs/01_layers.ipynb 63
|
LinBnDrop
|
python
|
bokeh__bokeh
|
src/bokeh/models/map_plots.py
|
{
"start": 3435,
"end": 4771
}
|
class ____(MapOptions):
''' Options for ``GMapPlot`` objects.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
map_type = Enum(MapType, default="roadmap", help="""
The `map type`_ to use for the ``GMapPlot``.
.. _map type: https://developers.google.com/maps/documentation/javascript/reference#MapTypeId
""")
scale_control = Bool(default=False, help="""
Whether the Google map should display its distance scale control.
""")
styles = Nullable(JSON, default=None, help="""
A JSON array of `map styles`_ to use for the ``GMapPlot``. Many example styles can
`be found here`_.
.. _map styles: https://developers.google.com/maps/documentation/javascript/reference#MapTypeStyle
.. _be found here: https://snazzymaps.com
""")
tilt = Int(default=45, help="""
`Tilt`_ angle of the map. The only allowed values are 0 and 45.
Only has an effect on 'satellite' and 'hybrid' map types.
A value of 0 causes the map to always use a 0 degree overhead view.
A value of 45 causes the tilt angle to switch to 45 imagery if available.
.. _Tilt: https://developers.google.com/maps/documentation/javascript/reference/3/map#MapOptions.tilt
""")
|
GMapOptions
|
python
|
ray-project__ray
|
rllib/utils/replay_buffers/multi_agent_replay_buffer.py
|
{
"start": 1865,
"end": 16374
}
|
class ____(ReplayBuffer):
"""A replay buffer shard for multiagent setups.
This buffer is meant to be run in parallel to distribute experiences
across `num_shards` shards. Unlike simpler buffers, it holds a set of
buffers - one for each policy ID.
"""
def __init__(
self,
capacity: int = 10000,
storage_unit: str = "timesteps",
num_shards: int = 1,
replay_mode: str = "independent",
replay_sequence_override: bool = True,
replay_sequence_length: int = 1,
replay_burn_in: int = 0,
replay_zero_init_states: bool = True,
underlying_buffer_config: dict = None,
**kwargs,
):
"""Initializes a MultiAgentReplayBuffer instance.
Args:
capacity: The capacity of the buffer, measured in `storage_unit`.
storage_unit: Either 'timesteps', 'sequences' or
'episodes'. Specifies how experiences are stored. If they
are stored in episodes, replay_sequence_length is ignored.
num_shards: The number of buffer shards that exist in total
(including this one).
replay_mode: One of "independent" or "lockstep". Determines,
whether batches are sampled independently or to an equal
amount.
replay_sequence_override: If True, ignore sequences found in incoming
batches, slicing them into sequences as specified by
`replay_sequence_length` and `replay_sequence_burn_in`. This only has
an effect if storage_unit is `sequences`.
replay_sequence_length: The sequence length (T) of a single
sample. If > 1, we will sample B x T from this buffer. This
only has an effect if storage_unit is 'timesteps'.
replay_burn_in: This is the number of timesteps
each sequence overlaps with the previous one to generate a
better internal state (=state after the burn-in), instead of
starting from 0.0 each RNN rollout. This only has an effect
if storage_unit is `sequences`.
replay_zero_init_states: Whether the initial states in the
buffer (if replay_sequence_length > 0) are alwayas 0.0 or
should be updated with the previous train_batch state outputs.
underlying_buffer_config: A config that contains all necessary
constructor arguments and arguments for methods to call on
the underlying buffers.
``**kwargs``: Forward compatibility kwargs.
"""
shard_capacity = capacity // num_shards
ReplayBuffer.__init__(self, capacity, storage_unit)
# If the user provides an underlying buffer config, we use to
# instantiate and interact with underlying buffers
self.underlying_buffer_config = underlying_buffer_config
if self.underlying_buffer_config is not None:
self.underlying_buffer_call_args = self.underlying_buffer_config
else:
self.underlying_buffer_call_args = {}
self.replay_sequence_override = replay_sequence_override
self.replay_mode = replay_mode
self.replay_sequence_length = replay_sequence_length
self.replay_burn_in = replay_burn_in
self.replay_zero_init_states = replay_zero_init_states
self.replay_sequence_override = replay_sequence_override
if (
replay_sequence_length > 1
and self.storage_unit is not StorageUnit.SEQUENCES
):
logger.warning(
"MultiAgentReplayBuffer configured with "
"`replay_sequence_length={}`, but `storage_unit={}`. "
"replay_sequence_length will be ignored and set to 1.".format(
replay_sequence_length, storage_unit
)
)
self.replay_sequence_length = 1
if replay_sequence_length == 1 and self.storage_unit is StorageUnit.SEQUENCES:
logger.warning(
"MultiAgentReplayBuffer configured with "
"`replay_sequence_length={}`, but `storage_unit={}`. "
"This will result in sequences equal to timesteps.".format(
replay_sequence_length, storage_unit
)
)
if replay_mode in ["lockstep", ReplayMode.LOCKSTEP]:
self.replay_mode = ReplayMode.LOCKSTEP
if self.storage_unit in [StorageUnit.EPISODES, StorageUnit.SEQUENCES]:
raise ValueError(
"MultiAgentReplayBuffer does not support "
"lockstep mode with storage unit `episodes`"
"or `sequences`."
)
elif replay_mode in ["independent", ReplayMode.INDEPENDENT]:
self.replay_mode = ReplayMode.INDEPENDENT
else:
raise ValueError("Unsupported replay mode: {}".format(replay_mode))
if self.underlying_buffer_config:
ctor_args = {
**{"capacity": shard_capacity, "storage_unit": StorageUnit.FRAGMENTS},
**self.underlying_buffer_config,
}
def new_buffer():
return from_config(self.underlying_buffer_config["type"], ctor_args)
else:
# Default case
def new_buffer():
self.underlying_buffer_call_args = {}
return ReplayBuffer(
self.capacity,
storage_unit=StorageUnit.FRAGMENTS,
)
self.replay_buffers = collections.defaultdict(new_buffer)
# Metrics.
self.add_batch_timer = _Timer()
self.replay_timer = _Timer()
self._num_added = 0
def __len__(self) -> int:
"""Returns the number of items currently stored in this buffer."""
return sum(len(buffer._storage) for buffer in self.replay_buffers.values())
@DeveloperAPI
@Deprecated(
old="ReplayBuffer.replay()",
new="ReplayBuffer.sample(num_items)",
error=True,
)
def replay(self, num_items: int = None, **kwargs) -> Optional[SampleBatchType]:
"""Deprecated in favor of new ReplayBuffer API."""
pass
@DeveloperAPI
@override(ReplayBuffer)
def add(self, batch: SampleBatchType, **kwargs) -> None:
"""Adds a batch to the appropriate policy's replay buffer.
Turns the batch into a MultiAgentBatch of the DEFAULT_POLICY_ID if
it is not a MultiAgentBatch. Subsequently, adds the individual policy
batches to the storage.
Args:
batch : The batch to be added.
``**kwargs``: Forward compatibility kwargs.
"""
if batch is None:
if log_once("empty_batch_added_to_buffer"):
logger.info(
"A batch that is `None` was added to {}. This can be "
"normal at the beginning of execution but might "
"indicate an issue.".format(type(self).__name__)
)
return
# Make a copy so the replay buffer doesn't pin plasma memory.
batch = batch.copy()
# Handle everything as if multi-agent.
batch = batch.as_multi_agent()
with self.add_batch_timer:
pids_and_batches = self._maybe_split_into_policy_batches(batch)
for policy_id, sample_batch in pids_and_batches.items():
self._add_to_underlying_buffer(policy_id, sample_batch, **kwargs)
self._num_added += batch.count
@DeveloperAPI
def _add_to_underlying_buffer(
self, policy_id: PolicyID, batch: SampleBatchType, **kwargs
) -> None:
"""Add a batch of experiences to the underlying buffer of a policy.
If the storage unit is `timesteps`, cut the batch into timeslices
before adding them to the appropriate buffer. Otherwise, let the
underlying buffer decide how slice batches.
Args:
policy_id: ID of the policy that corresponds to the underlying
buffer
batch: SampleBatch to add to the underlying buffer
``**kwargs``: Forward compatibility kwargs.
"""
# Merge kwargs, overwriting standard call arguments
kwargs = merge_dicts_with_warning(self.underlying_buffer_call_args, kwargs)
# For the storage unit `timesteps`, the underlying buffer will
# simply store the samples how they arrive. For sequences and
# episodes, the underlying buffer may split them itself.
if self.storage_unit is StorageUnit.TIMESTEPS:
timeslices = batch.timeslices(1)
elif self.storage_unit is StorageUnit.SEQUENCES:
timeslices = timeslice_along_seq_lens_with_overlap(
sample_batch=batch,
seq_lens=batch.get(SampleBatch.SEQ_LENS)
if self.replay_sequence_override
else None,
zero_pad_max_seq_len=self.replay_sequence_length,
pre_overlap=self.replay_burn_in,
zero_init_states=self.replay_zero_init_states,
)
elif self.storage_unit == StorageUnit.EPISODES:
timeslices = []
for eps in batch.split_by_episode():
if eps.get(SampleBatch.T)[0] == 0 and (
eps.get(SampleBatch.TERMINATEDS, [True])[-1]
or eps.get(SampleBatch.TRUNCATEDS, [False])[-1]
):
# Only add full episodes to the buffer
timeslices.append(eps)
else:
if log_once("only_full_episodes"):
logger.info(
"This buffer uses episodes as a storage "
"unit and thus allows only full episodes "
"to be added to it. Some samples may be "
"dropped."
)
elif self.storage_unit == StorageUnit.FRAGMENTS:
timeslices = [batch]
else:
raise ValueError("Unknown `storage_unit={}`".format(self.storage_unit))
for slice in timeslices:
self.replay_buffers[policy_id].add(slice, **kwargs)
@DeveloperAPI
@override(ReplayBuffer)
def sample(
self, num_items: int, policy_id: Optional[PolicyID] = None, **kwargs
) -> Optional[SampleBatchType]:
"""Samples a MultiAgentBatch of `num_items` per one policy's buffer.
If less than `num_items` records are in the policy's buffer,
some samples in the results may be repeated to fulfil the batch size
`num_items` request. Returns an empty batch if there are no items in
the buffer.
Args:
num_items: Number of items to sample from a policy's buffer.
policy_id: ID of the policy that created the experiences we sample. If
none is given, sample from all policies.
Returns:
Concatenated MultiAgentBatch of items.
``**kwargs``: Forward compatibility kwargs.
"""
# Merge kwargs, overwriting standard call arguments
kwargs = merge_dicts_with_warning(self.underlying_buffer_call_args, kwargs)
with self.replay_timer:
# Lockstep mode: Sample from all policies at the same time an
# equal amount of steps.
if self.replay_mode == ReplayMode.LOCKSTEP:
assert (
policy_id is None
), "`policy_id` specifier not allowed in `lockstep` mode!"
# In lockstep mode we sample MultiAgentBatches
return self.replay_buffers[_ALL_POLICIES].sample(num_items, **kwargs)
elif policy_id is not None:
sample = self.replay_buffers[policy_id].sample(num_items, **kwargs)
return MultiAgentBatch({policy_id: sample}, sample.count)
else:
samples = {}
for policy_id, replay_buffer in self.replay_buffers.items():
samples[policy_id] = replay_buffer.sample(num_items, **kwargs)
return MultiAgentBatch(samples, sum(s.count for s in samples.values()))
@DeveloperAPI
@override(ReplayBuffer)
def stats(self, debug: bool = False) -> Dict:
"""Returns the stats of this buffer and all underlying buffers.
Args:
debug: If True, stats of underlying replay buffers
are fetched with debug=True.
Returns:
stat: Dictionary of buffer stats.
"""
stat = {
"add_batch_time_ms": round(1000 * self.add_batch_timer.mean, 3),
"replay_time_ms": round(1000 * self.replay_timer.mean, 3),
}
for policy_id, replay_buffer in self.replay_buffers.items():
stat.update(
{"policy_{}".format(policy_id): replay_buffer.stats(debug=debug)}
)
return stat
@DeveloperAPI
@override(ReplayBuffer)
def get_state(self) -> Dict[str, Any]:
"""Returns all local state.
Returns:
The serializable local state.
"""
state = {"num_added": self._num_added, "replay_buffers": {}}
for policy_id, replay_buffer in self.replay_buffers.items():
state["replay_buffers"][policy_id] = replay_buffer.get_state()
return state
@DeveloperAPI
@override(ReplayBuffer)
def set_state(self, state: Dict[str, Any]) -> None:
"""Restores all local state to the provided `state`.
Args:
state: The new state to set this buffer. Can be obtained by
calling `self.get_state()`.
"""
self._num_added = state["num_added"]
buffer_states = state["replay_buffers"]
for policy_id in buffer_states.keys():
self.replay_buffers[policy_id].set_state(buffer_states[policy_id])
def _maybe_split_into_policy_batches(self, batch: SampleBatchType):
"""Returns a dict of policy IDs and batches, depending on our replay mode.
This method helps with splitting up MultiAgentBatches only if the
self.replay_mode requires it.
"""
if self.replay_mode == ReplayMode.LOCKSTEP:
return {_ALL_POLICIES: batch}
else:
return batch.policy_batches
|
MultiAgentReplayBuffer
|
python
|
gevent__gevent
|
src/greentest/3.10/test_socket.py
|
{
"start": 219073,
"end": 219926
}
|
class ____(unittest.TestCase):
    def testRDM(self):
        # One server and one client TIPC reliable-datagram (RDM) socket.
        server = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        client = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(server.close)
        self.addCleanup(client.close)

        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind the server to a name sequence covering [TIPC_LOWER, TIPC_UPPER].
        server.bind(
            (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE, TIPC_LOWER, TIPC_UPPER)
        )

        # Send to a single name in the middle of the bound range.
        mid_instance = TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2)
        client.sendto(MSG, (socket.TIPC_ADDR_NAME, TIPC_STYPE, mid_instance, 0))

        payload, sender = server.recvfrom(1024)
        self.assertEqual(client.getsockname(), sender)
        self.assertEqual(payload, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
|
TIPCTest
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 36565,
"end": 36999
}
|
class ____(BaseModel):
    # Pydantic model for a bulk "create" action on pool entities.
    # `extra="forbid"` makes validation reject any unknown fields.
    model_config = ConfigDict(
        extra="forbid",
    )
    # Discriminator field: this model only accepts the literal action "create".
    action: Annotated[
        Literal["create"], Field(description="The action to be performed on the entities.", title="Action")
    ]
    # Payload: the list of pool bodies to create.
    entities: Annotated[
        list[PoolBody], Field(description="A list of entities to be created.", title="Entities")
    ]
    # Behavior when an entity already exists; defaults to failing the request.
    action_on_existence: BulkActionOnExistence | None = "fail"
|
BulkCreateActionPoolBody
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.