language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | openai__openai-python | src/openai/resources/conversations/items.py | {
"start": 21967,
"end": 22472
} | class ____:
def __init__(self, items: Items) -> None:
self._items = items
self.create = _legacy_response.to_raw_response_wrapper(
items.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
items.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
items.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
items.delete,
)
| ItemsWithRawResponse |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image_bytes01.py | {
"start": 374,
"end": 2550
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
with open(self.image_dir + "red.png", "rb") as image_file:
image_data = BytesIO(image_file.read())
worksheet.insert_image("E9", "red.png", {"image_data": image_data})
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
with open(self.image_dir + "red.png", "rb") as image_file:
image_data = BytesIO(image_file.read())
worksheet.insert_image("E9", "red.png", {"image_data": image_data})
workbook.close()
self.assertExcelEqual()
def test_create_file_from_buffer(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
with open(self.image_dir + "red.png", "rb") as image_file:
image_data = BytesIO(image_file.read())
worksheet.insert_image("E9", image_data, {"description": "red.png"})
workbook.close()
self.assertExcelEqual()
def test_create_file_from_image_object(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
with open(self.image_dir + "red.png", "rb") as image_file:
image_data = BytesIO(image_file.read())
image = Image(image_data)
image.description = "red.png"
worksheet.insert_image("E9", image)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/big_bird/configuration_big_bird.py | {
"start": 804,
"end": 7316
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BigBirdModel`]. It is used to instantiate an
BigBird model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the BigBird
[google/bigbird-roberta-base](https://huggingface.co/google/bigbird-roberta-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50358):
Vocabulary size of the BigBird model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BigBirdModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 1024 or 2048 or 4096).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`BigBirdModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
attention_type (`str`, *optional*, defaults to `"block_sparse"`)
Whether to use block sparse attention (with n complexity) as introduced in paper or original attention
layer (with n^2 complexity). Possible values are `"original_full"` and `"block_sparse"`.
use_bias (`bool`, *optional*, defaults to `True`)
Whether to use bias in query, key, value.
rescale_embeddings (`bool`, *optional*, defaults to `False`)
Whether to rescale embeddings with (hidden_size ** 0.5).
block_size (`int`, *optional*, defaults to 64)
Size of each block. Useful only when `attention_type == "block_sparse"`.
num_random_blocks (`int`, *optional*, defaults to 3)
Each query is going to attend these many number of random blocks. Useful only when `attention_type ==
"block_sparse"`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Example:
```python
>>> from transformers import BigBirdConfig, BigBirdModel
>>> # Initializing a BigBird google/bigbird-roberta-base style configuration
>>> configuration = BigBirdConfig()
>>> # Initializing a model (with random weights) from the google/bigbird-roberta-base style configuration
>>> model = BigBirdModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "big_bird"
def __init__(
self,
vocab_size=50358,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu_new",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=4096,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
sep_token_id=66,
attention_type="block_sparse",
use_bias=True,
rescale_embeddings=False,
block_size=64,
num_random_blocks=3,
classifier_dropout=None,
**kwargs,
):
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
sep_token_id=sep_token_id,
**kwargs,
)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.rescale_embeddings = rescale_embeddings
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
self.classifier_dropout = classifier_dropout
__all__ = ["BigBirdConfig"]
| BigBirdConfig |
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 735,
"end": 9928
} | class ____(TestCase):
def setUp(self):
self.addCleanup(
self.assertEqual,
validators._META_SCHEMAS,
dict(validators._META_SCHEMAS),
)
self.addCleanup(
self.assertEqual,
validators._VALIDATORS,
dict(validators._VALIDATORS),
)
self.meta_schema = {"$id": "some://meta/schema"}
self.validators = {"fail": fail}
self.type_checker = TypeChecker()
self.Validator = validators.create(
meta_schema=self.meta_schema,
validators=self.validators,
type_checker=self.type_checker,
)
def test_attrs(self):
self.assertEqual(
(
self.Validator.VALIDATORS,
self.Validator.META_SCHEMA,
self.Validator.TYPE_CHECKER,
), (
self.validators,
self.meta_schema,
self.type_checker,
),
)
def test_init(self):
schema = {"fail": []}
self.assertEqual(self.Validator(schema).schema, schema)
def test_iter_errors_successful(self):
schema = {"fail": []}
validator = self.Validator(schema)
errors = list(validator.iter_errors("hello"))
self.assertEqual(errors, [])
def test_iter_errors_one_error(self):
schema = {"fail": [{"message": "Whoops!"}]}
validator = self.Validator(schema)
expected_error = exceptions.ValidationError(
"Whoops!",
instance="goodbye",
schema=schema,
validator="fail",
validator_value=[{"message": "Whoops!"}],
schema_path=deque(["fail"]),
)
errors = list(validator.iter_errors("goodbye"))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]._contents(), expected_error._contents())
def test_iter_errors_multiple_errors(self):
schema = {
"fail": [
{"message": "First"},
{"message": "Second!", "validator": "asdf"},
{"message": "Third"},
],
}
validator = self.Validator(schema)
errors = list(validator.iter_errors("goodbye"))
self.assertEqual(len(errors), 3)
def test_if_a_version_is_provided_it_is_registered(self):
Validator = validators.create(
meta_schema={"$id": "something"},
version="my version",
)
self.addCleanup(validators._META_SCHEMAS.pop, "something")
self.addCleanup(validators._VALIDATORS.pop, "my version")
self.assertEqual(Validator.__name__, "MyVersionValidator")
self.assertEqual(Validator.__qualname__, "MyVersionValidator")
def test_repr(self):
Validator = validators.create(
meta_schema={"$id": "something"},
version="my version",
)
self.addCleanup(validators._META_SCHEMAS.pop, "something")
self.addCleanup(validators._VALIDATORS.pop, "my version")
self.assertEqual(
repr(Validator({})),
"MyVersionValidator(schema={}, format_checker=None)",
)
def test_long_repr(self):
Validator = validators.create(
meta_schema={"$id": "something"},
version="my version",
)
self.addCleanup(validators._META_SCHEMAS.pop, "something")
self.addCleanup(validators._VALIDATORS.pop, "my version")
self.assertEqual(
repr(Validator({"a": list(range(1000))})), (
"MyVersionValidator(schema={'a': [0, 1, 2, 3, 4, 5, ...]}, "
"format_checker=None)"
),
)
def test_repr_no_version(self):
Validator = validators.create(meta_schema={})
self.assertEqual(
repr(Validator({})),
"Validator(schema={}, format_checker=None)",
)
def test_dashes_are_stripped_from_validator_names(self):
Validator = validators.create(
meta_schema={"$id": "something"},
version="foo-bar",
)
self.addCleanup(validators._META_SCHEMAS.pop, "something")
self.addCleanup(validators._VALIDATORS.pop, "foo-bar")
self.assertEqual(Validator.__qualname__, "FooBarValidator")
def test_if_a_version_is_not_provided_it_is_not_registered(self):
original = dict(validators._META_SCHEMAS)
validators.create(meta_schema={"id": "id"})
self.assertEqual(validators._META_SCHEMAS, original)
def test_validates_registers_meta_schema_id(self):
meta_schema_key = "meta schema id"
my_meta_schema = {"id": meta_schema_key}
validators.create(
meta_schema=my_meta_schema,
version="my version",
id_of=lambda s: s.get("id", ""),
)
self.addCleanup(validators._META_SCHEMAS.pop, meta_schema_key)
self.addCleanup(validators._VALIDATORS.pop, "my version")
self.assertIn(meta_schema_key, validators._META_SCHEMAS)
def test_validates_registers_meta_schema_draft6_id(self):
meta_schema_key = "meta schema $id"
my_meta_schema = {"$id": meta_schema_key}
validators.create(
meta_schema=my_meta_schema,
version="my version",
)
self.addCleanup(validators._META_SCHEMAS.pop, meta_schema_key)
self.addCleanup(validators._VALIDATORS.pop, "my version")
self.assertIn(meta_schema_key, validators._META_SCHEMAS)
def test_create_default_types(self):
Validator = validators.create(meta_schema={}, validators=())
self.assertTrue(
all(
Validator({}).is_type(instance=instance, type=type)
for type, instance in [
("array", []),
("boolean", True),
("integer", 12),
("null", None),
("number", 12.0),
("object", {}),
("string", "foo"),
]
),
)
def test_check_schema_with_different_metaschema(self):
"""
One can create a validator class whose metaschema uses a different
dialect than itself.
"""
NoEmptySchemasValidator = validators.create(
meta_schema={
"$schema": validators.Draft202012Validator.META_SCHEMA["$id"],
"not": {"const": {}},
},
)
NoEmptySchemasValidator.check_schema({"foo": "bar"})
with self.assertRaises(exceptions.SchemaError):
NoEmptySchemasValidator.check_schema({})
NoEmptySchemasValidator({"foo": "bar"}).validate("foo")
def test_check_schema_with_different_metaschema_defaults_to_self(self):
"""
A validator whose metaschema doesn't declare $schema defaults to its
own validation behavior, not the latest "normal" specification.
"""
NoEmptySchemasValidator = validators.create(
meta_schema={"fail": [{"message": "Meta schema whoops!"}]},
validators={"fail": fail},
)
with self.assertRaises(exceptions.SchemaError):
NoEmptySchemasValidator.check_schema({})
def test_extend(self):
original = dict(self.Validator.VALIDATORS)
new = object()
Extended = validators.extend(
self.Validator,
validators={"new": new},
)
self.assertEqual(
(
Extended.VALIDATORS,
Extended.META_SCHEMA,
Extended.TYPE_CHECKER,
self.Validator.VALIDATORS,
), (
dict(original, new=new),
self.Validator.META_SCHEMA,
self.Validator.TYPE_CHECKER,
original,
),
)
def test_extend_idof(self):
"""
Extending a validator preserves its notion of schema IDs.
"""
def id_of(schema):
return schema.get("__test__", self.Validator.ID_OF(schema))
correct_id = "the://correct/id/"
meta_schema = {
"$id": "the://wrong/id/",
"__test__": correct_id,
}
Original = validators.create(
meta_schema=meta_schema,
validators=self.validators,
type_checker=self.type_checker,
id_of=id_of,
)
self.assertEqual(Original.ID_OF(Original.META_SCHEMA), correct_id)
Derived = validators.extend(Original)
self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id)
def test_extend_applicable_validators(self):
"""
Extending a validator preserves its notion of applicable validators.
"""
schema = {
"$defs": {"test": {"type": "number"}},
"$ref": "#/$defs/test",
"maximum": 1,
}
draft4 = validators.Draft4Validator(schema)
self.assertTrue(draft4.is_valid(37)) # as $ref ignores siblings
Derived = validators.extend(validators.Draft4Validator)
self.assertTrue(Derived(schema).is_valid(37))
| TestCreateAndExtend |
python | django__django | tests/queries/models.py | {
"start": 17655,
"end": 17861
} | class ____(models.Model):
related = models.ForeignKey(
Individual, models.CASCADE, related_name="related_individual"
)
class Meta:
db_table = "RelatedIndividual"
| RelatedIndividual |
python | bokeh__bokeh | tests/unit/bokeh/server/test_contexts.py | {
"start": 2885,
"end": 6276
} | class ____:
def test_init(self) -> None:
c = bsc.ApplicationContext("app", io_loop="ioloop")
assert c.io_loop == "ioloop"
assert c.application == "app"
assert c.url is None
c = bsc.ApplicationContext("app", io_loop="ioloop", url="url")
assert c.io_loop == "ioloop"
assert c.application == "app"
assert c.url == "url"
def test_sessions(self) -> None:
c = bsc.ApplicationContext("app", io_loop="ioloop")
c._sessions = dict(foo=1, bar=2)
assert set(c.sessions) == {1, 2}
def test_get_session_success(self) -> None:
c = bsc.ApplicationContext("app", io_loop="ioloop")
c._sessions = dict(foo=1, bar=2)
assert c.get_session("foo") == 1
def test_get_session_failure(self) -> None:
c = bsc.ApplicationContext("app", io_loop="ioloop")
c._sessions = dict(foo=1, bar=2)
with pytest.raises(bsc.ProtocolError) as e:
c.get_session("bax")
assert str(e.value).endswith("No such session bax")
async def test_create_session_if_needed_new(self) -> None:
app = Application()
c = bsc.ApplicationContext(app, io_loop="ioloop")
s = await c.create_session_if_needed("foo")
assert c.get_session("foo") == s
async def test_create_session_if_needed_exists(self) -> None:
app = Application()
c = bsc.ApplicationContext(app, io_loop="ioloop")
s1 = await c.create_session_if_needed("foo")
s2 = await c.create_session_if_needed("foo")
assert s1 == s2
async def test_create_session_if_needed_bad_sessionid(self) -> None:
app = Application()
c = bsc.ApplicationContext(app, io_loop="ioloop")
r = c.create_session_if_needed("")
with pytest.raises(bsc.ProtocolError) as e:
await r
assert str(e.value).endswith("Session ID must not be empty")
async def test_create_session_if_needed_logout_url(self) -> None:
app = Application()
c = bsc.ApplicationContext(app, io_loop="ioloop", logout_url="/logout")
s = await c.create_session_if_needed("foo")
session = c.get_session("foo")
assert session == s
assert c._session_contexts[session.id].logout_url == "/logout"
async def test_async_next_tick_callback_is_called(self) -> None:
app = Application()
c = bsc.ApplicationContext(app, io_loop=IOLoop.current())
s = await c.create_session_if_needed("foo")
latch_f = asyncio.Future()
result_f = asyncio.Future()
async def cb():
m = await latch_f
result_f.set_result(m)
s.document.add_next_tick_callback(cb)
message = 'Done'
latch_f.set_result(message)
result = await asyncio.wait_for(result_f, 1)
assert result == message
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| TestApplicationContext |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 97940,
"end": 99044
} | class ____:
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
def test_ldexp_overflow(self):
# silence warning emitted on overflow
with np.errstate(over="ignore"):
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
| TestLdexp |
python | gevent__gevent | src/gevent/tests/test__threadpool.py | {
"start": 13414,
"end": 14075
} | class ____(TestCase):
@greentest.reraises_flaky_race_condition()
def test(self):
pool = self.pool = self._makeOne(2, create_all_worker_threads=False)
self.assertEqual(pool.size, 0)
pool.size = 1
self.assertEqual(pool.size, 1)
pool.size = 2
self.assertEqual(pool.size, 2)
pool.size = 1
self.assertEqual(pool.size, 1)
with self.assertRaises(ValueError):
pool.size = -1
with self.assertRaises(ValueError):
pool.size = 3
pool.size = 0
self.assertEqual(pool.size, 0)
pool.size = 2
self.assertEqual(pool.size, 2)
| TestSize |
python | pennersr__django-allauth | allauth/socialaccount/providers/exist/provider.py | {
"start": 429,
"end": 1224
} | class ____(OAuth2Provider):
id = "exist"
name = "Exist.io"
account_class = ExistAccount
oauth2_adapter_class = ExistOAuth2Adapter
def extract_uid(self, data):
return data.get("username")
def extract_common_fields(self, data):
extra_common = super().extract_common_fields(data)
extra_common.update(
username=data.get("username"),
first_name=data.get("first_name"),
last_name=data.get("last_name"),
avatar=data.get("avatar"),
timezone=data.get("timezone"),
local_time=data.get("local_time"),
)
return extra_common
def get_default_scope(self):
return ["mood_read", "health_read", "productivity_read"]
provider_classes = [ExistProvider]
| ExistProvider |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 435,
"end": 548
} | class ____[T: float = int]: ...
# This should generate an error because default must be a subtype of bound.
| ClassT2 |
python | tensorflow__tensorflow | tensorflow/python/framework/function_test.py | {
"start": 37848,
"end": 46013
} | class ____(test.TestCase):
def stripInternalFunctionDefAnnotations(self, f_def):
result = function_pb2.FunctionDef()
result.CopyFrom(f_def)
result.attr.pop("_construction_context", None)
return result
def expectFunctionsEqual(self, func, grad_func=None, new_func=None):
if new_func is None:
# Make a copy of func.definition to avoid any bugs masked by using the
# same object
serialized_fdef = func.definition.SerializeToString()
# Serialize and then deserialize `func` to create `new_func`
fdef = function_pb2.FunctionDef.FromString(serialized_fdef)
new_func = function._from_definition(fdef, grad_func=grad_func)
self.assertEqual(func.name, new_func.name)
self.assertEqual(
self.stripInternalFunctionDefAnnotations(func.definition),
self.stripInternalFunctionDefAnnotations(new_func.definition))
self.assertEqual(func.grad_func_name, new_func.grad_func_name)
self.assertEqual(func.declared_input_types, new_func.declared_input_types)
self.assertEqual(func.captured_inputs, new_func.captured_inputs)
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(x, y):
return x + y
self.expectFunctionsEqual(Foo)
def testGradFunc(self):
@function.Defun(dtypes.float32, dtypes.float32)
def G(x, dy):
return x * dy
@function.Defun(dtypes.float32, grad_func=G)
def F(x):
return math_ops.exp(x) - math_ops.exp(-x)
self.expectFunctionsEqual(F, grad_func=G)
def testCapturedInputs(self):
c = constant_op.constant(10, dtypes.int64)
@function.Defun(dtypes.int64)
def Foo(x):
return x + c
new_func = function._from_definition(Foo.definition)
self.assertEqual(Foo.name, new_func.name)
self.assertEqual(
self.stripInternalFunctionDefAnnotations(Foo.definition),
self.stripInternalFunctionDefAnnotations(new_func.definition))
self.assertEqual(Foo.grad_func_name, new_func.grad_func_name)
# Captured inputs are added as regular inputs to the function definition
self.assertEqual(new_func.declared_input_types,
Foo.declared_input_types + (dtypes.int64,))
self.assertEqual(len(new_func.captured_inputs), 0)
def testNestedFunctions(self):
@function.Defun(dtypes.float32)
def Outer(x):
@function.Defun(dtypes.float32)
def Inner(y):
return y + 1
return Inner(Inner(x))
self.expectFunctionsEqual(Outer)
def testFromLibrary(self):
# Define some functions with different gradient functions. Note that many of
# the below functions are identical since function bodies don't matter for
# this test.
@function.Defun(dtypes.float32, dtypes.float32)
def G1(x, dy):
return x * dy
@function.Defun(dtypes.float32, dtypes.float32)
def G2(x, dy):
return x * dy
# F1 and F2 have the same gradient function
@function.Defun(dtypes.float32, grad_func=G1)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32, grad_func=G1)
def F2(x):
return math_ops.exp(x) - math_ops.exp(-x)
# F3 has a different gradient function
@function.Defun(dtypes.float32, grad_func=G2)
def F3(x):
return math_ops.exp(x) - math_ops.exp(-x)
# F4 has no gradient function
@function.Defun(dtypes.float32)
def F4(x):
return math_ops.exp(x) - math_ops.exp(-x)
# Instantiate all functions
g = ops.Graph()
with g.as_default():
c = constant_op.constant(1.0, dtypes.float32)
f1 = F1(c)
f2 = F2(c)
f3 = F3(c)
f4 = F4(c)
gradients_impl.gradients([f1, f2, f3, f4], c)
library = g.as_graph_def().library
new_funcs = function.from_library(library)
def CheckNewFunc(func):
new_func = [f for f in new_funcs if f.name == func.name]
self.assertEqual(len(new_func), 1)
self.expectFunctionsEqual(func, new_func=new_func[0])
CheckNewFunc(G1)
CheckNewFunc(G2)
CheckNewFunc(F1)
CheckNewFunc(F2)
CheckNewFunc(F3)
CheckNewFunc(F4)
def testFromLibraryEmptyLib(self):
library = function_pb2.FunctionDefLibrary()
self.assertEqual(len(function.from_library(library)), 0)
def testFromLibraryMissingFuncDef(self):
@function.Defun(dtypes.float32, dtypes.float32)
def G1(x, dy):
return x * dy
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
gradient = function_pb2.GradientDef()
gradient.function_name = F1.name
gradient.gradient_func = G1.name
# Create invalid function def that is missing G1 function def
library = function_pb2.FunctionDefLibrary()
library.gradient.extend([gradient])
library.function.extend([F1.definition])
with self.assertRaisesRegex(
ValueError,
"FunctionDefLibrary missing 'G1_[0-9a-zA-Z]{8,11}' FunctionDef"):
function.from_library(library)
# Create invalid function def that is missing F1 function def
library = function_pb2.FunctionDefLibrary()
library.gradient.extend([gradient])
library.function.extend([G1.definition])
with self.assertRaisesRegex(
ValueError,
"FunctionDefLibrary missing 'F1_[0-9a-zA-Z]{8,11}' FunctionDef"):
function.from_library(library)
def testFromLibraryCyclicGradFuncs(self):
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32)
def F2(x):
return math_ops.exp(x) - math_ops.exp(-x)
# Create invalid function def library where F1 has gradient function F2 and
# F2 has gradient function F1
library = function_pb2.FunctionDefLibrary()
library.function.extend([F1.definition, F2.definition])
gradient1 = function_pb2.GradientDef()
gradient1.function_name = F1.name
gradient1.gradient_func = F2.name
gradient2 = function_pb2.GradientDef()
gradient2.function_name = F2.name
gradient2.gradient_func = F1.name
library.gradient.extend([gradient1, gradient2])
with self.assertRaisesRegex(
ValueError, "FunctionDefLibrary contains cyclic gradient functions!"):
function.from_library(library)
def testExperimentalAttrs(self):
@function.Defun(dtypes.int32, experimental_tag="tag_value")
def FunctionWithStrAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=123)
def FunctionWithIntAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=123.0)
def FunctionWithFloatAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=True)
def FunctionWithBoolAttr(i):
return array_ops.identity(i)
self.assertTrue("experimental_tag" in FunctionWithStrAttr.definition.attr)
self.assertEqual(FunctionWithStrAttr.definition.attr["experimental_tag"].s,
b"tag_value")
self.assertTrue("experimental_tag" in FunctionWithIntAttr.definition.attr)
self.assertEqual(FunctionWithIntAttr.definition.attr["experimental_tag"].i,
123)
self.assertTrue("experimental_tag" in FunctionWithFloatAttr.definition.attr)
self.assertEqual(
FunctionWithFloatAttr.definition.attr["experimental_tag"].f, 123.0)
self.assertTrue("experimental_tag" in FunctionWithBoolAttr.definition.attr)
self.assertEqual(FunctionWithBoolAttr.definition.attr["experimental_tag"].b,
True)
def testImplementsReferenceAttrs(self):
@function.Defun(
dtypes.int32, _implements="org.google.lstm", _reference="arxiv.org")
def FunctionWithStrAttr(i):
return array_ops.identity(i)
self.assertIn("_implements", FunctionWithStrAttr.definition.attr)
self.assertEqual(FunctionWithStrAttr.definition.attr["_implements"].s,
b"org.google.lstm")
self.assertIn("_reference", FunctionWithStrAttr.definition.attr)
self.assertEqual(FunctionWithStrAttr.definition.attr["_reference"].s,
b"arxiv.org")
| FunctionsFromProtos |
python | has2k1__plotnine | plotnine/scales/scale_size.py | {
"start": 387,
"end": 871
} | class ____(scale_discrete):
"""
Discrete area size scale
"""
_aesthetics = ["size"]
range: InitVar[tuple[float, float]] = (2, 6)
"""
Range ([Minimum, Maximum]) of the size.
"""
def __post_init__(self, range):
super().__post_init__()
def palette(value):
area = np.linspace(range[0] ** 2, range[1] ** 2, value)
return np.sqrt(area)
self.palette = palette # type: ignore
@dataclass
| scale_size_ordinal |
python | getsentry__sentry | src/sentry/snuba/metrics/fields/base.py | {
"start": 11085,
"end": 11148
} | class ____:
op: MetricOperationType
| MetricOperationDefinition |
python | PrefectHQ__prefect | src/prefect/workers/base.py | {
"start": 14701,
"end": 15068
} | class ____(BaseModel, abc.ABC):
identifier: str
status_code: int
def __bool__(self) -> bool:
return self.status_code == 0
C = TypeVar("C", bound=BaseJobConfiguration)
V = TypeVar("V", bound=BaseVariables)
R = TypeVar("R", bound=BaseWorkerResult)
FR = TypeVar("FR") # used to capture the return type of a flow
@register_base_type
| BaseWorkerResult |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-google/llama_index/vector_stores/google/genai_extension.py | {
"start": 14329,
"end": 18683
} | class ____(Exception):
finish_reason: genai.Candidate.FinishReason
finish_message: str
safety_ratings: MutableSequence[genai.SafetyRating]
def __str__(self) -> str:
return (
f"finish_reason: {self.finish_reason} "
f"finish_message: {self.finish_message} "
f"safety ratings: {self.safety_ratings}"
)
def generate_answer(
*,
prompt: str,
passages: List[str],
answer_style: int = genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_settings: List[genai.SafetySetting] = [],
temperature: Optional[float] = None,
client: genai.GenerativeServiceClient,
) -> GroundedAnswer:
# TODO: Consider passing in the corpus ID instead of the actual
# passages.
response = client.generate_answer(
genai.GenerateAnswerRequest(
contents=[
genai.Content(parts=[genai.Part(text=prompt)]),
],
model=_DEFAULT_GENERATE_SERVICE_MODEL,
answer_style=answer_style,
safety_settings=safety_settings,
temperature=temperature,
inline_passages=genai.GroundingPassages(
passages=[
genai.GroundingPassage(
# IDs here takes alphanumeric only. No dashes allowed.
id=str(index),
content=genai.Content(parts=[genai.Part(text=chunk)]),
)
for index, chunk in enumerate(passages)
]
),
)
)
if response.answer.finish_reason != genai.Candidate.FinishReason.STOP:
finish_message = _get_finish_message(response.answer)
raise GenerateAnswerError(
finish_reason=response.answer.finish_reason,
finish_message=finish_message,
safety_ratings=response.answer.safety_ratings,
)
assert len(response.answer.content.parts) == 1
return GroundedAnswer(
answer=response.answer.content.parts[0].text,
attributed_passages=[
Passage(
text=passage.content.parts[0].text,
id=passage.source_id.grounding_passage.passage_id,
)
for passage in response.answer.grounding_attributions
if len(passage.content.parts) > 0
],
answerable_probability=response.answerable_probability,
)
# TODO: Use candidate.finish_message when that field is launched.
# For now, we derive this message from other existing fields.
def _get_finish_message(candidate: genai.Candidate) -> str:
finish_messages: Dict[int, str] = {
genai.Candidate.FinishReason.MAX_TOKENS: "Maximum token in context window reached.",
genai.Candidate.FinishReason.SAFETY: "Blocked because of safety",
genai.Candidate.FinishReason.RECITATION: "Blocked because of recitation",
}
finish_reason = candidate.finish_reason
if finish_reason not in finish_messages:
return "Unexpected generation error"
return finish_messages[finish_reason]
def _convert_to_metadata(metadata: Dict[str, Any]) -> List[genai.CustomMetadata]:
    """Convert a plain metadata dict into ``genai.CustomMetadata`` entries.

    String values are stored as ``string_value``; int/float values are
    stored as ``numeric_value``.

    Args:
        metadata: Mapping of metadata keys to string or numeric values.

    Returns:
        One ``CustomMetadata`` per key/value pair, in dict iteration order.

    Raises:
        ValueError: If a value is neither a string nor a number.
    """

    def _to_custom(key: str, value: Any) -> genai.CustomMetadata:
        # Dispatch on the value's type; anything unsupported is rejected.
        if isinstance(value, str):
            return genai.CustomMetadata(key=key, string_value=value)
        if isinstance(value, (float, int)):
            return genai.CustomMetadata(key=key, numeric_value=value)
        raise ValueError(f"Metadata value {value} is not supported")

    return [_to_custom(key, value) for key, value in metadata.items()]
def _convert_filter(fs: Optional[Dict[str, Any]]) -> List[genai.MetadataFilter]:
    """Translate a simple key/value dict into genai metadata filters.

    Each entry becomes a single EQUAL condition on either the string or
    the numeric value of the corresponding key.

    Args:
        fs: Mapping of metadata keys to the exact string/numeric value each
            key must equal, or ``None`` for no filtering.

    Returns:
        One ``MetadataFilter`` per key, or an empty list when ``fs`` is None.

    Raises:
        ValueError: If a value is neither a string nor a number.
    """
    if fs is None:
        return []
    assert isinstance(fs, dict)

    def _equal_condition(value: Any) -> genai.Condition:
        # Build the EQUAL condition matching the value's type.
        if isinstance(value, str):
            return genai.Condition(
                operation=genai.Condition.Operator.EQUAL, string_value=value
            )
        if isinstance(value, (float, int)):
            return genai.Condition(
                operation=genai.Condition.Operator.EQUAL, numeric_value=value
            )
        raise ValueError(f"Filter value {value} is not supported")

    return [
        genai.MetadataFilter(key=field, conditions=[_equal_condition(value)])
        for field, value in fs.items()
    ]
| GenerateAnswerError |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/executor/base.py | {
"start": 760,
"end": 4070
} | class ____(ABC):
@public
@abstractmethod
def execute(
self, plan_context: "PlanOrchestrationContext", execution_plan: "ExecutionPlan"
) -> Iterator["DagsterEvent"]:
"""For the given context and execution plan, orchestrate a series of sub plan executions in a way that satisfies the whole plan being executed.
Args:
plan_context (PlanOrchestrationContext): The plan's orchestration context.
execution_plan (ExecutionPlan): The plan to execute.
Returns:
A stream of dagster events.
"""
@public
@property
@abstractmethod
def retries(self) -> RetryMode:
"""Whether retries are enabled or disabled for this instance of the executor.
Executors should allow this to be controlled via configuration if possible.
Returns: RetryMode
"""
@property
def step_dependency_config(self) -> StepDependencyConfig:
return StepDependencyConfig.default()
def get_step_event_or_retry_event(
self,
step_context: "IStepContext",
err_info: SerializableErrorInfo,
known_state: "KnownExecutionState",
original_event: "DagsterEvent",
):
from dagster._core.events import DagsterEvent
retry_policy = step_context.op_retry_policy
retry_state = known_state.get_retry_state()
previous_attempt_count = retry_state.get_attempt_count(step_context.step.key)
should_retry = (
retry_policy
and not step_context.retry_mode.disabled
and previous_attempt_count < retry_policy.max_retries
)
if should_retry:
return DagsterEvent.step_retry_event(
step_context,
StepRetryData(
error=err_info,
seconds_to_wait=check.not_none(retry_policy).calculate_delay(
previous_attempt_count + 1
),
),
)
else:
return original_event
def get_failure_or_retry_event_after_error(
self,
step_context: "IStepContext",
err_info: SerializableErrorInfo,
known_state: "KnownExecutionState",
):
from dagster._core.events import DagsterEvent
# determine the retry policy for the step if needed
retry_policy = step_context.op_retry_policy
retry_state = known_state.get_retry_state()
previous_attempt_count = retry_state.get_attempt_count(step_context.step.key)
should_retry = (
retry_policy
and not step_context.retry_mode.disabled
and previous_attempt_count < retry_policy.max_retries
)
if should_retry:
return DagsterEvent.step_retry_event(
step_context,
StepRetryData(
error=err_info,
seconds_to_wait=check.not_none(retry_policy).calculate_delay(
previous_attempt_count + 1
),
),
)
else:
return DagsterEvent.step_failure_event(
step_context=step_context,
step_failure_data=StepFailureData(error=err_info, user_failure_data=None),
)
| Executor |
python | numpy__numpy | numpy/_core/tests/test_unicode.py | {
"start": 5775,
"end": 6084
} | class ____(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Assignment tests
############################################################
| TestCreateValues_1009_UCS4 |
python | mwaskom__seaborn | tests/test_relational.py | {
"start": 2509,
"end": 25575
} | class ____(Helpers):
def test_wide_df_variables(self, wide_df):
p = _RelationalPlotter()
p.assign_variables(data=wide_df)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
assert len(p.plot_data) == np.prod(wide_df.shape)
x = p.plot_data["x"]
expected_x = np.tile(wide_df.index, wide_df.shape[1])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = wide_df.to_numpy().ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(wide_df.columns.to_numpy(), wide_df.shape[0])
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] == wide_df.index.name
assert p.variables["y"] is None
assert p.variables["hue"] == wide_df.columns.name
assert p.variables["style"] == wide_df.columns.name
def test_wide_df_with_nonnumeric_variables(self, long_df):
p = _RelationalPlotter()
p.assign_variables(data=long_df)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
numeric_df = long_df.select_dtypes("number")
assert len(p.plot_data) == np.prod(numeric_df.shape)
x = p.plot_data["x"]
expected_x = np.tile(numeric_df.index, numeric_df.shape[1])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = numeric_df.to_numpy().ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(
numeric_df.columns.to_numpy(), numeric_df.shape[0]
)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] == numeric_df.index.name
assert p.variables["y"] is None
assert p.variables["hue"] == numeric_df.columns.name
assert p.variables["style"] == numeric_df.columns.name
def test_wide_array_variables(self, wide_array):
p = _RelationalPlotter()
p.assign_variables(data=wide_array)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
assert len(p.plot_data) == np.prod(wide_array.shape)
nrow, ncol = wide_array.shape
x = p.plot_data["x"]
expected_x = np.tile(np.arange(nrow), ncol)
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = wide_array.ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(ncol), nrow)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_flat_array_variables(self, flat_array):
p = _RelationalPlotter()
p.assign_variables(data=flat_array)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == np.prod(flat_array.shape)
x = p.plot_data["x"]
expected_x = np.arange(flat_array.shape[0])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_array
assert_array_equal(y, expected_y)
assert p.variables["x"] is None
assert p.variables["y"] is None
def test_flat_list_variables(self, flat_list):
p = _RelationalPlotter()
p.assign_variables(data=flat_list)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == len(flat_list)
x = p.plot_data["x"]
expected_x = np.arange(len(flat_list))
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_list
assert_array_equal(y, expected_y)
assert p.variables["x"] is None
assert p.variables["y"] is None
def test_flat_series_variables(self, flat_series):
p = _RelationalPlotter()
p.assign_variables(data=flat_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == len(flat_series)
x = p.plot_data["x"]
expected_x = flat_series.index
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_series
assert_array_equal(y, expected_y)
assert p.variables["x"] is flat_series.index.name
assert p.variables["y"] is flat_series.name
def test_wide_list_of_series_variables(self, wide_list_of_series):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_series)
chunk_size = max(len(l) for l in wide_list_of_series)
assert len(p.plot_data) == chunks * chunk_size
index_union = np.unique(
np.concatenate([s.index for s in wide_list_of_series])
)
x = p.plot_data["x"]
expected_x = np.tile(index_union, chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = np.concatenate([
s.reindex(index_union) for s in wide_list_of_series
])
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
series_names = [s.name for s in wide_list_of_series]
expected_hue = np.repeat(series_names, chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_list_of_arrays_variables(self, wide_list_of_arrays):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_arrays)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_arrays)
chunk_size = max(len(l) for l in wide_list_of_arrays)
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(wide_list_of_arrays)
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(chunks), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_list_of_list_variables(self, wide_list_of_lists):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_lists)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_lists)
chunk_size = max(len(l) for l in wide_list_of_lists)
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(wide_list_of_lists)
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(chunks), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_series_variables(self, wide_dict_of_series):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_series)
chunk_size = max(len(l) for l in wide_dict_of_series.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_series.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_series), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_arrays_variables(self, wide_dict_of_arrays):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_arrays)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_arrays)
chunk_size = max(len(l) for l in wide_dict_of_arrays.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_arrays.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_arrays), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_lists_variables(self, wide_dict_of_lists):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_lists)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_lists)
chunk_size = max(len(l) for l in wide_dict_of_lists.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_lists.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_lists), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_relplot_simple(self, long_df):
g = relplot(data=long_df, x="x", y="y", kind="scatter")
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(x, long_df["x"])
assert_array_equal(y, long_df["y"])
g = relplot(data=long_df, x="x", y="y", kind="line")
x, y = g.ax.lines[0].get_xydata().T
expected = long_df.groupby("x").y.mean()
assert_array_equal(x, expected.index)
assert y == pytest.approx(expected.values)
with pytest.raises(ValueError):
g = relplot(data=long_df, x="x", y="y", kind="not_a_kind")
def test_relplot_complex(self, long_df):
for sem in ["hue", "size", "style"]:
g = relplot(data=long_df, x="x", y="y", **{sem: "a"})
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(x, long_df["x"])
assert_array_equal(y, long_df["y"])
for sem in ["hue", "size", "style"]:
g = relplot(
data=long_df, x="x", y="y", col="c", **{sem: "a"}
)
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
for sem in ["size", "style"]:
g = relplot(
data=long_df, x="x", y="y", hue="b", col="c", **{sem: "a"}
)
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
for sem in ["hue", "size", "style"]:
g = relplot(
data=long_df.sort_values(["c", "b"]),
x="x", y="y", col="b", row="c", **{sem: "a"}
)
grouped = long_df.groupby(["c", "b"])
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
@pytest.mark.parametrize("vector_type", ["series", "numpy", "list"])
def test_relplot_vectors(self, long_df, vector_type):
semantics = dict(x="x", y="y", hue="f", col="c")
kws = {key: long_df[val] for key, val in semantics.items()}
if vector_type == "numpy":
kws = {k: v.to_numpy() for k, v in kws.items()}
elif vector_type == "list":
kws = {k: v.to_list() for k, v in kws.items()}
g = relplot(data=long_df, **kws)
grouped = long_df.groupby("c")
assert len(g.axes_dict) == len(grouped)
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
def test_relplot_wide(self, wide_df):
g = relplot(data=wide_df)
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(y, wide_df.to_numpy().T.ravel())
assert not g.ax.get_ylabel()
def test_relplot_hues(self, long_df):
palette = ["r", "b", "g"]
g = relplot(
x="x", y="y", hue="a", style="b", col="c",
palette=palette, data=long_df
)
palette = dict(zip(long_df["a"].unique(), palette))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_hues = [palette[val] for val in grp_df["a"]]
assert same_color(points.get_facecolors(), expected_hues)
def test_relplot_sizes(self, long_df):
sizes = [5, 12, 7]
g = relplot(
data=long_df,
x="x", y="y", size="a", hue="b", col="c",
sizes=sizes,
)
sizes = dict(zip(long_df["a"].unique(), sizes))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_sizes = [sizes[val] for val in grp_df["a"]]
assert_array_equal(points.get_sizes(), expected_sizes)
def test_relplot_styles(self, long_df):
markers = ["o", "d", "s"]
g = relplot(
data=long_df,
x="x", y="y", style="a", hue="b", col="c",
markers=markers,
)
paths = []
for m in markers:
m = mpl.markers.MarkerStyle(m)
paths.append(m.get_path().transformed(m.get_transform()))
paths = dict(zip(long_df["a"].unique(), paths))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_paths = [paths[val] for val in grp_df["a"]]
assert self.paths_equal(points.get_paths(), expected_paths)
def test_relplot_weighted_estimator(self, long_df):
g = relplot(data=long_df, x="a", y="y", weights="x", kind="line")
ydata = g.ax.lines[0].get_ydata()
for i, level in enumerate(categorical_order(long_df["a"])):
pos_df = long_df[long_df["a"] == level]
expected = np.average(pos_df["y"], weights=pos_df["x"])
assert ydata[i] == pytest.approx(expected)
def test_relplot_stringy_numerics(self, long_df):
long_df["x_str"] = long_df["x"].astype(str)
g = relplot(data=long_df, x="x", y="y", hue="x_str")
points = g.ax.collections[0]
xys = points.get_offsets()
mask = np.ma.getmask(xys)
assert not mask.any()
assert_array_equal(xys, long_df[["x", "y"]])
g = relplot(data=long_df, x="x", y="y", size="x_str")
points = g.ax.collections[0]
xys = points.get_offsets()
mask = np.ma.getmask(xys)
assert not mask.any()
assert_array_equal(xys, long_df[["x", "y"]])
def test_relplot_legend(self, long_df):
g = relplot(data=long_df, x="x", y="y")
assert g._legend is None
g = relplot(data=long_df, x="x", y="y", hue="a")
texts = [t.get_text() for t in g._legend.texts]
expected_texts = long_df["a"].unique()
assert_array_equal(texts, expected_texts)
g = relplot(data=long_df, x="x", y="y", hue="s", size="s")
texts = [t.get_text() for t in g._legend.texts]
assert_array_equal(texts, np.sort(texts))
g = relplot(data=long_df, x="x", y="y", hue="a", legend=False)
assert g._legend is None
palette = color_palette("deep", len(long_df["b"].unique()))
a_like_b = dict(zip(long_df["a"].unique(), long_df["b"].unique()))
long_df["a_like_b"] = long_df["a"].map(a_like_b)
g = relplot(
data=long_df,
x="x", y="y", hue="b", style="a_like_b",
palette=palette, kind="line", estimator=None,
)
lines = g._legend.get_lines()[1:] # Chop off title dummy
for line, color in zip(lines, palette):
assert line.get_color() == color
def test_relplot_unshared_axis_labels(self, long_df):
col, row = "a", "b"
g = relplot(
data=long_df, x="x", y="y", col=col, row=row,
facet_kws=dict(sharex=False, sharey=False),
)
for ax in g.axes[-1, :].flat:
assert ax.get_xlabel() == "x"
for ax in g.axes[:-1, :].flat:
assert ax.get_xlabel() == ""
for ax in g.axes[:, 0].flat:
assert ax.get_ylabel() == "y"
for ax in g.axes[:, 1:].flat:
assert ax.get_ylabel() == ""
def test_relplot_data(self, long_df):
g = relplot(
data=long_df.to_dict(orient="list"),
x="x",
y=long_df["y"].rename("y_var"),
hue=long_df["a"].to_numpy(),
col="c",
)
expected_cols = set(long_df.columns.to_list() + ["_hue_", "y_var"])
assert set(g.data.columns) == expected_cols
assert_array_equal(g.data["y_var"], long_df["y"])
assert_array_equal(g.data["_hue_"], long_df["a"])
def test_facet_variable_collision(self, long_df):
# https://github.com/mwaskom/seaborn/issues/2488
col_data = long_df["c"]
long_df = long_df.assign(size=col_data)
g = relplot(
data=long_df,
x="x", y="y", col="size",
)
assert g.axes.shape == (1, len(col_data.unique()))
def test_relplot_scatter_unused_variables(self, long_df):
with pytest.warns(UserWarning, match="The `units` parameter"):
g = relplot(long_df, x="x", y="y", units="a")
assert g.ax is not None
with pytest.warns(UserWarning, match="The `weights` parameter"):
g = relplot(long_df, x="x", y="y", weights="x")
assert g.ax is not None
def test_ax_kwarg_removal(self, long_df):
f, ax = plt.subplots()
with pytest.warns(UserWarning):
g = relplot(data=long_df, x="x", y="y", ax=ax)
assert len(ax.collections) == 0
assert len(g.ax.collections) > 0
def test_legend_has_no_offset(self, long_df):
g = relplot(data=long_df, x="x", y="y", hue=long_df["z"] + 1e8)
for text in g.legend.texts:
assert float(text.get_text()) > 1e7
def test_lineplot_2d_dashes(self, long_df):
ax = lineplot(data=long_df[["x", "y"]], dashes=[(5, 5), (10, 10)])
for line in ax.get_lines():
assert line.is_dashed()
def test_legend_attributes_hue(self, long_df):
kws = {"s": 50, "linewidth": 1, "marker": "X"}
g = relplot(long_df, x="x", y="y", hue="a", **kws)
palette = color_palette()
for i, pt in enumerate(get_legend_handles(g.legend)):
assert same_color(pt.get_color(), palette[i])
assert pt.get_markersize() == np.sqrt(kws["s"])
assert pt.get_markeredgewidth() == kws["linewidth"]
if not _version_predates(mpl, "3.7.0"):
assert pt.get_marker() == kws["marker"]
def test_legend_attributes_style(self, long_df):
kws = {"s": 50, "linewidth": 1, "color": "r"}
g = relplot(long_df, x="x", y="y", style="a", **kws)
for pt in get_legend_handles(g.legend):
assert pt.get_markersize() == np.sqrt(kws["s"])
assert pt.get_markeredgewidth() == kws["linewidth"]
assert same_color(pt.get_color(), "r")
def test_legend_attributes_hue_and_style(self, long_df):
kws = {"s": 50, "linewidth": 1}
g = relplot(long_df, x="x", y="y", hue="a", style="b", **kws)
for pt in get_legend_handles(g.legend):
if pt.get_label() not in ["a", "b"]:
assert pt.get_markersize() == np.sqrt(kws["s"])
assert pt.get_markeredgewidth() == kws["linewidth"]
| TestRelationalPlotter |
python | pypa__setuptools | setuptools/_distutils/command/install_scripts.py | {
"start": 286,
"end": 2002
} | class ____(Command):
description = "install scripts (Python or otherwise)"
user_options = [
('install-dir=', 'd', "directory to install scripts to"),
('build-dir=', 'b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('skip-build', None, "skip the build steps"),
]
boolean_options: ClassVar[list[str]] = ['force', 'skip-build']
def initialize_options(self):
self.install_dir = None
self.force = False
self.build_dir = None
self.skip_build = None
def finalize_options(self) -> None:
self.set_undefined_options('build', ('build_scripts', 'build_dir'))
self.set_undefined_options(
'install',
('install_scripts', 'install_dir'),
('force', 'force'),
('skip_build', 'skip_build'),
)
def run(self) -> None:
if not self.skip_build:
self.run_command('build_scripts')
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the scripts we just installed.
for file in self.get_outputs():
if self.dry_run:
log.info("changing mode of %s", file)
else:
mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777
log.info("changing mode of %s to %o", file, mode)
os.chmod(file, mode)
def get_inputs(self):
return self.distribution.scripts or []
def get_outputs(self):
return self.outfiles or []
| install_scripts |
python | catalyst-team__catalyst | catalyst/metrics/_classification.py | {
"start": 5875,
"end": 11405
} | class ____(ICallbackBatchMetric):
"""
This metric accumulates true positive, false positive, true negative,
false negative, support statistics from multiclass data.
Args:
compute_on_call: if True, computes and returns metric value during metric call
prefix: metric prefix
suffix: metric suffix
num_classes: number of classes
Raises:
ValueError: if mode is incorrect
Examples:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# sample data
num_samples, num_features, num_classes = int(1e4), int(1e1), 4
X = torch.rand(num_samples, num_features)
y = (torch.rand(num_samples,) * num_classes).to(torch.int64)
# pytorch loaders
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, num_classes)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])
# model training
runner = dl.SupervisedRunner(
input_key="features",
output_key="logits",
target_key="targets",
loss_key="loss"
)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
num_epochs=3,
valid_loader="valid",
valid_metric="accuracy03",
minimize_valid_metric=False,
verbose=True,
callbacks=[
dl.AccuracyCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
dl.PrecisionRecallF1SupportCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
dl.AUCCallback(input_key="logits", target_key="targets"),
],
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
compute_on_call: bool = True,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
num_classes: Optional[int] = None,
):
"""Init params"""
super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
self.statistics = None
self.num_classes = num_classes
self._ddp_backend = None
self.reset()
# multiprocessing could not handle lamdas, so..
def _mp_hack(self):
return np.zeros(shape=(self.num_classes,))
def reset(self) -> None:
"""Reset all the statistics."""
self.statistics = defaultdict(self._mp_hack)
self._ddp_backend = get_backend()
def update(
self, outputs: torch.Tensor, targets: torch.Tensor
) -> Union[Tuple[int, int, int, int, int, int], Tuple[Any, Any, Any, Any, Any, int]]:
"""
Compute statistics from outputs and targets,
update accumulated statistics with new values.
Args:
outputs: prediction values
targets: true answers
Returns:
Tuple of int or array: true negative, false positive, false
negative, true positive, support statistics and num_classes
"""
tn, fp, fn, tp, support, num_classes = get_multiclass_statistics(
outputs=outputs.cpu().detach(),
targets=targets.cpu().detach(),
num_classes=self.num_classes,
)
tn = tn.numpy()
fp = fp.numpy()
fn = fn.numpy()
tp = tp.numpy()
support = support.numpy()
if self.num_classes is None:
self.num_classes = num_classes
self.statistics["tn"] += tn
self.statistics["fp"] += fp
self.statistics["fn"] += fn
self.statistics["tp"] += tp
self.statistics["support"] += support
return tn, fp, fn, tp, support, self.num_classes
def update_key_value(
self, outputs: torch.Tensor, targets: torch.Tensor
) -> Dict[str, float]:
"""
Update statistics and return statistics intermediate result
Args:
outputs: prediction values
targets: true answers
Returns:
dict of statistics for current input
"""
tn, fp, fn, tp, support, _ = self.update(outputs=outputs, targets=targets)
return {"fn": fn, "fp": fp, "support": support, "tn": tn, "tp": tp}
def compute(self) -> Dict[str, Union[int, np.array]]:
"""
Return accumulated statistics
Returns:
dict of statistics
"""
return self.statistics
def compute_key_value(self) -> Dict[str, float]:
"""
Return accumulated statistics
Returns:
dict of statistics
Examples:
>>> {"tp": np.array([1, 2, 1]), "fp": np.array([2, 1, 0]), ...}
"""
result = self.compute()
return {k: result[k] for k in sorted(result.keys())}
| MulticlassStatisticsMetric |
python | getsentry__sentry | src/sentry/analytics/events/first_flag_sent.py | {
"start": 72,
"end": 233
} | class ____(analytics.Event):
organization_id: int
project_id: int
platform: str | None = None
analytics.register(FirstFlagSentEvent)
| FirstFlagSentEvent |
python | getsentry__sentry | src/sentry/users/services/user/model.py | {
"start": 1510,
"end": 3886
} | class ____(RpcUserProfile):
roles: frozenset[str] = frozenset()
permissions: frozenset[str] = frozenset()
avatar: RpcAvatar | None = None
emails: frozenset[str] = frozenset()
useremails: list[RpcUserEmail] = Field(default_factory=list)
authenticators: list[RpcAuthenticator] = Field(default_factory=list)
def __hash__(self) -> int:
# Mimic the behavior of hashing a Django ORM entity, for compatibility with
# legacy code that treats User entities as dict keys.
# TODO: Remove the need for this
return hash((self.id, self.pk))
def __str__(self) -> str: # API compatibility with ORM User
return self.get_username()
def by_email(self, email: str) -> "RpcUser":
if email == self.email:
return self
return self.copy(update=dict(email=email))
def has_unverified_emails(self) -> bool:
return len(self.get_unverified_emails()) > 0
def has_verified_emails(self) -> bool:
return len(self.get_verified_emails()) > 0
def has_verified_primary_email(self) -> bool:
return bool([e for e in self.useremails if e.is_verified and e.email == self.email])
def get_unverified_emails(self) -> list[RpcUserEmail]:
return [e for e in self.useremails if not e.is_verified]
def get_verified_emails(self) -> list[RpcUserEmail]:
return [e for e in self.useremails if e.is_verified]
def has_usable_password(self) -> bool:
return self.password_usable
def get_username(self) -> str: # API compatibility with ORM User
return self.username
def get_display_name(self) -> str: # API compatibility with ORM User
return self.display_name
def get_label(self) -> str: # API compatibility with ORM User
return self.label
def get_full_name(self) -> str:
return self.name
def get_salutation_name(self) -> str:
name = self.name or self.username.split("@", 1)[0].split(".", 1)[0]
first_name = name.split(" ", 1)[0]
return first_name.capitalize()
def get_avatar_type(self) -> str:
if self.avatar is not None:
return self.avatar.avatar_type
return "letter_avatar"
def class_name(self) -> str:
return "User"
def has_2fa(self) -> bool:
return any(a.type != 0 for a in self.authenticators)
| RpcUser |
python | gevent__gevent | src/greentest/3.13/test_httplib.py | {
"start": 87489,
"end": 99504
} | class ____(TestCase, ExtraAssertions):
def setUp(self):
response_text = (
'HTTP/1.1 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.port = client.HTTP_PORT
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers_add_host_missing(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
tunnel_headers_after = tunnel_headers.copy()
tunnel_headers_after['Host'] = '%s:%d' % (tunnel_host, tunnel_port)
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers_after)
def test_set_tunnel_host_port_headers_set_host_identical(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)',
'Host': '%s:%d' % (tunnel_host, tunnel_port)}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_set_tunnel_host_port_headers_set_host_different(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)',
'Host': '%s:%d' % ('example.com', 4200)}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
d = {
b'host': b'destination.com',
b'port': client.HTTP_PORT,
}
self.conn.set_tunnel(d[b'host'].decode('ascii'))
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n'
b'Host: %(host)s:%(port)d\r\n\r\n' % d,
self.conn.sock.data)
self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s\r\n' % d,
self.conn.sock.data)
def test_connect_with_tunnel_with_default_port(self):
d = {
b'host': b'destination.com',
b'port': client.HTTP_PORT,
}
self.conn.set_tunnel(d[b'host'].decode('ascii'), port=d[b'port'])
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n'
b'Host: %(host)s:%(port)d\r\n\r\n' % d,
self.conn.sock.data)
self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s\r\n' % d,
self.conn.sock.data)
def test_connect_with_tunnel_with_nonstandard_port(self):
d = {
b'host': b'destination.com',
b'port': 8888,
}
self.conn.set_tunnel(d[b'host'].decode('ascii'), port=d[b'port'])
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n'
b'Host: %(host)s:%(port)d\r\n\r\n' % d,
self.conn.sock.data)
self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s:%(port)d\r\n' % d,
self.conn.sock.data)
# This request is not RFC-valid, but it's been possible with the library
# for years, so don't break it unexpectedly... This also tests
# case-insensitivity when injecting Host: headers if they're missing.
def test_connect_with_tunnel_with_different_host_header(self):
d = {
b'host': b'destination.com',
b'tunnel_host_header': b'example.com:9876',
b'port': client.HTTP_PORT,
}
self.conn.set_tunnel(
d[b'host'].decode('ascii'),
headers={'HOST': d[b'tunnel_host_header'].decode('ascii')})
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n'
b'HOST: %(tunnel_host_header)s\r\n\r\n' % d,
self.conn.sock.data)
self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s\r\n' % d,
self.conn.sock.data)
def test_connect_with_tunnel_different_host(self):
d = {
b'host': b'destination.com',
b'port': client.HTTP_PORT,
}
self.conn.set_tunnel(d[b'host'].decode('ascii'))
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n'
b'Host: %(host)s:%(port)d\r\n\r\n' % d,
self.conn.sock.data)
self.assertIn(b'HEAD / HTTP/1.1\r\nHost: %(host)s\r\n' % d,
self.conn.sock.data)
def test_connect_with_tunnel_idna(self):
dest = '\u03b4\u03c0\u03b8.gr'
dest_port = b'%s:%d' % (dest.encode('idna'), client.HTTP_PORT)
expected = b'CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n' % (
dest_port, dest_port)
self.conn.set_tunnel(dest)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(expected, self.conn.sock.data)
def test_tunnel_connect_single_send_connection_setup(self):
"""Regresstion test for https://bugs.python.org/issue43332."""
with mock.patch.object(self.conn, 'send') as mock_send:
self.conn.set_tunnel('destination.com')
self.conn.connect()
self.conn.request('GET', '/')
mock_send.assert_called()
# Likely 2, but this test only cares about the first.
self.assertGreater(
len(mock_send.mock_calls), 1,
msg=f'unexpected number of send calls: {mock_send.mock_calls}')
proxy_setup_data_sent = mock_send.mock_calls[0][1][0]
self.assertIn(b'CONNECT destination.com', proxy_setup_data_sent)
self.assertEndsWith(proxy_setup_data_sent, b'\r\n\r\n',
msg=f'unexpected proxy data sent {proxy_setup_data_sent!r}')
def test_connect_put_request(self):
d = {
b'host': b'destination.com',
b'port': client.HTTP_PORT,
}
self.conn.set_tunnel(d[b'host'].decode('ascii'))
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, self.port)
self.assertIn(b'CONNECT %(host)s:%(port)d HTTP/1.1\r\n'
b'Host: %(host)s:%(port)d\r\n\r\n' % d,
self.conn.sock.data)
self.assertIn(b'PUT / HTTP/1.1\r\nHost: %(host)s\r\n' % d,
self.conn.sock.data)
def test_connect_put_request_ipv6(self):
self.conn.set_tunnel('[1:2:3::4]', 1234)
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT [1:2:3::4]:1234', self.conn.sock.data)
self.assertIn(b'Host: [1:2:3::4]:1234', self.conn.sock.data)
def test_connect_put_request_ipv6_port(self):
self.conn.set_tunnel('[1:2:3::4]:1234')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT [1:2:3::4]:1234', self.conn.sock.data)
self.assertIn(b'Host: [1:2:3::4]:1234', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
def test_proxy_response_headers(self):
expected_header = ('X-Dummy', '1')
response_text = (
'HTTP/1.0 200 OK\r\n'
'{0}\r\n\r\n'.format(':'.join(expected_header))
)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
headers = self.conn.get_proxy_response_headers()
self.assertIn(expected_header, headers.items())
def test_no_proxy_response_headers(self):
expected_header = ('X-Dummy', '1')
response_text = (
'HTTP/1.0 200 OK\r\n'
'{0}\r\n\r\n'.format(':'.join(expected_header))
)
self.conn._create_connection = self._create_connection(response_text)
self.conn.request('PUT', '/', '')
headers = self.conn.get_proxy_response_headers()
self.assertIsNone(headers)
def test_tunnel_leak(self):
sock = None
def _create_connection(address, timeout=None, source_address=None):
nonlocal sock
sock = FakeSocket(
'HTTP/1.1 404 NOT FOUND\r\n\r\n',
host=address[0],
port=address[1],
)
return sock
self.conn._create_connection = _create_connection
self.conn.set_tunnel('destination.com')
exc = None
try:
self.conn.request('HEAD', '/', '')
except OSError as e:
# keeping a reference to exc keeps response alive in the traceback
exc = e
self.assertIsNotNone(exc)
self.assertTrue(sock.file_closed)
if __name__ == '__main__':
unittest.main(verbosity=2)
| TunnelTests |
python | pytorch__pytorch | torch/_higher_order_ops/triton_kernel_wrap.py | {
"start": 83902,
"end": 85802
} | class ____:
kernel: "TritonKernelType"
kernel_idx: Optional[int]
grid: Optional["TritonGridType"]
def __init__(
self,
kernel: "TritonKernelType",
kernel_idx: Optional[int],
grid: Optional["TritonGridType"],
) -> None:
# pyrefly: ignore # bad-assignment
self.kernel = None
self.grid = None
tracing_triton_hopifier_singleton.init_variable(self, kernel, kernel_idx, grid)
assert self.kernel is not None
def __getitem__(self, *args: Sequence[Any]) -> "TraceableTritonKernelWrapper":
return tracing_triton_hopifier_singleton.call_getitem(self, args) # type: ignore[return-value]
def run(self, *args: Sequence[Any], **kwargs: dict[str, Any]) -> Any:
from torch._library.triton import is_wrap_triton_enabled
if is_wrap_triton_enabled():
return tracing_triton_hopifier_singleton.call_run(self, args, kwargs, None)
else:
assert self.kernel is not None
# pyrefly: ignore [missing-attribute]
return self.kernel.run(*args, **kwargs)
def __call__(self, *args: Sequence[Any], **kwargs: dict[str, Any]) -> Any:
from torch._library.triton import is_wrap_triton_enabled
if is_wrap_triton_enabled():
return tracing_triton_hopifier_singleton.call_triton_kernel(
self, args, kwargs, None
)
else:
assert self.kernel is not None
# pyrefly: ignore [index-error]
return self.kernel[self.grid](*args, **kwargs)
def specialize_symbolic(self, arg: Sequence[Any]) -> Any:
import torch
# See [Note: Specialize tl.constexpr args in user-defined triton kernels]
if isinstance(arg, (torch.SymInt, torch.SymBool, torch.SymFloat)):
return guard_scalar(arg)
return arg
| TraceableTritonKernelWrapper |
python | pyqtgraph__pyqtgraph | pyqtgraph/debug.py | {
"start": 21076,
"end": 37036
} | class ____(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
allObjs = {} ## keep track of all objects created and stored within class instances
allObjs[id(allObjs)] = None
def __init__(self):
self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
## (If it is not possible to weakref the object, then the value is None)
self.startCount = {}
self.newRefs = {} ## list of objects that have been created since initialization
self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
self.objTypes = {}
ObjTracker.allObjs[id(self)] = None
self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
self.objs.append(self.objs)
for v in self.objs:
ObjTracker.allObjs[id(v)] = None
self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
for i in list(self.startRefs.keys()):
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
for i in list(self.newRefs.keys()):
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
typs.sort(key=lambda a: c1[a])
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
self.startRefs.clear()
self.startCount.clear()
self.newRefs.clear()
self.persistentRefs.clear()
del ObjTracker.allObjs[id(self)]
for v in self.objs:
del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
return type(o) is cls or id(o) in cls.allObjs
def collect(self):
print("Collecting list of all objects...")
gc.collect()
objs = get_all_objects()
frame = sys._getframe()
try:
del objs[id(frame)] ## ignore the current frame
except KeyError:
pass
try:
del objs[id(frame.f_code)]
except KeyError:
pass
ignoreTypes = [int]
refs = {}
count = {}
for k in objs:
o = objs[k]
typ = type(o)
oid = id(o)
if ObjTracker.isObjVar(o) or typ in ignoreTypes:
continue
try:
ref = weakref.ref(o)
except:
ref = None
refs[oid] = ref
typ = type(o)
typStr = typeStr(o)
self.objTypes[oid] = typStr
ObjTracker.allObjs[id(typStr)] = None
count[typ] = count.get(typ, 0) + 1
print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
return refs, count, objs
def forgetRef(self, ref):
if ref is not None:
del ObjTracker.allObjs[id(ref)]
def rememberRef(self, ref):
## Record the address of the weakref object so it is not included in future object counts.
if ref is not None:
ObjTracker.allObjs[id(ref)] = None
def lookup(self, oid, ref, objs=None):
if ref is None or ref() is None:
try:
obj = lookup(oid, objects=objs)
except:
obj = None
else:
obj = ref()
return obj
def report(self, refs, allobjs=None, showIDs=False):
if allobjs is None:
allobjs = get_all_objects()
count = {}
rev = {}
for oid in refs:
obj = self.lookup(oid, refs[oid], allobjs)
if obj is None:
typ = "[del] " + self.objTypes[oid]
else:
typ = typeStr(obj)
if typ not in rev:
rev[typ] = []
rev[typ].append(oid)
c = count.get(typ, [0,0])
count[typ] = [c[0]+1, c[1]+objectSize(obj)]
typs = list(count.keys())
typs.sort(key=lambda a: count[a][1])
for t in typs:
line = " %d\t%d\t%s" % (count[t][0], count[t][1], t)
if showIDs:
line += "\t"+",".join(map(str,rev[t]))
print(line)
def findTypes(self, refs, regex):
allObjs = get_all_objects()
objs = []
r = re.compile(regex)
for k in refs:
if r.search(self.objTypes[k]):
objs.append(self.lookup(k, refs[k], allObjs))
return objs
def describeObj(obj, depth=4, path=None, ignore=None):
"""
Trace all reference paths backward, printing a list of different ways this object can be accessed.
Attempts to answer the question "who has a reference to this object"
"""
if path is None:
path = [obj]
if ignore is None:
ignore = {} ## holds IDs of objects used within the function.
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
printed=False
for ref in refs:
if id(ref) in ignore:
continue
if id(ref) in list(map(id, path)):
print("Cyclic reference: " + refPathString([ref]+path))
printed = True
continue
newPath = [ref]+path
if len(newPath) >= depth:
refStr = refPathString(newPath)
if '[_]' not in refStr: ## ignore '_' references generated by the interactive shell
print(refStr)
printed = True
else:
describeObj(ref, depth, newPath, ignore)
printed = True
if not printed:
print("Dead end: " + refPathString(path))
def typeStr(obj):
"""Create a more useful type string by making <instance> types report their class."""
typ = type(obj)
if typ == getattr(types, 'InstanceType', None):
return "<instance of %s>" % obj.__class__.__name__
else:
return str(typ)
def searchRefs(obj, *args):
"""Pseudo-interactive function for tracing references backward.
**Arguments:**
obj: The initial object from which to start searching
args: A set of string or int arguments.
each integer selects one of obj's referrers to be the new 'obj'
each string indicates an action to take on the current 'obj':
t: print the types of obj's referrers
l: print the lengths of obj's referrers (if they have __len__)
i: print the IDs of obj's referrers
o: print obj
ro: return obj
rr: return list of obj's referrers
Examples::
searchRefs(obj, 't') ## Print types of all objects referring to obj
searchRefs(obj, 't', 0, 't') ## ..then select the first referrer and print the types of its referrers
searchRefs(obj, 't', 0, 't', 'l') ## ..also print lengths of the last set of referrers
searchRefs(obj, 0, 1, 'ro') ## Select index 0 from obj's referrer, then select index 1 from the next set of referrers, then return that object
"""
ignore = {id(sys._getframe()): None}
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
for a in args:
#fo = allFrameObjs()
#refs = [r for r in refs if r not in fo]
if type(a) is int:
obj = refs[a]
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
elif a == 't':
print(list(map(typeStr, refs)))
elif a == 'i':
print(list(map(id, refs)))
elif a == 'l':
def slen(o):
if hasattr(o, '__len__'):
return len(o)
else:
return None
print(list(map(slen, refs)))
elif a == 'o':
print(obj)
elif a == 'ro':
return obj
elif a == 'rr':
return refs
def allFrameObjs():
"""Return list of frame objects in current stack. Useful if you want to ignore these objects in refernece searches"""
f = sys._getframe()
objs = []
while f is not None:
objs.append(f)
objs.append(f.f_code)
#objs.append(f.f_locals)
#objs.append(f.f_globals)
#objs.append(f.f_builtins)
f = f.f_back
return objs
def findObj(regex):
"""Return a list of objects whose typeStr matches regex"""
allObjs = get_all_objects()
objs = []
r = re.compile(regex)
for i in allObjs:
obj = allObjs[i]
if r.search(typeStr(obj)):
objs.append(obj)
return objs
def listRedundantModules():
"""List modules that have been imported more than once via different paths."""
mods = {}
for name, mod in sys.modules.items():
if not hasattr(mod, '__file__'):
continue
mfile = os.path.abspath(mod.__file__)
if mfile[-1] == 'c':
mfile = mfile[:-1]
if mfile in mods:
print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
else:
mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
"""
Walk through a tree of QObjects, doing nothing to them.
The purpose of this function is to find dead objects and generate a crash
immediately rather than stumbling upon them later.
Prints a count of the objects encountered, for fun. (or is it?)
"""
if verbose:
print(" "*depth + typeStr(obj))
if counts is None:
counts = {}
typ = str(type(obj))
try:
counts[typ] += 1
except KeyError:
counts[typ] = 1
for child in obj.children():
walkQObjectTree(child, counts, verbose, depth+1)
return counts
QObjCache = {}
def qObjectReport(verbose=False):
"""Generate a report counting all QObjects and their types"""
global qObjCache
count = {}
for obj in findObj('PyQt'):
if isinstance(obj, QtCore.QObject):
oid = id(obj)
if oid not in QObjCache:
QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
try:
QObjCache[oid] += " " + obj.parent().objectName()
QObjCache[oid] += " " + obj.text()
except:
pass
print("check obj", oid, str(QObjCache[oid]))
if obj.parent() is None:
walkQObjectTree(obj, count, verbose)
typs = list(count.keys())
typs.sort()
for t in typs:
print(count[t], "\t", t)
| ObjTracker |
python | boto__boto3 | tests/integration/test_sqs.py | {
"start": 624,
"end": 1409
} | class ____(unittest.TestCase):
def setUp(self):
self.session = boto3.session.Session(region_name='us-west-2')
self.sqs = self.session.resource('sqs')
self.queue_name = unique_id('boto3-test')
def test_sqs(self):
# Create a new resource
queue = self.sqs.create_queue(QueueName=self.queue_name)
self.addCleanup(queue.delete)
# Call an action
queue.send_message(MessageBody='test')
# Get pre-populated resources and access attributes
messages = queue.receive_messages(WaitTimeSeconds=1)
self.assertEqual(len(messages), 1)
self.addCleanup(messages[0].delete)
self.assertEqual(queue.url, messages[0].queue_url)
self.assertEqual('test', messages[0].body)
| TestSQSResource |
python | pypa__warehouse | warehouse/macaroons/caveats/_core.py | {
"start": 693,
"end": 834
} | class ____:
reason: str
def __bool__(self):
return False
Result = Success | Failure
@pydantic_dataclass(frozen=True)
| Failure |
python | altair-viz__altair | tools/generate_schema_wrapper.py | {
"start": 10320,
"end": 12414
} | class ____:
def encode(self, *args: Any, {method_args}) -> Self:
"""Map properties of the data to visual properties of the chart (see :class:`FacetedEncoding`)
{docstring}"""
kwargs = {dict_literal}
if args:
kwargs = {{k: v for k, v in kwargs.items() if v is not Undefined}}
# Convert args to kwargs based on their types.
kwargs = _infer_encoding_types(args, kwargs)
# get a copy of the dict representation of the previous encoding
# ignore type as copy method comes from SchemaBase
copy = self.copy(deep=['encoding']) # type: ignore[attr-defined]
encoding = copy._get('encoding', {{}})
if isinstance(encoding, core.VegaLiteSchema):
encoding = {{k: v for k, v in encoding._kwds.items() if v is not Undefined}}
# update with the new encodings, and apply them to the copy
encoding.update(kwargs)
copy.encoding = core.FacetedEncoding(**encoding)
return copy
'''
# Enables use of ~, &, | with compositions of selection objects.
DUNDER_PREDICATE_COMPOSITION = """
def __invert__(self) -> PredicateComposition:
return PredicateComposition({"not": self.to_dict()})
def __and__(self, other: SchemaBase) -> PredicateComposition:
return PredicateComposition({"and": [self.to_dict(), other.to_dict()]})
def __or__(self, other: SchemaBase) -> PredicateComposition:
return PredicateComposition({"or": [self.to_dict(), other.to_dict()]})
"""
# NOTE: Not yet reasonable to generalize `TypeAliasType`, `TypeVar`
# Revisit if this starts to become more common
TYPING_EXTRA: Final = '''
T = TypeVar("T")
OneOrSeq = TypeAliasType("OneOrSeq", Union[T, Sequence[T]], type_params=(T,))
"""
One of ``T`` specified type(s), or a `Sequence` of such.
Examples
--------
The parameters ``short``, ``long`` accept the same range of types::
# ruff: noqa: UP006, UP007
def func(
short: OneOrSeq[str | bool | float],
long: Union[str, bool, float, Sequence[Union[str, bool, float]],
): ...
"""
| _EncodingMixin |
python | doocs__leetcode | solution/3100-3199/3108.Minimum Cost Walk in Weighted Graph/Solution.py | {
"start": 563,
"end": 1093
} | class ____:
def minimumCost(
self, n: int, edges: List[List[int]], query: List[List[int]]
) -> List[int]:
g = [-1] * n
uf = UnionFind(n)
for u, v, _ in edges:
uf.union(u, v)
for u, _, w in edges:
root = uf.find(u)
g[root] &= w
def f(u: int, v: int) -> int:
if u == v:
return 0
a, b = uf.find(u), uf.find(v)
return g[a] if a == b else -1
return [f(s, t) for s, t in query]
| Solution |
python | pdm-project__pdm | tests/test_utils.py | {
"start": 6003,
"end": 9432
} | class ____:
def test_non_file_url(self):
with pytest.raises(ValueError):
utils.url_to_path("not_a_file_scheme://netloc/path")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Non-Windows test")
def test_non_windows_non_local_file_url(self):
with pytest.raises(ValueError):
utils.url_to_path("file://non_local_netloc/file/url")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Non-Windows test")
def test_non_windows_localhost_local_file_url(self):
assert utils.url_to_path("file://localhost/local/file/path") == "/local/file/path"
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="Windows test")
def test_windows_localhost_local_file_url(self):
assert utils.url_to_path("file://localhost/local/file/path") == "\\local\\file\\path"
@pytest.mark.parametrize(
"given,expected",
[
("test", "test"),
("", ""),
("${FOO}", "hello"),
("$FOO", "$FOO"),
("${BAR}", ""),
("%FOO%", "%FOO%"),
("${FOO}_${FOO}", "hello_hello"),
],
)
def test_expand_env_vars(given, expected, monkeypatch):
monkeypatch.setenv("FOO", "hello")
assert utils.expand_env_vars(given) == expected
@pytest.mark.parametrize(
"given,expected",
[
("https://example.org/path?arg=1", "https://example.org/path?arg=1"),
(
"https://${FOO}@example.org/path?arg=1",
"https://token%3Aoidc%2F1@example.org/path?arg=1",
),
(
"https://${FOO}:${BAR}@example.org/path?arg=1",
"https://token%3Aoidc%2F1:p%40ssword@example.org/path?arg=1",
),
(
"https://${FOOBAR}@example.org/path?arg=1",
"https://@example.org/path?arg=1",
),
],
)
def test_expand_env_vars_in_auth(given, expected, monkeypatch):
monkeypatch.setenv("FOO", "token:oidc/1")
monkeypatch.setenv("BAR", "p@ssword")
assert utils.expand_env_vars_in_auth(given) == expected
@pytest.mark.parametrize(
"os_name,given,expected",
[
("posix", ("match", "repl", "/a/b/match/c/match/d/e"), "/a/b/repl/c/repl/d/e"),
("posix", ("old", "new", "/path/to/old/pdm"), "/path/to/new/pdm"),
("posix", ("match", "repl", "match/a/math/b/match/c"), "repl/a/math/b/repl/c"),
("posix", ("match", "repl", "/some/path"), "/some/path"),
("posix", ("match", "repl", ""), ""),
("nt", ("old", "new", "C:\\Path\\tO\\old\\pdm"), "C:/Path/tO/new/pdm"),
("nt", ("old", "new", "C:\\Path\\tO\\Old\\pdm"), "C:/Path/tO/new/pdm"),
("nt", ("old", "new", "C:\\no\\matching\\path"), "C:/no/matching/path"),
],
)
def test_path_replace(os_name, given, expected):
with mock.patch("pdm.utils.os_name", os_name):
pattern, replace_with, dest = given
assert utils.path_replace(pattern, replace_with, dest) == expected
# Only testing POSIX-style paths here
@pytest.mark.parametrize(
"given,expected",
[
(("/", "/"), True),
(("/a", "/"), True),
(("/a/b", "/a"), True),
(("/a", "/b"), False),
(("a", "b"), False),
(("/a/b", "/c/d"), False),
(("/a/b/c", "/a"), True),
(("../a/b/c", "../a"), True),
],
)
def test_is_path_relative_to(given, expected):
path, other = given
assert utils.is_path_relative_to(path, other) == expected
| TestUrlToPath |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper_fxir.py | {
"start": 2355,
"end": 2800
} | class ____(CodegenSymbol):
"""
Represents a sympy.Symbol graph input.
"""
symbol: sympy.Symbol
def get_name(self) -> str:
return str(self.symbol)
def get_example(self) -> Union[torch.Tensor, torch.SymInt]:
sym_int = convert_to_symint(self.symbol)
assert isinstance(sym_int, torch.SymInt)
return sym_int
CodegenBuffer = Union[BufferLike, SymbolBuffer]
@dataclasses.dataclass
| SymbolBuffer |
python | zarr-developers__zarr-python | tests/package_with_entrypoint/__init__.py | {
"start": 624,
"end": 1149
} | class ____(ArrayBytesCodec):
is_fixed_size = True
async def encode(
self,
chunks_and_specs: Iterable[tuple[CodecInput | None, ArraySpec]],
) -> Iterable[Buffer | None]:
return [None]
async def decode(
self,
chunks_and_specs: Iterable[tuple[CodecInput | None, ArraySpec]],
) -> npt.NDArray[Any]:
return np.array(1)
def compute_encoded_size(self, input_byte_length: int, chunk_spec: ArraySpec) -> int:
return input_byte_length
| TestEntrypointCodec |
python | ray-project__ray | python/ray/experimental/collective/communicator.py | {
"start": 129,
"end": 423
} | class ____:
"""
A handle to a communicator that we are a member of.
"""
# The name of the communicator.
name: str
# Our rank in the collective group.
rank: int
# A valid backend, as defined by
# ray.util.collective.types.Backend.
backend: str
| Communicator |
python | sphinx-doc__sphinx | sphinx/util/i18n.py | {
"start": 3314,
"end": 11930
} | class ____:
"""A repository for message catalogs."""
def __init__(
self,
basedir: str | os.PathLike[str],
locale_dirs: list[str],
language: str,
encoding: str,
) -> None:
self.basedir = _StrPath(basedir)
self._locale_dirs = locale_dirs
self.language = language
self.encoding = encoding
@property
def locale_dirs(self) -> Iterator[_StrPath]:
if not self.language:
return
for locale_dir in self._locale_dirs:
locale_path = self.basedir / locale_dir / self.language / 'LC_MESSAGES'
if locale_path.exists():
yield self.basedir / locale_dir
else:
logger.verbose(__('locale_dir %s does not exist'), locale_path)
@property
def pofiles(self) -> Iterator[tuple[_StrPath, _StrPath]]:
for locale_dir in self.locale_dirs:
locale_path = locale_dir / self.language / 'LC_MESSAGES'
for abs_path in locale_path.rglob('*.po'):
rel_path = abs_path.relative_to(locale_path)
# skip dot-directories
if any(part.startswith('.') for part in rel_path.parts[:-1]):
continue
yield locale_path, rel_path
@property
def catalogs(self) -> Iterator[CatalogInfo]:
for basedir, filename in self.pofiles:
domain = filename.with_suffix('').as_posix()
yield CatalogInfo(basedir, domain, self.encoding)
def docname_to_domain(docname: str, compaction: bool | str) -> str:
"""Convert docname to domain for catalogs."""
if isinstance(compaction, str):
return compaction
if compaction:
return docname.partition(SEP)[0]
else:
return docname
# date_format mappings: ustrftime() to babel.dates.format_datetime()
date_format_mappings = {
'%a': 'EEE', # Weekday as locale's abbreviated name.
'%A': 'EEEE', # Weekday as locale's full name.
'%b': 'MMM', # Month as locale's abbreviated name.
'%B': 'MMMM', # Month as locale's full name.
'%c': 'medium', # Locale's appropriate date and time representation.
'%-d': 'd', # Day of the month as a decimal number.
'%d': 'dd', # Day of the month as a zero-padded decimal number.
'%-H': 'H', # Hour (24-hour clock) as a decimal number [0,23].
'%H': 'HH', # Hour (24-hour clock) as a zero-padded decimal number [00,23].
'%-I': 'h', # Hour (12-hour clock) as a decimal number [1,12].
'%I': 'hh', # Hour (12-hour clock) as a zero-padded decimal number [01,12].
'%-j': 'D', # Day of the year as a decimal number.
'%j': 'DDD', # Day of the year as a zero-padded decimal number.
'%-m': 'M', # Month as a decimal number.
'%m': 'MM', # Month as a zero-padded decimal number.
'%-M': 'm', # Minute as a decimal number [0,59].
'%M': 'mm', # Minute as a zero-padded decimal number [00,59].
'%p': 'a', # Locale's equivalent of either AM or PM.
'%-S': 's', # Second as a decimal number.
'%S': 'ss', # Second as a zero-padded decimal number.
'%U': 'WW', # Week number of the year (Sunday as the first day of the week)
# as a zero padded decimal number. All days in a new year preceding
# the first Sunday are considered to be in week 0.
'%w': 'e', # Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.
'%-W': 'W', # Week number of the year (Monday as the first day of the week)
# as a decimal number. All days in a new year preceding the first
# Monday are considered to be in week 0.
'%W': 'WW', # Week number of the year (Monday as the first day of the week)
# as a zero-padded decimal number.
'%x': 'medium', # Locale's appropriate date representation.
'%X': 'medium', # Locale's appropriate time representation.
'%y': 'YY', # Year without century as a zero-padded decimal number.
'%Y': 'yyyy', # Year with century as a decimal number.
'%Z': 'zzz', # Time zone name (no characters if no time zone exists).
'%z': 'ZZZ', # UTC offset in the form ±HHMM[SS[.ffffff]]
# (empty string if the object is naive).
'%%': '%',
} # fmt: skip
date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
def babel_format_date(
date: datetime,
format: str,
locale: str,
formatter: Formatter = babel.dates.format_date,
) -> str:
# Check if we have the tzinfo attribute. If not we cannot do any time
# related formats.
if not hasattr(date, 'tzinfo'):
formatter = babel.dates.format_date
if not locale:
# Babel would not accept a falsy locale
# (or would try to fall back to the LC_TIME
# locale, which would be not what was requested),
# so we can just short-cut to English, as we
# would for the `"fallback to English"` case.
locale = 'en'
try:
return formatter(date, format, locale=locale)
except (ValueError, babel.core.UnknownLocaleError):
# fallback to English
logger.warning(
__('Invalid Babel locale: %r.'),
locale,
type='i18n',
subtype='babel',
)
return formatter(date, format, locale='en')
except AttributeError:
logger.warning(
__(
'Invalid date format. Quote the string by single quote '
'if you want to output it directly: %s'
),
format,
type='i18n',
subtype='babel',
)
return format
def format_date(
format: str,
*,
date: datetime | None = None,
language: str,
local_time: bool = False,
) -> str:
if date is None:
# If time is not specified, try to use $SOURCE_DATE_EPOCH variable
# See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
if source_date_epoch is not None:
date = datetime.fromtimestamp(float(source_date_epoch), tz=UTC)
# If SOURCE_DATE_EPOCH is set, users likely want a reproducible result,
# so enforce GMT/UTC for consistency.
local_time = False
else:
date = datetime.now(tz=UTC)
if local_time:
# > If called with tz=None, the system local time zone
# > is assumed for the target time zone.
# https://docs.python.org/dev/library/datetime.html#datetime.datetime.astimezone
date = date.astimezone(tz=None)
result = []
tokens = date_format_re.split(format)
for token in tokens:
if token in date_format_mappings:
babel_format = date_format_mappings.get(token, '')
# Check if we have to use a different babel formatter then
# format_datetime, because we only want to format a date
# or a time.
function: Formatter
if token == '%x':
function = babel.dates.format_date
elif token == '%X':
function = babel.dates.format_time
else:
function = babel.dates.format_datetime
result.append(
babel_format_date(
date, babel_format, locale=language, formatter=function
)
)
else:
result.append(token)
return ''.join(result)
def get_image_filename_for_language(
filename: str | os.PathLike[str],
env: BuildEnvironment,
) -> str:
root, ext = os.path.splitext(filename)
dirname = os.path.dirname(root)
docpath = os.path.dirname(env.current_document.docname)
try:
return env.config.figure_language_filename.format(
root=root,
ext=ext,
path=dirname and dirname + SEP,
basename=os.path.basename(root),
docpath=docpath and docpath + SEP,
language=env.config.language,
)
except KeyError as exc:
msg = f'Invalid figure_language_filename: {exc!r}'
raise SphinxError(msg) from exc
def search_image_for_language(filename: str, env: BuildEnvironment) -> str:
translated = get_image_filename_for_language(filename, env)
_, abspath = env.relfn2path(translated)
if os.path.exists(abspath):
return translated
else:
return filename
| CatalogRepository |
python | walkccc__LeetCode | solutions/2764. is Array a Preorder of Some Binary Tree/2764.py | {
"start": 0,
"end": 349
} | class ____:
def isPreorder(self, nodes: list[list[int]]) -> bool:
stack = [] # Stores `id`s.
for id, parentId in nodes:
if parentId == -1:
stack.append(id)
continue
while stack and stack[-1] != parentId:
stack.pop()
if not stack:
return False
stack.append(id)
return True
| Solution |
python | realpython__materials | django-todo-list/source_code_final/todo_app/views.py | {
"start": 196,
"end": 291
} | class ____(ListView):
model = ToDoList
template_name = "todo_app/index.html"
| ListListView |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-artifact-editor/llama_index/tools/artifact_editor/base.py | {
"start": 809,
"end": 1026
} | class ____(BaseModel):
"""Collection of patch operations to apply to any Pydantic model."""
operations: List[PatchOperation] = Field(
..., description="List of patch operations to apply"
)
| JsonPatch |
python | pydantic__pydantic | tests/test_pickle.py | {
"start": 5021,
"end": 5266
} | class ____:
a: int
b: float
def builtin_dataclass_factory() -> type:
@dataclasses.dataclass
class NonImportableBuiltinDataclass:
a: int
b: float
return NonImportableBuiltinDataclass
| ImportableBuiltinDataclass |
python | jpadilla__pyjwt | jwt/exceptions.py | {
"start": 91,
"end": 200
} | class ____(PyJWTError):
"""Base exception when ``decode()`` fails on a token"""
pass
| InvalidTokenError |
python | apache__airflow | airflow-core/tests/unit/dags/test_only_empty_tasks.py | {
"start": 1246,
"end": 2035
} | class ____(EmptyOperator):
template_fields_renderers = {"body": "json"}
template_fields: Sequence[str] = ("body",)
def __init__(self, body, *args, **kwargs):
super().__init__(*args, **kwargs)
self.body = body
with dag:
task_a = EmptyOperator(task_id="test_task_a")
task_b = EmptyOperator(task_id="test_task_b")
task_a >> task_b
MyEmptyOperator(task_id="test_task_c", body={"hello": "world"})
EmptyOperator(task_id="test_task_on_execute", on_execute_callback=lambda *args, **kwargs: None)
EmptyOperator(task_id="test_task_on_success", on_success_callback=lambda *args, **kwargs: None)
EmptyOperator(
task_id="test_task_outlets", outlets=[Asset(name="hello", uri="test://asset1", group="test-group")]
)
| MyEmptyOperator |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 21173,
"end": 21926
} | class ____(Screen):
"""A screen with a priority binding."""
BINDINGS = [
Binding("0", "app.record('screen_0')", "0", priority=False),
Binding("a", "app.record('screen_a')", "a", priority=False),
Binding("b", "app.record('screen_b')", "b", priority=True),
Binding("c", "app.record('screen_c')", "c", priority=False),
Binding("d", "app.record('screen_d')", "c", priority=True),
Binding("e", "app.record('screen_e')", "e", priority=False),
Binding("f", "app.record('screen_f')", "f", priority=True),
]
def compose(self) -> ComposeResult:
yield PriorityOverlapWidget()
def on_mount(self) -> None:
self.query_one(PriorityOverlapWidget).focus()
| PriorityOverlapScreen |
python | pytorch__pytorch | torch/mtia/mtia_graph.py | {
"start": 155,
"end": 1196
} | class ____(torch._C._MTIAGraph):
"""
Wrapper around a MTIA graph.
"""
def __new__(cls, keep_graph: bool = False) -> Self:
return super().__new__(cls, keep_graph)
def capture_begin(self, pool: _POOL_HANDLE) -> None:
"""
Begin capturing a MTIA graph.
"""
super().capture_begin(pool)
def capture_end(self) -> None:
"""
End the capture of a MTIA graph.
"""
super().capture_end()
def instantiate(self) -> None:
"""
Instantiate the captured MTIA graph.
"""
super().instantiate()
def replay(self) -> None:
"""
Replay the captured MTIA graph.
"""
super().replay()
def reset(self) -> None:
"""
Destroy the captured graph and reset the states.
"""
super().reset()
def pool(self) -> _POOL_HANDLE:
"""
Return an opaque token representing the id of this graph's memory pool
"""
return super().pool()
| MTIAGraph |
python | django-import-export__django-import-export | import_export/formats/base_formats.py | {
"start": 3974,
"end": 4109
} | class ____(TextFormat):
TABLIB_MODULE = "tablib.formats._ods"
CONTENT_TYPE = "application/vnd.oasis.opendocument.spreadsheet"
| ODS |
python | google__jax | jax/_src/core.py | {
"start": 109628,
"end": 113570
} | class ____(Primitive):
multiple_results = True
map_primitive = True
def bind(self, *args, **params):
return self._true_bind(*args, **params)
def bind_with_trace(self, trace, fun_and_args, params):
fun: lu.WrappedFun = fun_and_args[0]
args = fun_and_args[1:]
assert len(params['in_axes']) == len(args)
return trace.process_map(self, fun, args, params)
def process(self, trace, fun, tracers, params):
return trace.process_map(self, fun, tracers, params)
def get_bind_params(self, params):
new_params = dict(params)
jaxpr: Jaxpr = new_params.pop('call_jaxpr')
subfun = lu.hashable_partial(
lu.wrap_init(eval_jaxpr, debug_info=jaxpr.debug_info), jaxpr, ())
axes = new_params.pop('out_axes')
new_params['out_axes_thunk'] = HashableFunction(lambda: axes, closure=axes)
return [subfun], new_params
def mapped_aval(size: AxisSize, axis: int | None,
aval: AbstractValue) -> AbstractValue:
handler, _ = aval_mapping_handlers.get(type(aval), (None, None))
if handler is not None:
return handler(size, axis, aval)
else:
raise TypeError(f"no mapping handler for {aval} of type {type(aval)}")
def unmapped_aval(size: AxisSize, axis: int | None,
aval: AbstractValue, explicit_mesh_axis=None) -> AbstractValue:
_, handler = aval_mapping_handlers.get(type(aval), (None, None))
if handler is not None:
return handler(size, axis, explicit_mesh_axis, aval)
else:
raise TypeError(f"no unmapping handler for {aval} of type {type(aval)}")
def _map_shaped_array(
size: int, axis: int | None, aval: ShapedArray) -> ShapedArray:
assert axis is None or aval.shape[axis] == size
if axis is None:
return aval
aval_s = aval.sharding
sharding = aval_s.update(
spec=aval_s.spec.update(partitions=tuple_delete(aval_s.spec, axis)))
return ShapedArray(tuple_delete(aval.shape, axis), aval.dtype,
weak_type=aval.weak_type, sharding=sharding, vma=aval.vma,
memory_space=aval.memory_space)
def _unmap_shaped_array(
size: int, axis: int | None, explicit_mesh_axis, aval: ShapedArray
) -> ShapedArray:
if axis is None:
return aval
elif type(axis) is int:
aval_s = aval.sharding
sharding = aval_s.update(spec=aval_s.spec.update(partitions=tuple_insert(
aval_s.spec, axis, explicit_mesh_axis)))
return ShapedArray(tuple_insert(aval.shape, axis, size), aval.dtype,
weak_type=aval.weak_type, sharding=sharding,
vma=aval.vma, memory_space=aval.memory_space)
else:
raise TypeError(axis)
def _map_dshaped_array(
size: AxisSize, axis: int | None, aval: DShapedArray) -> DShapedArray:
if axis is None: return aval
return DShapedArray(tuple_delete(aval.shape, axis), aval.dtype,
aval.weak_type)
def _unmap_dshaped_array(
size: AxisSize, axis: int | None, explicit_mesh_axis, aval: DShapedArray
) -> DShapedArray:
if axis is None: return aval
elif type(axis) is int:
return DShapedArray(tuple_insert(aval.shape, axis, size), aval.dtype,
weak_type=aval.weak_type)
else:
raise TypeError(axis)
AvalMapHandlerPair = tuple[Callable, Callable]
aval_mapping_handlers: dict[type, AvalMapHandlerPair] = {
DShapedArray: (_map_dshaped_array, _unmap_dshaped_array),
ShapedArray: (_map_shaped_array, _unmap_shaped_array),
AbstractToken: (lambda _, __, a: a, lambda _, __, ____, a: a)
}
# When a mapped function is given no axis name, we generate a name object based
# on the id of the function object. Collisions aren't important because this
# name can't be used in collectives, as user code never gets a ref to this
# object. We don't want to use the function object itself because that might
# persist references to the function object.
# TODO(mattjj): revisit this unique axis name strategy
@total_ordering
| MapPrimitive |
python | walkccc__LeetCode | solutions/154. Find Minimum in Rotated Sorted Array II/154.py | {
"start": 0,
"end": 272
} | class ____:
def findMin(self, nums: list[int]) -> int:
l = 0
r = len(nums) - 1
while l < r:
m = (l + r) // 2
if nums[m] == nums[r]:
r -= 1
elif nums[m] < nums[r]:
r = m
else:
l = m + 1
return nums[l]
| Solution |
python | crytic__slither | slither/tools/upgradeability/checks/initialization.py | {
"start": 2607,
"end": 3721
} | class ____(AbstractCheck):
ARGUMENT = "init-inherited"
IMPACT = CheckClassification.INFORMATIONAL
HELP = "Initializable is not inherited"
WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#initializable-is-not-inherited"
WIKI_TITLE = "Initializable is not inherited"
# region wiki_description
WIKI_DESCRIPTION = """
Detect if `Initializable` is inherited.
"""
# endregion wiki_description
# region wiki_recommendation
WIKI_RECOMMENDATION = """
Review manually the contract's initialization. Consider inheriting `Initializable`.
"""
# endregion wiki_recommendation
REQUIRE_CONTRACT = True
def _check(self):
initializable = self.contract.file_scope.get_contract_from_name("Initializable")
# See InitializablePresent
if initializable is None:
return []
if initializable not in self.contract.inheritance:
info = [self.contract, " does not inherit from ", initializable, ".\n"]
json = self.generate_result(info)
return [json]
return []
| InitializableInherited |
python | getsentry__sentry | tests/sentry/debug_files/test_artifact_bundles.py | {
"start": 6082,
"end": 16109
} | class ____(TestCase):
def setUp(self) -> None:
self.release_name = "1.0.0"
self.dist_name = "dist1"
def create_bundle(self, date_added=None, date_last_modified=None):
if date_added is None:
date_added = timezone.now()
if date_last_modified is None:
date_last_modified = date_added
file = self.create_file(name="bundle.zip")
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id,
bundle_id=uuid.uuid4(),
file=file,
artifact_count=5,
date_added=date_added,
date_uploaded=date_added,
date_last_modified=date_last_modified,
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
date_added=date_added,
)
ReleaseArtifactBundle.objects.create(
organization_id=self.organization.id,
artifact_bundle=artifact_bundle,
release_name=self.release_name,
dist_name=self.dist_name,
date_added=date_added,
)
return artifact_bundle
def assert_results(self, results: set[tuple[int, datetime]], expected_bundle_ids: set[int]):
assert {bundle_id for bundle_id, _ in results} == expected_bundle_ids
def test_get_artifact_bundles_containing_url_exact_match(self) -> None:
"""Test retrieving a bundle with an exact URL match."""
bundle = self.create_bundle()
url = "http://example.com/file.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle,
url=url,
date_added=bundle.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, url
),
{bundle.id},
)
def test_get_artifact_bundles_containing_url_suffix_match(self) -> None:
"""Test retrieving a bundle with a URL suffix match."""
bundle = self.create_bundle()
full_url = "http://example.com/path/to/file.js"
search_url = "file.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle,
url=full_url,
date_added=bundle.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, search_url
),
{bundle.id},
)
def test_get_artifact_bundles_containing_url_no_match(self) -> None:
"""Test retrieving bundles when none match the URL."""
bundle = self.create_bundle()
url = "http://example.com/file.js"
search_url = "nonexistent.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle,
url=url,
date_added=bundle.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, search_url
),
set(),
)
def test_get_artifact_bundles_containing_url_multiple_matches(self) -> None:
"""Test retrieving multiple bundles with the same URL."""
# Create bundles with different timestamps
bundle1 = self.create_bundle(
date_added=timezone.now() - timedelta(days=2),
date_last_modified=timezone.now() - timedelta(days=2),
)
bundle2 = self.create_bundle(
date_added=timezone.now() - timedelta(days=1),
date_last_modified=timezone.now() - timedelta(days=1),
)
bundle3 = self.create_bundle() # Most recent
url = "http://example.com/file.js"
# Create index entries for the URL in all bundles
for bundle in [bundle1, bundle2, bundle3]:
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle,
url=url,
date_added=bundle.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, url
),
{bundle3.id, bundle2.id, bundle1.id},
)
def test_get_artifact_bundles_containing_url_different_project(self) -> None:
"""Test that bundles from a different project are not returned."""
bundle = self.create_bundle()
url = "http://example.com/file.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle,
url=url,
date_added=bundle.date_added,
)
other_project = self.create_project(organization=self.organization)
self.assert_results(
get_artifact_bundles_containing_url(
other_project, self.release_name, self.dist_name, url
),
set(),
)
def test_get_artifact_bundles_containing_url_different_release(self) -> None:
"""Test that bundles from a different release are not returned."""
bundle = self.create_bundle()
url = "http://example.com/file.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle,
url=url,
date_added=bundle.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(self.project, "2.0.0", self.dist_name, url),
set(),
)
def test_contains(self) -> None:
"""
Test that demonstrates why we use reversed_url__istartswith instead of contains.
A 'contains' query would match parts of filenames anywhere, but we want to match
only suffix patterns.
"""
bundle = self.create_bundle()
url = "http://example.com/path/with/file.js/in/deeper/directory/index.html"
search_string = "file.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle,
url=url,
date_added=bundle.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, search_string
),
{bundle.id},
)
bundle2 = self.create_bundle()
url2 = "http://example.com/path/to/file.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle2,
url=url2,
date_added=bundle2.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, search_string
),
{bundle.id, bundle2.id},
)
def test_case_insensitive_url_matching(self) -> None:
"""Test that URLs with different casing match properly."""
bundle = self.create_bundle()
url = "http://example.com/path/to/CamelCaseFile.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle,
url=url,
date_added=bundle.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, "camelcasefile.js"
),
{bundle.id},
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, "CAMELCASEFILE.JS"
),
{bundle.id},
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, "cAmElCaSeFilE.Js"
),
{bundle.id},
)
def test_multiple_bundles_with_different_urls(self) -> None:
"""Test that when we have multiple bundles with different URLs, we match to only one."""
bundle1 = self.create_bundle()
bundle2 = self.create_bundle()
bundle3 = self.create_bundle()
url1 = "http://example.com/path/to/file1.js"
url2 = "http://example.com/path/to/file2.js"
url3 = "http://example.com/path/to/file3.js"
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle1,
url=url1,
date_added=bundle1.date_added,
)
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle2,
url=url2,
date_added=bundle2.date_added,
)
ArtifactBundleIndex.objects.create(
organization_id=self.organization.id,
artifact_bundle=bundle3,
url=url3,
date_added=bundle3.date_added,
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, "file1.js"
),
{bundle1.id},
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, "file2.js"
),
{bundle2.id},
)
self.assert_results(
get_artifact_bundles_containing_url(
self.project, self.release_name, self.dist_name, "file3.js"
),
{bundle3.id},
)
| GetArtifactBundlesContainingUrlTest |
python | wandb__wandb | wandb/vendor/pygments/lexers/sql.py | {
"start": 23372,
"end": 27192
} | class ____(RegexLexer):
"""
Special lexer for MySQL.
"""
name = 'MySQL'
aliases = ['mysql']
mimetypes = ['text/x-mysql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#|--\s+).*\n?', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'[0-9]+', Number.Integer),
(r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float),
(r"'(\\\\|\\'|''|[^'])*'", String.Single),
(r'"(\\\\|\\"|""|[^"])*"', String.Double),
(r"`(\\\\|\\`|``|[^`])*`", String.Symbol),
(r'[+*/<>=~!@#%^&|`?-]', Operator),
(r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
r'precision|real|numeric|dec|decimal|timestamp|year|char|'
r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
bygroups(Keyword.Type, Text, Punctuation)),
(r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
r'character|check|collate|column|condition|constraint|continue|'
r'convert|create|cross|current_date|current_time|'
r'current_timestamp|current_user|cursor|database|databases|'
r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
r'declare|default|delayed|delete|desc|describe|deterministic|'
r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
r'enclosed|escaped|exists|exit|explain|fetch|flush|float|float4|'
r'float8|for|force|foreign|from|fulltext|grant|group|having|'
r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
r'minute_microsecond|minute_second|mod|modifies|natural|'
r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
r'replace|require|restrict|return|revoke|right|rlike|schema|'
r'schemas|second_microsecond|select|sensitive|separator|set|'
r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
r'varying|when|where|while|with|write|x509|xor|year_month|'
r'zerofill)\b', Keyword),
# TODO: this list is not complete
(r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
(r'(true|false|null)', Name.Constant),
(r'([a-z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-z_]\w*', Name),
(r'@[a-z0-9]*[._]*[a-z0-9]*', Name.Variable),
(r'[;:()\[\],.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
| MySqlLexer |
python | ipython__ipython | IPython/terminal/magics.py | {
"start": 1075,
"end": 7703
} | class ____(Magics):
def __init__(self, shell):
super(TerminalMagics, self).__init__(shell)
def store_or_execute(self, block, name, store_history=False):
""" Execute a block, or store it in a variable, per the user's request.
"""
if name:
# If storing it for further editing
self.shell.user_ns[name] = SList(block.splitlines())
print("Block assigned to '%s'" % name)
else:
b = self.preclean_input(block)
self.shell.user_ns['pasted_block'] = b
self.shell.using_paste_magics = True
try:
self.shell.run_cell(b, store_history)
finally:
self.shell.using_paste_magics = False
def preclean_input(self, block):
lines = block.splitlines()
while lines and not lines[0].strip():
lines = lines[1:]
return strip_email_quotes('\n'.join(lines))
def rerun_pasted(self, name='pasted_block'):
""" Rerun a previously pasted command.
"""
b = self.shell.user_ns.get(name)
# Sanity checks
if b is None:
raise UsageError('No previous pasted block available')
if not isinstance(b, str):
raise UsageError(
"Variable 'pasted_block' is not a string, can't execute")
print("Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b)))
self.shell.run_cell(b)
@line_magic
def autoindent(self, parameter_s = ''):
"""Toggle autoindent on/off (deprecated)"""
self.shell.set_autoindent()
print("Automatic indentation is:",['OFF','ON'][self.shell.autoindent])
@skip_doctest
@line_magic
def cpaste(self, parameter_s=''):
"""Paste & execute a pre-formatted code block from clipboard.
You must terminate the block with '--' (two minus-signs) or Ctrl-D
alone on the line. You can also provide your own sentinel with '%paste
-s %%' ('%%' is the new sentinel for this operation).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%cpaste foo'.
This assigns the pasted block to variable 'foo' as string, without
dedenting or executing it (preceding >>> and + is still stripped)
'%cpaste -r' re-executes the block previously entered by cpaste.
'%cpaste -q' suppresses any additional output messages.
Do not be alarmed by garbled output on Windows (it's a readline bug).
Just press enter and type -- (and press enter again) and the block
will be what was just pasted.
Shell escapes are not supported (yet).
See Also
--------
paste : automatically pull code from clipboard.
Examples
--------
::
In [8]: %cpaste
Pasting code; enter '--' alone on the line to stop.
:>>> a = ["world!", "Hello"]
:>>> print(" ".join(sorted(a)))
:--
Hello world!
::
In [8]: %cpaste
Pasting code; enter '--' alone on the line to stop.
:>>> %alias_magic t timeit
:>>> %t -n1 pass
:--
Created `%t` as an alias for `%timeit`.
Created `%%t` as an alias for `%%timeit`.
354 ns ± 224 ns per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
opts, name = self.parse_options(parameter_s, 'rqs:', mode='string')
if 'r' in opts:
self.rerun_pasted()
return
quiet = ('q' in opts)
sentinel = opts.get('s', u'--')
block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
self.store_or_execute(block, name, store_history=True)
@line_magic
def paste(self, parameter_s=''):
"""Paste & execute a pre-formatted code block from clipboard.
The text is pulled directly from the clipboard without user
intervention and printed back on the screen before execution (unless
the -q flag is given to force quiet mode).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%paste foo'.
This assigns the pasted block to variable 'foo' as string, without
executing it (preceding >>> and + is still stripped).
Options:
-r: re-executes the block previously entered by cpaste.
-q: quiet mode: do not echo the pasted text back to the terminal.
IPython statements (magics, shell escapes) are not supported (yet).
See Also
--------
cpaste : manually paste code into terminal until you mark its end.
"""
opts, name = self.parse_options(parameter_s, 'rq', mode='string')
if 'r' in opts:
self.rerun_pasted()
return
try:
block = self.shell.hooks.clipboard_get()
except TryNext as clipboard_exc:
message = getattr(clipboard_exc, 'args')
if message:
error(message[0])
else:
error('Could not get text from the clipboard.')
return
except ClipboardEmpty as e:
raise UsageError("The clipboard appears to be empty") from e
# By default, echo back to terminal unless quiet mode is requested
if 'q' not in opts:
sys.stdout.write(self.shell.pycolorize(block))
if not block.endswith("\n"):
sys.stdout.write("\n")
sys.stdout.write("## -- End pasted text --\n")
self.store_or_execute(block, name, store_history=True)
# Class-level: add a '%cls' magic only on Windows
if sys.platform == 'win32':
@line_magic
def cls(self, s):
"""Clear screen.
"""
os.system("cls")
| TerminalMagics |
python | huggingface__transformers | tests/models/voxtral/test_modeling_voxtral.py | {
"start": 4269,
"end": 9155
} | class ____(
ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
"""
Model tester for `VoxtralForConditionalGeneration`.
"""
all_model_classes = (VoxtralForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{"text-to-speech": VoxtralForConditionalGeneration, "any-to-any": VoxtralForConditionalGeneration}
if is_torch_available()
else {}
)
_is_composite = True
def setUp(self):
self.model_tester = VoxtralModelTester(self)
self.config_tester = ConfigTester(self, config_class=VoxtralConfig, has_text_modality=False)
@unittest.skip(
reason="This test does not apply to Voxtral since inputs_embeds corresponding to audio tokens are replaced when input features are provided."
)
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(
reason="Voxtral need lots of steps to prepare audio/mask correctly to get pad-free inputs. Cf llava (reference multimodal model)"
)
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip(
reason="Voxtral need lots of steps to prepare audio/mask correctly to get pad-free inputs. Cf llava (reference multimodal model)"
)
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip(
reason="Voxtral need lots of steps to prepare audio/mask correctly to get pad-free inputs. Cf llava (reference multimodal model)"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip(
reason="Voxtral need lots of steps to prepare audio/mask correctly to get pad-free inputs. Cf llava (reference multimodal model)"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
pass
@unittest.skip(
reason="Voxtral need lots of steps to prepare audio/mask correctly to get pad-free inputs. Cf llava (reference multimodal model)"
)
def test_flash_attention_3_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip(
reason="Voxtral need lots of steps to prepare audio/mask correctly to get pad-free inputs. Cf llava (reference multimodal model)"
)
def test_flash_attention_3_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
pass
@unittest.skip(reason="Voxtral has no separate base model without a head.")
def test_model_base_model_prefix(self):
pass
def test_sdpa_can_dispatch_composite_models(self):
# overwrite because Voxtral is audio+text model (not vision+text)
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
text_attn = "sdpa" if model.language_model._supports_sdpa else "eager"
vision_attn = "sdpa" if model.audio_tower._supports_sdpa else "eager"
# `None` as it is the requested one which will be assigned to each sub-config
# Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model.language_model.config._attn_implementation == text_attn)
self.assertTrue(model.audio_tower.config._attn_implementation == vision_attn)
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.audio_tower.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
raise ValueError("The eager model should not have SDPA attention layers")
@require_torch
| VoxtralForConditionalGenerationModelTest |
python | keras-team__keras | keras/src/layers/merging/add.py | {
"start": 162,
"end": 2150
} | class ____(Merge):
"""Performs elementwise addition operation.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Add()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `added = keras.layers.add([x1, x2])`
>>> added = keras.layers.Add()([x1, x2])
>>> out = keras.layers.Dense(4)(added)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.add(output, inputs[i])
return output
@keras_export("keras.layers.add")
def add(inputs, **kwargs):
"""Functional interface to the `keras.layers.Add` layer.
Args:
inputs: A list of input tensors with the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the sum of the inputs. It has the same shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.add([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> added = keras.layers.add([x1, x2])
>>> out = keras.layers.Dense(4)(added)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Add(**kwargs)(inputs)
| Add |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1017247,
"end": 1017784
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of UpdateEnterpriseAdministratorRole"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of changing the administrator's
role.
"""
| UpdateEnterpriseAdministratorRolePayload |
python | google__jax | tests/pallas/tpu_pallas_test.py | {
"start": 2895,
"end": 4451
} | class ____(PallasBaseTest):
@parameterized.parameters(
(pl.Buffered(2), pl.Buffered(2)),
(pl.Buffered(2), pl.Buffered(1)),
(pl.Buffered(1), pl.Buffered(1)))
def test_two_input_vadd(self, x_pmode : pl.Buffered, y_pmode : pl.Buffered):
def body(x_ref, y_ref, o_ref):
x = x_ref[:]
y = y_ref[:]
o_ref[:] = x + y
size_in_vregs = 128
data_size = size_in_vregs * 1024
block_size = 1024
x = jnp.arange(data_size, dtype=jnp.float32)
y = jnp.arange(data_size, dtype=jnp.float32)
in_specs = [
pl.BlockSpec((block_size,), lambda i: i, pipeline_mode=pmode)
for pmode in [x_pmode, y_pmode]
]
out_specs = pl.BlockSpec((block_size,), lambda i: i)
@jax.jit
def vadd(x, y):
return self.pallas_call(
body,
out_shape=jax.ShapeDtypeStruct(x.shape, jnp.float32),
in_specs=in_specs,
out_specs=out_specs,
grid=data_size // block_size,
)(x, y)
compiled = (
vadd.lower(
jax.ShapeDtypeStruct(x.shape, x.dtype),
jax.ShapeDtypeStruct(y.shape, y.dtype),
)
.compile()
.as_text()
)
pattern = (
r'"used_scoped_memory_configs":\[\{"memory_space":"1",.*?"size":"(\d+)"'
)
expected_vmem_usage = block_size * 4 * (2 + x_pmode.buffer_count + y_pmode.buffer_count)
vmem_usage = int(re.search(pattern, compiled).group(1))
self.assertEqual(vmem_usage, expected_vmem_usage)
z = vadd(x, y)
np.testing.assert_allclose(z, x + y)
| TPUPipelineModeTest |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 3208,
"end": 3528
} | class ____(TestWebSocketHandler):
def set_default_headers(self):
self.set_header("X-Extra-Response-Header", "Extra-Response-Value")
def prepare(self):
for k, v in self.request.headers.get_all():
if k.lower().startswith("x-test"):
self.set_header(k, v)
| HeaderEchoHandler |
python | mlflow__mlflow | mlflow/entities/_mlflow_object.py | {
"start": 47,
"end": 950
} | class ____:
def __iter__(self):
# Iterate through list of properties and yield as key -> value
for prop in self._properties():
yield prop, self.__getattribute__(prop)
@classmethod
def _get_properties_helper(cls):
return sorted([p for p in cls.__dict__ if isinstance(getattr(cls, p), property)])
@classmethod
def _properties(cls):
return cls._get_properties_helper()
@classmethod
@abstractmethod
def from_proto(cls, proto):
pass
@classmethod
def from_dictionary(cls, the_dict):
filtered_dict = {key: value for key, value in the_dict.items() if key in cls._properties()}
return cls(**filtered_dict)
def __repr__(self):
return to_string(self)
def to_string(obj):
return _MlflowObjectPrinter().to_string(obj)
def get_classname(obj):
return type(obj).__name__
| _MlflowObject |
python | great-expectations__great_expectations | great_expectations/core/expectation_diagnostics/expectation_test_data_cases.py | {
"start": 1598,
"end": 1946
} | class ____(SerializableDictDot):
"""A single test case, with input arguments and output"""
title: str
input: Dict[str, Any]
output: Dict[str, Any]
exact_match_out: bool
suppress_test_for: List[str] = field(default_factory=list)
include_in_gallery: bool = False
only_for: Optional[List[str]] = None
| ExpectationTestCase |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 14585,
"end": 14652
} | class ____(TupleCompare):
pass
@infer_global(operator.ge)
| TupleNe |
python | run-llama__llama_index | llama-index-integrations/retrievers/llama-index-retrievers-tldw/llama_index/retrievers/tldw/base.py | {
"start": 594,
"end": 820
} | class ____(BaseModel):
"""Represents a video scene containing multiple fragments."""
media_id: str
external_id: str
start_ms: float
end_ms: float
max_similarity: float
fragments: List[Fragment]
| Scene |
python | kamyu104__LeetCode-Solutions | Python/maximum-sum-obtained-of-any-permutation.py | {
"start": 52,
"end": 1326
} | class ____(object):
def maxSumRangeQuery(self, nums, requests):
"""
:type nums: List[int]
:type requests: List[List[int]]
:rtype: int
"""
def addmod(a, b, mod): # avoid overflow in other languages
a %= mod
b %= mod
if mod-a <= b:
b -= mod
return a+b
def mulmod(a, b, mod): # avoid overflow in other languages
a %= mod
b %= mod
if a < b:
a, b = b, a
result = 0
while b > 0:
if b%2 == 1:
result = addmod(result, a, mod)
a = addmod(a, a, mod)
b //= 2
return result
MOD = 10**9+7
count = [0]*len(nums)
for start, end in requests:
count[start] += 1
if end+1 < len(count):
count[end+1] -= 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
nums.sort()
count.sort()
result = 0
for i, (num, c) in enumerate(itertools.izip(nums, count)):
# result = addmod(result, mulmod(num, c, MOD), MOD)
result = (result+num*c)%MOD
return result
| Solution |
python | sympy__sympy | sympy/tensor/array/ndim_array.py | {
"start": 435,
"end": 2441
} | class ____(Kind):
"""
Kind for N-dimensional array in SymPy.
This kind represents the multidimensional array that algebraic
operations are defined. Basic class for this kind is ``NDimArray``,
but any expression representing the array can have this.
Parameters
==========
element_kind : Kind
Kind of the element. Default is :obj:NumberKind `<sympy.core.kind.NumberKind>`,
which means that the array contains only numbers.
Examples
========
Any instance of array class has ``ArrayKind``.
>>> from sympy import NDimArray
>>> NDimArray([1,2,3]).kind
ArrayKind(NumberKind)
Although expressions representing an array may be not instance of
array class, it will have ``ArrayKind`` as well.
>>> from sympy import Integral
>>> from sympy.tensor.array import NDimArray
>>> from sympy.abc import x
>>> intA = Integral(NDimArray([1,2,3]), x)
>>> isinstance(intA, NDimArray)
False
>>> intA.kind
ArrayKind(NumberKind)
Use ``isinstance()`` to check for ``ArrayKind` without specifying
the element kind. Use ``is`` with specifying the element kind.
>>> from sympy.tensor.array import ArrayKind
>>> from sympy.core import NumberKind
>>> boolA = NDimArray([True, False])
>>> isinstance(boolA.kind, ArrayKind)
True
>>> boolA.kind is ArrayKind(NumberKind)
False
See Also
========
shape : Function to return the shape of objects with ``MatrixKind``.
"""
def __new__(cls, element_kind=NumberKind):
obj = super().__new__(cls, element_kind)
obj.element_kind = element_kind
return obj
def __repr__(self):
return f"ArrayKind({self.element_kind})"
@classmethod
def _union(cls, kinds) -> 'ArrayKind':
elem_kinds = {e.kind for e in kinds}
if len(elem_kinds) == 1:
elemkind, = elem_kinds
else:
elemkind = UndefinedKind
return ArrayKind(elemkind)
| ArrayKind |
python | numba__llvmlite | llvmlite/tests/customize.py | {
"start": 9076,
"end": 9902
} | class ____(object):
"""
A minimal, picklable TestResult-alike object.
"""
__slots__ = (
'failures', 'errors', 'skipped', 'expectedFailures',
'unexpectedSuccesses', 'stream', 'shouldStop', 'testsRun')
def fixup_case(self, case):
"""
Remove any unpicklable attributes from TestCase instance *case*.
"""
# Python 3.3 doesn't reset this one.
case._outcomeForDoCleanups = None
def __init__(self, original_result):
for attr in self.__slots__:
setattr(self, attr, getattr(original_result, attr))
for case, _ in self.expectedFailures:
self.fixup_case(case)
for case, _ in self.errors:
self.fixup_case(case)
for case, _ in self.failures:
self.fixup_case(case)
| _MinimalResult |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_slugs.py | {
"start": 3443,
"end": 3878
} | class ____(util.MdCase):
"""Test different normalization methods."""
extension = ['markdown.extensions.toc']
extension_configs = {
'markdown.extensions.toc': {
"slugify": slugs.slugify(normalize='NFD')
}
}
def test_slug(self):
"""Test the slug output."""
self.check_markdown(
r'# Théâtre',
r'<h1 id="Theatre">Théâtre</h1>'
)
| TestNormalize |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/builder_pattern.py | {
"start": 2617,
"end": 3026
} | class ____(Builder):
pass
def test_no_issue_with_sub_builder():
builder = SubBuilder()
builder.set_not_saved_through_typevar(_test_source()).set_saved_through_typevar(
"benign"
).async_save()
def test_issue_with_sub_builder():
builder = SubBuilder()
builder.set_not_saved_through_typevar("benign").set_saved_through_typevar(
_test_source()
).async_save()
| SubBuilder |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 235763,
"end": 237822
} | class ____(GeneratedAirbyteSource):
class Oauth2Credentials:
@public
def __init__(
self,
access_token: str,
refresh_token: str,
token_uri: str,
client_id: str,
client_secret: str,
):
self.access_token = check.str_param(access_token, "access_token")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
self.token_uri = check.str_param(token_uri, "token_uri")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
@public
def __init__(
self,
name: str,
credentials: "Dv360Source.Oauth2Credentials",
partner_id: int,
start_date: str,
end_date: Optional[str] = None,
filters: Optional[list[str]] = None,
):
"""Airbyte Source for Dv 360.
Args:
name (str): The name of the destination.
credentials (Dv360Source.Oauth2Credentials): Oauth2 credentials
partner_id (int): Partner ID
start_date (str): UTC date and time in the format 2017-01-25. Any data before this date will not be replicated
end_date (Optional[str]): UTC date and time in the format 2017-01-25. Any data after this date will not be replicated.
filters (Optional[List[str]]): filters for the dimensions. each filter object had 2 keys: 'type' for the name of the dimension to be used as. and 'value' for the value of the filter
"""
self.credentials = check.inst_param(
credentials, "credentials", Dv360Source.Oauth2Credentials
)
self.partner_id = check.int_param(partner_id, "partner_id")
self.start_date = check.str_param(start_date, "start_date")
self.end_date = check.opt_str_param(end_date, "end_date")
self.filters = check.opt_nullable_list_param(filters, "filters", str)
super().__init__("Dv 360", name)
| Dv360Source |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/utils/test_pod_manager.py | {
"start": 43511,
"end": 50301
} | class ____:
@pytest.mark.parametrize(
("chunks", "expected_logs"),
[
([b"message"], [b"message"]),
([b"message1\nmessage2"], [b"message1\n", b"message2"]),
([b"message1\n", b"message2"], [b"message1\n", b"message2"]),
([b"first_part", b"_second_part"], [b"first_part_second_part"]),
([b""], [b""]),
],
)
def test_chunks(self, chunks, expected_logs):
with mock.patch.object(PodLogsConsumer, "logs_available") as logs_available:
logs_available.return_value = True
consumer = PodLogsConsumer(
response=mock.MagicMock(stream=mock.MagicMock(return_value=chunks)),
pod=mock.MagicMock(),
pod_manager=mock.MagicMock(container_is_running=mock.MagicMock(return_value=True)),
container_name="base",
)
assert list(consumer) == expected_logs
def test_container_is_not_running(self):
with mock.patch.object(PodLogsConsumer, "logs_available") as logs_available:
logs_available.return_value = False
consumer = PodLogsConsumer(
response=mock.MagicMock(stream=mock.MagicMock(return_value=[b"message1", b"message2"])),
pod=mock.MagicMock(),
pod_manager=mock.MagicMock(container_is_running=mock.MagicMock(return_value=False)),
container_name="base",
)
assert list(consumer) == []
@pytest.mark.parametrize(
(
"container_run",
"termination_time",
"now_time",
"post_termination_timeout",
"expected_logs_available",
),
[
(
False,
datetime(2022, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2022, 1, 1, 0, 1, 0, 0, tzinfo=utc),
120,
True,
),
(
False,
datetime(2022, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2022, 1, 1, 0, 2, 0, 0, tzinfo=utc),
120,
False,
),
(
False,
datetime(2022, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2022, 1, 1, 0, 5, 0, 0, tzinfo=utc),
120,
False,
),
(
True,
datetime(2022, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2022, 1, 1, 0, 1, 0, 0, tzinfo=utc),
120,
True,
),
(
True,
datetime(2022, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2022, 1, 1, 0, 2, 0, 0, tzinfo=utc),
120,
True,
),
(
True,
datetime(2022, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2022, 1, 1, 0, 5, 0, 0, tzinfo=utc),
120,
True,
),
],
)
@mock.patch("airflow.providers.cncf.kubernetes.utils.pod_manager.container_is_running")
@mock.patch("airflow.providers.cncf.kubernetes.utils.pod_manager.get_container_status")
def test_logs_available(
self,
mock_get_container_status,
mock_container_is_running,
container_run,
termination_time,
now_time,
post_termination_timeout,
expected_logs_available,
):
mock_container_is_running.return_value = container_run
mock_get_container_status.return_value = mock.MagicMock(
state=mock.MagicMock(terminated=mock.MagicMock(finished_at=termination_time))
)
with time_machine.travel(now_time):
consumer = PodLogsConsumer(
response=mock.MagicMock(),
pod=mock.MagicMock(),
pod_manager=mock.MagicMock(),
container_name="base",
post_termination_timeout=post_termination_timeout,
)
assert consumer.logs_available() == expected_logs_available
@pytest.mark.parametrize(
(
"read_pod_cache_timeout",
"mock_read_pod_at_0",
"mock_read_pod_at_1",
"mock_read_pods",
"expected_read_pods",
),
[
(
120,
datetime(2023, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2023, 1, 1, 0, 1, 0, 0, tzinfo=utc),
["Read pod #0", "Read pod #1"],
["Read pod #0", "Read pod #0"],
),
(
120,
datetime(2023, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2023, 1, 1, 0, 2, 0, 0, tzinfo=utc),
["Read pod #0", "Read pod #1"],
["Read pod #0", "Read pod #0"],
),
(
120,
datetime(2023, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2023, 1, 1, 0, 3, 0, 0, tzinfo=utc),
["Read pod #0", "Read pod #1"],
["Read pod #0", "Read pod #1"],
),
(
2,
datetime(2023, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2023, 1, 1, 0, 0, 1, 0, tzinfo=utc),
["Read pod #0", "Read pod #1"],
["Read pod #0", "Read pod #0"],
),
(
2,
datetime(2023, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2023, 1, 1, 0, 0, 2, 0, tzinfo=utc),
["Read pod #0", "Read pod #1"],
["Read pod #0", "Read pod #0"],
),
(
2,
datetime(2023, 1, 1, 0, 0, 0, 0, tzinfo=utc),
datetime(2023, 1, 1, 0, 0, 3, 0, tzinfo=utc),
["Read pod #0", "Read pod #1"],
["Read pod #0", "Read pod #1"],
),
],
)
def test_read_pod(
self,
read_pod_cache_timeout,
mock_read_pod_at_0,
mock_read_pod_at_1,
mock_read_pods,
expected_read_pods,
):
consumer = PodLogsConsumer(
response=mock.MagicMock(),
pod=mock.MagicMock(),
pod_manager=mock.MagicMock(),
container_name="base",
read_pod_cache_timeout=read_pod_cache_timeout,
)
consumer.pod_manager.read_pod.side_effect = mock_read_pods
# first read
with time_machine.travel(mock_read_pod_at_0):
assert consumer.read_pod() == expected_read_pods[0]
# second read
with time_machine.travel(mock_read_pod_at_1):
assert consumer.read_pod() == expected_read_pods[1]
| TestPodLogsConsumer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 291823,
"end": 292692
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of RevokeMigratorRole"""
__schema__ = github_schema
__field_names__ = ("organization_id", "actor", "actor_type", "client_mutation_id")
organization_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="organizationId")
"""The ID of the organization that the user/team belongs to."""
actor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="actor")
"""The user login or Team slug to revoke the migrator role from."""
actor_type = sgqlc.types.Field(sgqlc.types.non_null(ActorType), graphql_name="actorType")
"""Specifies the type of the actor, can be either USER or TEAM."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| RevokeMigratorRoleInput |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/schedule.py | {
"start": 682,
"end": 804
} | class ____(BaseModel):
"""GET /api/schedules response."""
items: list[DgApiSchedule]
total: int
| DgApiScheduleList |
python | PyCQA__pylint | tests/pyreverse/functional/class_diagrams/attributes/instance_attributes.py | {
"start": 0,
"end": 177
} | class ____:
def __init__(self):
self.my_int_without_type_hint = 1
self.my_int_with_type_hint: int = 2
self.my_optional_int: int = None
| InstanceAttributes |
python | coleifer__peewee | peewee.py | {
"start": 152914,
"end": 153358
} | class ____(object):
def __init__(self, model, field, name):
self.model = model
self.field = field
self.name = name
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance.__data__.get(self.name)
return self.field
def __set__(self, instance, value):
instance.__data__[self.name] = value
instance._dirty.add(self.name)
| FieldAccessor |
python | realpython__materials | python-getter-setter/person3.py | {
"start": 0,
"end": 182
} | class ____:
def __init__(self, name):
self._name = name
def get_name(self):
return self._name
def set_name(self, value):
self._name = value
| Person |
python | ray-project__ray | rllib/env/external_env.py | {
"start": 505,
"end": 8606
} | class ____(threading.Thread):
"""An environment that interfaces with external agents.
Unlike simulator envs, control is inverted: The environment queries the
policy to obtain actions and in return logs observations and rewards for
training. This is in contrast to gym.Env, where the algorithm drives the
simulation through env.step() calls.
You can use ExternalEnv as the backend for policy serving (by serving HTTP
requests in the run loop), for ingesting offline logs data (by reading
offline transitions in the run loop), or other custom use cases not easily
expressed through gym.Env.
ExternalEnv supports both on-policy actions (through self.get_action()),
and off-policy actions (through self.log_action()).
This env is thread-safe, but individual episodes must be executed serially.
.. testcode::
:skipif: True
from ray.tune import register_env
from ray.rllib.algorithms.dqn import DQN
YourExternalEnv = ...
register_env("my_env", lambda config: YourExternalEnv(config))
algo = DQN(env="my_env")
while True:
print(algo.train())
"""
def __init__(
self,
action_space: gym.Space,
observation_space: gym.Space,
max_concurrent: int = None,
):
"""Initializes an ExternalEnv instance.
Args:
action_space: Action space of the env.
observation_space: Observation space of the env.
"""
threading.Thread.__init__(self)
self.daemon = True
self.action_space = action_space
self.observation_space = observation_space
self._episodes = {}
self._finished = set()
self._results_avail_condition = threading.Condition()
if max_concurrent is not None:
deprecation_warning(
"The `max_concurrent` argument has been deprecated. Please configure"
"the number of episodes using the `rollout_fragment_length` and"
"`batch_mode` arguments. Please raise an issue on the Ray Github if "
"these arguments do not support your expected use case for ExternalEnv",
error=True,
)
def run(self):
"""Override this to implement the run loop.
Your loop should continuously:
1. Call self.start_episode(episode_id)
2. Call self.[get|log]_action(episode_id, obs, [action]?)
3. Call self.log_returns(episode_id, reward)
4. Call self.end_episode(episode_id, obs)
5. Wait if nothing to do.
Multiple episodes may be started at the same time.
"""
raise NotImplementedError
def start_episode(
self, episode_id: Optional[str] = None, training_enabled: bool = True
) -> str:
"""Record the start of an episode.
Args:
episode_id: Unique string id for the episode or
None for it to be auto-assigned and returned.
training_enabled: Whether to use experiences for this
episode to improve the policy.
Returns:
Unique string id for the episode.
"""
if episode_id is None:
episode_id = uuid.uuid4().hex
if episode_id in self._finished:
raise ValueError("Episode {} has already completed.".format(episode_id))
if episode_id in self._episodes:
raise ValueError("Episode {} is already started".format(episode_id))
self._episodes[episode_id] = _ExternalEnvEpisode(
episode_id, self._results_avail_condition, training_enabled
)
return episode_id
def get_action(self, episode_id: str, observation: EnvObsType) -> EnvActionType:
"""Record an observation and get the on-policy action.
Args:
episode_id: Episode id returned from start_episode().
observation: Current environment observation.
Returns:
Action from the env action space.
"""
episode = self._get(episode_id)
return episode.wait_for_action(observation)
def log_action(
self, episode_id: str, observation: EnvObsType, action: EnvActionType
) -> None:
"""Record an observation and (off-policy) action taken.
Args:
episode_id: Episode id returned from start_episode().
observation: Current environment observation.
action: Action for the observation.
"""
episode = self._get(episode_id)
episode.log_action(observation, action)
def log_returns(
self, episode_id: str, reward: float, info: Optional[EnvInfoDict] = None
) -> None:
"""Records returns (rewards and infos) from the environment.
The reward will be attributed to the previous action taken by the
episode. Rewards accumulate until the next action. If no reward is
logged before the next action, a reward of 0.0 is assumed.
Args:
episode_id: Episode id returned from start_episode().
reward: Reward from the environment.
info: Optional info dict.
"""
episode = self._get(episode_id)
episode.cur_reward += reward
if info:
episode.cur_info = info or {}
def end_episode(self, episode_id: str, observation: EnvObsType) -> None:
"""Records the end of an episode.
Args:
episode_id: Episode id returned from start_episode().
observation: Current environment observation.
"""
episode = self._get(episode_id)
self._finished.add(episode.episode_id)
episode.done(observation)
def _get(self, episode_id: str) -> "_ExternalEnvEpisode":
"""Get a started episode by its ID or raise an error."""
if episode_id in self._finished:
raise ValueError("Episode {} has already completed.".format(episode_id))
if episode_id not in self._episodes:
raise ValueError("Episode {} not found.".format(episode_id))
return self._episodes[episode_id]
def to_base_env(
self,
make_env: Optional[Callable[[int], EnvType]] = None,
num_envs: int = 1,
remote_envs: bool = False,
remote_env_batch_wait_ms: int = 0,
restart_failed_sub_environments: bool = False,
) -> "BaseEnv":
"""Converts an RLlib MultiAgentEnv into a BaseEnv object.
The resulting BaseEnv is always vectorized (contains n
sub-environments) to support batched forward passes, where n may
also be 1. BaseEnv also supports async execution via the `poll` and
`send_actions` methods and thus supports external simulators.
Args:
make_env: A callable taking an int as input (which indicates
the number of individual sub-environments within the final
vectorized BaseEnv) and returning one individual
sub-environment.
num_envs: The number of sub-environments to create in the
resulting (vectorized) BaseEnv. The already existing `env`
will be one of the `num_envs`.
remote_envs: Whether each sub-env should be a @ray.remote
actor. You can set this behavior in your config via the
`remote_worker_envs=True` option.
remote_env_batch_wait_ms: The wait time (in ms) to poll remote
sub-environments for, if applicable. Only used if
`remote_envs` is True.
Returns:
The resulting BaseEnv object.
"""
if num_envs != 1:
raise ValueError(
"External(MultiAgent)Env does not currently support "
"num_envs > 1. One way of solving this would be to "
"treat your Env as a MultiAgentEnv hosting only one "
"type of agent but with several copies."
)
env = ExternalEnvWrapper(self)
return env
@OldAPIStack
| ExternalEnv |
python | Textualize__textual | docs/examples/widgets/tabs.py | {
"start": 276,
"end": 1969
} | class ____(App):
"""Demonstrates the Tabs widget."""
CSS = """
Tabs {
dock: top;
}
Screen {
align: center middle;
}
Label {
margin:1 1;
width: 100%;
height: 100%;
background: $panel;
border: tall $primary;
content-align: center middle;
}
"""
BINDINGS = [
("a", "add", "Add tab"),
("r", "remove", "Remove active tab"),
("c", "clear", "Clear tabs"),
]
def compose(self) -> ComposeResult:
yield Tabs(NAMES[0])
yield Label()
yield Footer()
def on_mount(self) -> None:
"""Focus the tabs when the app starts."""
self.query_one(Tabs).focus()
def on_tabs_tab_activated(self, event: Tabs.TabActivated) -> None:
"""Handle TabActivated message sent by Tabs."""
label = self.query_one(Label)
if event.tab is None:
# When the tabs are cleared, event.tab will be None
label.visible = False
else:
label.visible = True
label.update(event.tab.label)
def action_add(self) -> None:
"""Add a new tab."""
tabs = self.query_one(Tabs)
# Cycle the names
NAMES[:] = [*NAMES[1:], NAMES[0]]
tabs.add_tab(NAMES[0])
def action_remove(self) -> None:
"""Remove active tab."""
tabs = self.query_one(Tabs)
active_tab = tabs.active_tab
if active_tab is not None:
tabs.remove_tab(active_tab.id)
def action_clear(self) -> None:
"""Clear the tabs."""
self.query_one(Tabs).clear()
if __name__ == "__main__":
app = TabsApp()
app.run()
| TabsApp |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_tensors_map_ops_test.py | {
"start": 7929,
"end": 10217
} | class ____(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
num_elements = 10000
batch_size = 64
indices_batch = np.random.randint(
batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
with session.Session(config=benchmark.benchmark_config()) as sess:
with ops.device("/cpu:0"):
indices = variables.Variable(indices)
values = variables.Variable(values)
shape = variables.Variable(shape)
st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = sparse_ops.serialize_many_sparse(st)
st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
self.evaluate(variables.global_variables_initializer())
st_roundtrip_values = self.evaluate(st_roundtrip)
st_deserialized_values = self.evaluate(st_deserialized)
np.testing.assert_equal(st_roundtrip_values.values,
st_deserialized_values.values)
np.testing.assert_equal(st_roundtrip_values.indices,
st_deserialized_values.indices)
np.testing.assert_equal(st_roundtrip_values.dense_shape,
st_deserialized_values.dense_shape)
self.run_op_benchmark(
sess,
st_roundtrip_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
sess,
st_deserialized_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
test.main()
| BenchmarkSparseTensorsMapVsSerialization |
python | doocs__leetcode | solution/2600-2699/2600.K Items With the Maximum Sum/Solution.py | {
"start": 0,
"end": 286
} | class ____:
def kItemsWithMaximumSum(
self, numOnes: int, numZeros: int, numNegOnes: int, k: int
) -> int:
if numOnes >= k:
return k
if numZeros >= k - numOnes:
return numOnes
return numOnes - (k - numOnes - numZeros)
| Solution |
python | joke2k__faker | tests/providers/test_color.py | {
"start": 20815,
"end": 21392
} | class ____:
"""Test uz_UZ color provider methods"""
def test_color_name(self, faker, num_samples):
for _ in range(num_samples):
color_name = faker.color_name()
assert isinstance(color_name, str)
assert color_name in UzUzColorProvider.all_colors.keys()
def test_safe_color_name(self, faker, num_samples):
for _ in range(num_samples):
safe_color_name = faker.safe_color_name()
assert isinstance(safe_color_name, str)
assert safe_color_name in UzUzColorProvider.safe_colors
| TestUzUz |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-databend/destination_databend/destination.py | {
"start": 530,
"end": 4382
} | class ____(Destination):
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
TODO
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
streams = {s.stream.name for s in configured_catalog.streams}
client = DatabendClient(**config)
writer = create_databend_wirter(client, logger)
for configured_stream in configured_catalog.streams:
if configured_stream.destination_sync_mode == DestinationSyncMode.overwrite:
writer.delete_table(configured_stream.stream.name)
logger.info(f"Stream {configured_stream.stream.name} is wiped.")
writer.create_raw_table(configured_stream.stream.name)
for message in input_messages:
if message.type == Type.STATE:
yield message
elif message.type == Type.RECORD:
data = message.record.data
stream = message.record.stream
# Skip unselected streams
if stream not in streams:
logger.debug(f"Stream {stream} was not present in configured streams, skipping")
continue
writer.queue_write_data(stream, str(uuid4()), datetime.now(), json.dumps(data))
# Flush any leftover messages
writer.flush()
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
client = DatabendClient(**config)
cursor = client.open()
cursor.execute("DROP TABLE IF EXISTS test")
cursor.execute("CREATE TABLE if not exists test (x Int32,y VARCHAR)")
cursor.execute("INSERT INTO test (x,y) VALUES (%,%)", [1, "yy", 2, "xx"])
cursor.execute("DROP TABLE IF EXISTS test")
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
| DestinationDatabend |
python | joke2k__faker | tests/providers/test_internet.py | {
"start": 18477,
"end": 19382
} | class ____:
"""Test ja_JP internet provider methods"""
def test_internet(self, faker):
names = JaPersonProvider.last_romanized_names
domain_word = faker.domain_word()
assert isinstance(domain_word, str)
assert any(domain_word == text.slugify(name) for name in names)
domain_name = faker.domain_name()
deep_domain_name = faker.domain_name(3)
assert isinstance(domain_name, str)
assert isinstance(deep_domain_name, str)
assert deep_domain_name.count(".") == 3
with pytest.raises(ValueError):
faker.domain_name(-1)
user_name = faker.user_name()
assert isinstance(user_name, str)
tld = faker.tld()
assert isinstance(tld, str)
def test_slug(self, faker):
num_of_samples = 100
for _ in range(num_of_samples):
assert faker.slug() != ""
| TestJaJp |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_pathconverter.py | {
"start": 168,
"end": 4176
} | class ____(util.MdCase):
"""Test relative paths."""
extension = ["pymdownx.pathconverter"]
extension_configs = {
"pymdownx.pathconverter": {
"base_path": CURRENT_DIR,
"relative_path": PARENT_DIR
}
}
def test_in_script(self):
"""Test that we do not parse image in script."""
self.check_markdown(
r'''
<script>
var str = '<img alt="picture" src="../test_extensions/_assets/bg.png" />'
</script>
''',
r'''
<script>
var str = '<img alt="picture" src="../test_extensions/_assets/bg.png" />'
</script>
''',
True
)
def test_comment(self):
"""Test comment."""
self.check_markdown(
r'<!--  -->',
r'<!--  -->'
)
def test_relative_path(self):
"""Test relative path."""
self.check_markdown(
r'',
r'<p><img alt="picture" src="test_extensions/_assets/bg.png" /></p>'
)
def test_file_win_file_path_root(self):
"""Test windows file:// path with root slash."""
self.check_markdown(
r'[file link windows abs](file:///c:/path/file.html)',
r'<p><a href="file:///c:/path/file.html">file link windows abs</a></p>'
)
def test_win_file_path(self):
"""Test windows file:// path."""
self.check_markdown(
r'[file link windows abs2](file://c:/path/file.html)',
r'<p><a href="file://c:/path/file.html">file link windows abs2</a></p>'
)
def test_file_root(self):
"""Test Linux/Unix style root file:// path."""
self.check_markdown(
r'[file link abs](file:///path/file.html)',
r'<p><a href="file:///path/file.html">file link abs</a></p>'
)
def test_root(self):
"""Test /root path."""
self.check_markdown(
r'[absolute](/absolute)',
r'<p><a href="/absolute">absolute</a></p>'
)
def test_url(self):
"""Test normal URL."""
self.check_markdown(
r'[link](http://www.google.com)',
r'<p><a href="http://www.google.com">link</a></p>'
)
def test_fragment(self):
"""Test HTML fragment."""
self.check_markdown(
r'[fragment](#fragment)',
r'<p><a href="#fragment">fragment</a></p>'
)
def test_windows(self):
"""Test Windows file path."""
self.check_markdown(
r'[windows path abs](c:/path/file.html)',
r'<p><a href="c:/path/file.html">windows path abs</a></p>'
)
def test_network_path(self):
"""Test network path."""
self.check_markdown(
r'[windows network path](//network/path/file.html)',
r'<p><a href="//network/path/file.html">windows network path</a></p>'
)
def test_strange_url(self):
"""Test strange URL."""
self.check_markdown(
r'[strange link](strange://odd/link/file.html)',
r'<p><a href="strange://odd/link/file.html">strange link</a></p>'
)
def test_strange_url2(self):
"""Test additional strange URL."""
self.check_markdown(
r'[strange link 2](strange://www.odd.com/link/file.html)',
r'<p><a href="strange://www.odd.com/link/file.html">strange link 2</a></p>'
)
def test_mail(self):
"""Test mail link."""
self.check_markdown(
r'<mail@mail.com>',
r'<p><a href="mailto:mail@mail'
r'.com">mail@mail.com</a></p>'
)
| TestRelative |
python | apache__airflow | airflow-core/src/airflow/policies.py | {
"start": 4116,
"end": 7086
} | class ____:
"""
Default implementations of the policy functions.
:meta private:
"""
# Default implementations of the policy functions
@staticmethod
@hookimpl
def get_dagbag_import_timeout(dag_file_path: str):
from airflow.configuration import conf
return conf.getfloat("core", "DAGBAG_IMPORT_TIMEOUT")
@staticmethod
@hookimpl
def get_airflow_context_vars(context):
return {}
def make_plugin_from_local_settings(pm: pluggy.PluginManager, module, names: set[str]):
"""
Turn the functions from airflow_local_settings module into a custom/local plugin.
Allows plugin-registered functions to co-operate with pluggy/setuptool
entrypoint plugins of the same methods.
Airflow local settings will be "win" (i.e. they have the final say) as they are the last plugin
registered.
:meta private:
"""
import inspect
import textwrap
import attr
hook_methods = set()
def _make_shim_fn(name, desired_sig, target):
# Functions defined in airflow_local_settings are called by positional parameters, so the names don't
# have to match what we define in the "template" policy.
#
# However Pluggy validates the names match (and will raise an error if they don't!)
#
# To maintain compat, if we detect the names don't match, we will wrap it with a dynamically created
# shim function that looks somewhat like this:
#
# def dag_policy_name_mismatch_shim(dag):
# airflow_local_settings.dag_policy(dag)
#
codestr = textwrap.dedent(
f"""
def {name}_name_mismatch_shim{desired_sig}:
return __target({" ,".join(desired_sig.parameters)})
"""
)
code = compile(codestr, "<policy-shim>", "single")
scope = {"__target": target}
exec(code, scope, scope)
return scope[f"{name}_name_mismatch_shim"]
@attr.define(frozen=True)
class AirflowLocalSettingsPolicy:
hook_methods: tuple[str, ...]
__name__ = "AirflowLocalSettingsPolicy"
def __dir__(self):
return self.hook_methods
for name in names:
if not hasattr(pm.hook, name):
continue
policy = getattr(module, name)
if not policy:
continue
local_sig = inspect.signature(policy)
policy_sig = inspect.signature(globals()[name])
# We only care if names/order/number of parameters match, not type hints
if local_sig.parameters.keys() != policy_sig.parameters.keys():
policy = _make_shim_fn(name, policy_sig, target=policy)
setattr(AirflowLocalSettingsPolicy, name, staticmethod(hookimpl(policy, specname=name)))
hook_methods.add(name)
if hook_methods:
pm.register(AirflowLocalSettingsPolicy(hook_methods=tuple(hook_methods)))
return hook_methods
| DefaultPolicy |
python | readthedocs__readthedocs.org | readthedocs/oauth/services/gitlab.py | {
"start": 869,
"end": 21233
} | class ____(UserService):
"""
Provider service for GitLab.
See:
- https://docs.gitlab.com/ce/integration/oauth_provider.html
- https://docs.gitlab.com/ce/api/oauth2.html
"""
allauth_provider = GitLabProvider
base_api_url = "https://gitlab.com"
supports_build_status = True
# Just use the network location to determine if it's a GitLab project
# because private repos have another base url, eg. git@gitlab.example.com
url_pattern = re.compile(
re.escape(urlparse(base_api_url).netloc),
)
PERMISSION_NO_ACCESS = 0
PERMISSION_MAINTAINER = 40
PERMISSION_OWNER = 50
vcs_provider_slug = GITLAB
def _get_repo_id(self, project):
"""
Get the ID or URL-encoded path of the project.
See https://docs.gitlab.com/ce/api/README.html#namespaced-path-encoding.
"""
if project.remote_repository:
repo_id = project.remote_repository.remote_id
else:
# Handle "Manual Import" when there is no RemoteRepository
# associated with the project. It only works with gitlab.com at the
# moment (doesn't support custom gitlab installations)
username, repo = build_utils.get_gitlab_username_repo(project.repo)
if (username, repo) == (None, None):
return None
repo_id = quote_plus(f"{username}/{repo}")
return repo_id
def get_next_url_to_paginate(self, response):
return response.links.get("next", {}).get("url")
def get_paginated_results(self, response):
return response.json()
def sync_repositories(self):
"""
Sync repositories that the user has access to.
See https://docs.gitlab.com/api/projects/#list-a-users-projects.
"""
remote_ids = []
try:
repos = self.paginate(
f"{self.base_api_url}/api/v4/projects",
membership=True,
per_page=100,
archived=False,
order_by="path",
sort="asc",
)
for repo in repos:
remote_repository = self.create_repository(repo)
if remote_repository:
remote_ids.append(remote_repository.remote_id)
except (TypeError, ValueError):
log.warning("Error syncing GitLab repositories")
raise SyncServiceError(
SyncServiceError.INVALID_OR_REVOKED_ACCESS_TOKEN.format(
provider=self.vcs_provider_slug
)
)
return remote_ids
def sync_organizations(self):
"""
Sync GitLab groups (organizations).
This method only creates the relationships between the
organizations and the user, as all the repositories
are already created in the sync_repositories method.
"""
organization_remote_ids = []
try:
orgs = self.paginate(
f"{self.base_api_url}/api/v4/groups",
per_page=100,
all_available=False,
order_by="path",
sort="asc",
)
for org in orgs:
remote_organization = self.create_organization(org)
remote_organization.get_remote_organization_relation(self.user, self.account)
organization_remote_ids.append(remote_organization.remote_id)
except (TypeError, ValueError):
log.warning("Error syncing GitLab organizations")
raise SyncServiceError(
SyncServiceError.INVALID_OR_REVOKED_ACCESS_TOKEN.format(
provider=self.vcs_provider_slug
)
)
return organization_remote_ids, []
def _has_access_to_repository(self, fields):
"""Check if the user has access to the repository, and if they are an admin."""
permissions = fields.get("permissions", {})
project_access = permissions.get("project_access") or {}
project_access_level = project_access.get("access_level", self.PERMISSION_NO_ACCESS)
group_access = permissions.get("group_access") or {}
group_access_level = group_access.get("access_level", self.PERMISSION_NO_ACCESS)
has_access = (
group_access_level != self.PERMISSION_NO_ACCESS
or project_access_level != self.PERMISSION_NO_ACCESS
)
project_admin = project_access_level in (self.PERMISSION_MAINTAINER, self.PERMISSION_OWNER)
group_admin = group_access_level in (self.PERMISSION_MAINTAINER, self.PERMISSION_OWNER)
return has_access, project_admin or group_admin
def update_repository(self, remote_repository: RemoteRepository):
resp = self.session.get(
f"{self.base_api_url}/api/v4/projects/{remote_repository.remote_id}"
)
if resp.status_code in [403, 404]:
log.info(
"User no longer has access to the repository, removing remote relationship.",
remote_repository_id=remote_repository.remote_id,
)
remote_repository.get_remote_repository_relation(self.user, self.account).delete()
return
if resp.status_code != 200:
log.warning(
"Error fetching repository from GitLab",
remote_repository_id=remote_repository.remote_id,
status_code=resp.status_code,
)
return
data = resp.json()
self._update_repository_from_fields(remote_repository, data)
has_access, is_admin = self._has_access_to_repository(data)
relation = remote_repository.get_remote_repository_relation(
self.user,
self.account,
)
if not has_access:
# If the user no longer has access to the repository,
# we remove the remote relationship.
log.info(
"User no longer has access to the repository, removing remote relationship.",
remote_repository=remote_repository.remote_id,
)
relation.delete()
else:
relation.admin = is_admin
relation.save()
def create_repository(self, fields, privacy=None):
"""
Update or create a repository from GitLab API response.
``admin`` field is computed using the ``permissions`` fields from the
repository response. The permission from GitLab is given by an integer:
* 0: No access
* (... others ...)
* 40: Maintainer
* 50: Owner
https://docs.gitlab.com/ee/api/access_requests.html
https://gitlab.com/help/user/permissions
:param fields: dictionary of response data from API
:param privacy: privacy level to support
:rtype: RemoteRepository
"""
privacy = privacy or settings.DEFAULT_PRIVACY_LEVEL
repo_is_public = fields["visibility"] == "public"
if privacy == "private" or (repo_is_public and privacy == "public"):
repo, _ = RemoteRepository.objects.get_or_create(
remote_id=fields["id"], vcs_provider=self.vcs_provider_slug
)
self._update_repository_from_fields(repo, fields)
remote_repository_relation = repo.get_remote_repository_relation(
self.user, self.account
)
_, is_admin = self._has_access_to_repository(fields)
remote_repository_relation.admin = is_admin
remote_repository_relation.save()
return repo
log.info(
"Not importing repository because mismatched type.",
repository=fields["path_with_namespace"],
visibility=fields["visibility"],
)
def _update_repository_from_fields(self, repo, fields):
# If the namespace is a group, we can use it as the organization
if fields.get("namespace", {}).get("kind") == "group":
organization = self.create_organization(fields["namespace"])
repo.organization = organization
else:
repo.organization = None
repo.name = fields["name"]
repo.full_name = fields["path_with_namespace"]
repo.description = fields["description"]
repo.ssh_url = fields["ssh_url_to_repo"]
repo.html_url = fields["web_url"]
repo.vcs = "git"
repo.private = fields["visibility"] == "private"
repo.default_branch = fields.get("default_branch")
owner = fields.get("owner") or {}
repo.avatar_url = self._make_absolute_url(
fields.get("avatar_url") or owner.get("avatar_url")
)
if not repo.avatar_url:
repo.avatar_url = self.default_user_avatar_url
if repo.private:
repo.clone_url = repo.ssh_url
else:
repo.clone_url = fields["http_url_to_repo"]
repo.save()
def _make_absolute_url(self, url):
"""
Make sure the URL is absolute to gitlab.com.
If the URL is relative, prepend the base API URL.
"""
if url and not url.startswith("http"):
return f"https://gitlab.com{url}"
return url
def create_organization(self, fields):
"""
Update or create remote organization from GitLab API response.
:param fields: dictionary response of data from API
:rtype: RemoteOrganization
.. note::
This method caches organizations by their remote ID to avoid
unnecessary database queries, specially when creating
multiple repositories that belong to the same organization.
"""
organization_id = fields["id"]
if organization_id in self._organizations_cache:
return self._organizations_cache[organization_id]
organization, _ = RemoteOrganization.objects.get_or_create(
remote_id=organization_id,
vcs_provider=self.vcs_provider_slug,
)
organization.name = fields.get("name")
organization.slug = fields.get("full_path")
organization.url = fields.get("web_url")
organization.avatar_url = self._make_absolute_url(fields.get("avatar_url"))
if not organization.avatar_url:
organization.avatar_url = self.default_user_avatar_url
organization.save()
self._organizations_cache[organization_id] = organization
return organization
def get_webhook_data(self, repo_id, project, integration):
"""
Get webhook JSON data to post to the API.
See: http://doc.gitlab.com/ce/api/projects.html#add-project-hook
"""
return json.dumps(
{
"id": repo_id,
"push_events": True,
"tag_push_events": True,
"url": self.get_webhook_url(project, integration),
"token": integration.secret,
# Optional
"issues_events": False,
"merge_requests_events": True,
"note_events": False,
"job_events": False,
"pipeline_events": False,
"wiki_events": False,
}
)
def get_provider_data(self, project, integration):
"""
Gets provider data from GitLab Webhooks API.
:param project: project
:type project: Project
:param integration: Integration for the project
:type integration: Integration
:returns: Dictionary containing provider data from the API or None
:rtype: dict
"""
if integration.provider_data:
return integration.provider_data
repo_id = self._get_repo_id(project)
if repo_id is None:
return None
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
integration_id=integration.pk,
)
rtd_webhook_url = self.get_webhook_url(project, integration)
try:
resp = self.session.get(
"{url}/api/v4/projects/{repo_id}/hooks".format(
url=self.base_api_url,
repo_id=repo_id,
),
)
if resp.status_code == 200:
recv_data = resp.json()
for webhook_data in recv_data:
if webhook_data["url"] == rtd_webhook_url:
integration.provider_data = webhook_data
integration.save()
log.info(
"GitLab integration updated with provider data for project.",
)
break
else:
log.info("GitLab project does not exist or user does not have permissions.")
except Exception:
log.exception("GitLab webhook Listing failed for project.")
return integration.provider_data
def setup_webhook(self, project, integration=None) -> bool:
"""
Set up GitLab project webhook for project.
:param project: project to set up webhook for
:type project: Project
:param integration: Integration for a project
:type integration: Integration
:returns: boolean based on webhook set up success
:rtype: bool
"""
resp = None
if not integration:
integration, _ = Integration.objects.get_or_create(
project=project,
integration_type=Integration.GITLAB_WEBHOOK,
)
repo_id = self._get_repo_id(project)
url = f"{self.base_api_url}/api/v4/projects/{repo_id}/hooks"
if repo_id is None:
return False
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
integration_id=integration.pk,
url=url,
)
data = self.get_webhook_data(repo_id, project, integration)
try:
resp = self.session.post(
url,
data=data,
headers={"content-type": "application/json"},
)
structlog.contextvars.bind_contextvars(http_status_code=resp.status_code)
if resp.status_code == 201:
integration.provider_data = resp.json()
integration.save()
log.debug("GitLab webhook creation successful for project.")
return True
if resp.status_code in [401, 403, 404]:
log.info("Gitlab project does not exist or user does not have permissions.")
else:
log.warning("GitLab webhook creation failed. Unknown response from GitLab.")
except Exception:
log.exception("GitLab webhook creation failed.")
return False
def update_webhook(self, project, integration) -> bool:
"""
Update webhook integration.
:param project: project to set up webhook for
:type project: Project
:param integration: Webhook integration to update
:type integration: Integration
:returns: boolean based on webhook update success, and requests Response
object
"""
provider_data = self.get_provider_data(project, integration)
# Handle the case where we don't have a proper provider_data set
# This happens with a user-managed webhook previously
if not provider_data:
return self.setup_webhook(project, integration)
resp = None
repo_id = self._get_repo_id(project)
if repo_id is None:
return False
data = self.get_webhook_data(repo_id, project, integration)
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
integration_id=integration.pk,
)
try:
hook_id = provider_data.get("id")
resp = self.session.put(
"{url}/api/v4/projects/{repo_id}/hooks/{hook_id}".format(
url=self.base_api_url,
repo_id=repo_id,
hook_id=hook_id,
),
data=data,
headers={"content-type": "application/json"},
)
if resp.status_code == 200:
recv_data = resp.json()
integration.provider_data = recv_data
integration.save()
log.info("GitLab webhook update successful for project.")
return True
# GitLab returns 404 when the webhook doesn't exist. In this case,
# we call ``setup_webhook`` to re-configure it from scratch
if resp.status_code == 404:
return self.setup_webhook(project, integration)
except Exception:
try:
debug_data = resp.json()
except ValueError:
debug_data = resp.content
except Exception:
debug_data = None
log.exception(
"GitLab webhook update failed.",
debug_data=debug_data,
)
return False
def send_build_status(self, *, build, commit, status):
"""
Create GitLab commit status for project.
:param build: Build to set up commit status for
:type build: Build
:param status: build status failure, pending, or success.
:type status: str
:param commit: commit sha of the pull request
:type commit: str
:returns: boolean based on commit status creation was successful or not.
:rtype: Bool
"""
resp = None
project = build.project
repo_id = self._get_repo_id(project)
if repo_id is None:
return (False, resp)
# select the correct status and description.
gitlab_build_state = SELECT_BUILD_STATUS[status]["gitlab"]
description = SELECT_BUILD_STATUS[status]["description"]
if status == BUILD_STATUS_SUCCESS:
# Link to the documentation for this version
target_url = build.version.get_absolute_url()
else:
# Link to the build detail's page
target_url = build.get_full_url()
context = f"{settings.RTD_BUILD_STATUS_API_NAME}:{project.slug}"
data = {
"state": gitlab_build_state,
"target_url": target_url,
"description": description,
"context": context,
}
url = f"{self.base_api_url}/api/v4/projects/{repo_id}/statuses/{commit}"
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
commit_status=gitlab_build_state,
user_username=self.user.username,
url=url,
)
try:
resp = self.session.post(
url,
data=json.dumps(data),
headers={"content-type": "application/json"},
)
structlog.contextvars.bind_contextvars(http_status_code=resp.status_code)
if resp.status_code == 201:
log.debug("GitLab commit status created for project.")
return True
if resp.status_code in [401, 403, 404]:
log.info("GitLab project does not exist or user does not have permissions.")
return False
return False
# Catch exceptions with request or deserializing JSON
except (RequestException, ValueError):
# Response data should always be JSON, still try to log if not
# though
if resp is not None:
try:
debug_data = resp.json()
except ValueError:
debug_data = resp.content
else:
debug_data = resp
log.exception(
"GitLab commit status creation failed.",
debug_data=debug_data,
)
except InvalidGrantError:
log.info("Invalid GitLab grant for user.", exc_info=True)
except TokenExpiredError:
log.info("GitLab token expired for user.", exc_info=True)
return False
| GitLabService |
python | getsentry__sentry | src/sentry/grouping/component.py | {
"start": 1208,
"end": 8657
} | class ____[ValuesType: str | int | BaseGroupingComponent[Any]](ABC):
"""
A grouping component is a node in a tree describing the event data (exceptions, stacktraces,
messages, etc.) which can contribute to grouping. Each node's children, stored in the `values`
attribute, are either other grouping components or primitives representing the actual data.
For example, an exception component might have type, value, and stacktrace components as
children, and the type component might have the string "KeyError" as its child.
"""
hint: str | None = None
contributes: bool = False
values: Sequence[ValuesType]
def __init__(
self,
hint: str | None = None,
contributes: bool | None = None,
values: Sequence[ValuesType] | None = None,
):
# Use `upate` to set attribute values because it ensures `contributes` is set (if
# `contributes` is not provided, `update` will derive it from the `values` value)
self.update(
hint=hint,
contributes=contributes,
values=values or [],
)
@property
@abstractmethod
def id(self) -> str: ...
@property
def name(self) -> str | None:
return KNOWN_MAJOR_COMPONENT_NAMES.get(self.id)
@property
def key(self) -> str:
return self.name or self.id
@cached_property
def description(self) -> str:
"""
Build the component description by walking its component tree and collecting the names of
contributing "major" components, to find the longest path of qualifying components from root
to leaf. (See `KNOWN_MAJOR_COMPONENT_NAMES` above.)
"""
# Keep track of the paths we walk so later we can pick the longest one
paths = []
def _walk_components(
component: BaseGroupingComponent[Any], current_path: list[str | None]
) -> None:
# Keep track of the names of the nodes from the root of the component tree to here
current_path.append(component.name)
# Walk the tree, looking for contributing components.
for value in component.values:
if isinstance(value, BaseGroupingComponent) and value.contributes:
_walk_components(value, current_path)
# Filter out the `None`s (which come from components not in `KNOWN_MAJOR_COMPONENT_NAMES`)
# before adding our current path to the list of possible longest paths
paths.append([name for name in current_path if name])
# We're about to finish processing this node, so pop it out of the path
current_path.pop()
# Find the longest path of contributing major components
_walk_components(self, [])
paths.sort(key=lambda x: (len(x), x))
if paths and paths[-1]:
return " ".join(paths[-1])
return self.name or self.id
def get_subcomponent(
self, id: str, recursive: bool = False, only_contributing: bool = False
) -> BaseGroupingComponent[Any] | None:
"""
Looks up a subcomponent by id and returns the first instance found, or `None` if no
instances are found.
Unless `recursive=True` is passed, only direct children (the components in `self.values`)
are checked.
By default, any matching result will be returned. To filter out non-contributing components,
pass `only_contributing=True`. (Note that if a component has `contributes = True` but has a
non-contributing ancestor, the component is not considered contributing for purposes of this
method.)
"""
return next(
self.iter_subcomponents(
id=id, recursive=recursive, only_contributing=only_contributing
),
None,
)
def iter_subcomponents(
self, id: str, recursive: bool = False, only_contributing: bool = False
) -> Iterator[BaseGroupingComponent[Any] | None]:
"""Finds all subcomponents matching an id, optionally recursively."""
for value in self.values:
if isinstance(value, BaseGroupingComponent):
if only_contributing and not value.contributes:
continue
if value.id == id:
yield value
if recursive:
yield from value.iter_subcomponents(
id, recursive=True, only_contributing=only_contributing
)
yield from () # yield an empty generator
def update(
self,
hint: str | None = None,
contributes: bool | None = None,
values: Sequence[ValuesType] | None = None,
) -> None:
"""Updates an already existing component with new values."""
if hint is not None:
self.hint = hint
if values is not None:
if contributes is None:
contributes = _calculate_contributes(values)
# Ensure components which wrap primitives only ever have one child
if len(values) > 0 and any(isinstance(value, (int, str)) for value in values):
try:
assert (
len(values) == 1
), f"Components which wrap primitives can wrap at most one value. Got {values}."
except AssertionError as e:
if in_test_environment():
raise
sentry_sdk.capture_exception(e)
self.values = values
if contributes is not None:
self.contributes = contributes
def iter_values(self) -> Generator[str | int]:
"""
Recursively walks the component tree, gathering literal values from contributing
branches into a flat list.
"""
if self.contributes:
for value in self.values:
if isinstance(value, BaseGroupingComponent):
yield from value.iter_values()
else:
yield value
yield from () # yield an empty generator
def get_hash(self) -> str | None:
"""Returns the hash of the values if it contributes."""
if self.contributes:
return hash_from_values(self.iter_values())
return None
def as_dict(self) -> dict[str, Any]:
"""Converts the component tree into a dictionary."""
rv: dict[str, Any] = {
"id": self.id,
"name": self.name,
"contributes": self.contributes,
"hint": self.hint,
"values": [],
}
for value in self.values:
if isinstance(value, BaseGroupingComponent):
rv["values"].append(value.as_dict())
else:
# This basically assumes that a value is only a primitive
# and never an object or list. This should be okay
# because we verify this.
rv["values"].append(value)
return rv
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.id!r}, hint={self.hint!r}, contributes={self.contributes!r}, values={self.values!r})"
# NOTE: In all of the classes below, the type(s) passed to `BaseGroupingComponent` represent
# the type(s) which can appear in the `values` attribute
# Error-related inner components
| BaseGroupingComponent |
python | getsentry__sentry | src/sentry/integrations/discord/requests/base.py | {
"start": 919,
"end": 1122
} | class ____(Exception):
"""
Something was invalid about the request from Discord.
Includes the status the endpoint should return, based on the error.
"""
status: int
| DiscordRequestError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 927370,
"end": 927908
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of RemoveEnterpriseSupportEntitlement"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of removing the support
entitlement.
"""
| RemoveEnterpriseSupportEntitlementPayload |
python | readthedocs__readthedocs.org | readthedocs/core/adapters.py | {
"start": 2438,
"end": 7240
} | class ____(DefaultSocialAccountAdapter):
def pre_social_login(self, request, sociallogin):
self._filter_email_addresses(sociallogin)
self._block_use_of_old_github_oauth_app(request, sociallogin)
self._connect_github_app_to_existing_github_account(request, sociallogin)
def _filter_email_addresses(self, sociallogin):
"""
Remove all email addresses except the primary one.
We don't want to populate all email addresses from the social account,
it also makes it easy to mark only the primary email address as verified
for providers that don't return information about email verification
even if the email is verified (like GitLab).
"""
sociallogin.email_addresses = [
email for email in sociallogin.email_addresses if email.primary
]
def _connect_github_app_to_existing_github_account(self, request, sociallogin):
"""
Connect a GitHub App (new integration) account to an existing GitHub account (old integration).
When a user signs up with the GitHub App we check if there is an existing GitHub account,
and if it belongs to the same user, we connect the accounts instead of creating a new one.
"""
provider = sociallogin.account.get_provider()
# If the provider is not GitHub App, nothing to do.
if provider.id != GitHubAppProvider.id:
return
# If the user already signed up with the GitHub App, nothing to do.
if sociallogin.is_existing:
return
social_account = SocialAccount.objects.filter(
provider=GitHubProvider.id,
uid=sociallogin.account.uid,
).first()
# If there is an existing GH account, we check if that user can use the GH App,
# otherwise we check for the current user.
user_to_check = social_account.user if social_account else request.user
if not self._can_use_github_app(user_to_check):
raise ImmediateHttpResponse(HttpResponseRedirect(reverse("account_login")))
# If there isn't an existing GH account, nothing to do,
# just let allauth create the new account.
if not social_account:
return
# If the user is logged in, and the GH OAuth account belongs to
# a different user, we should not connect the accounts,
# this is the same as trying to connect an existing GH account to another user.
if request.user.is_authenticated and request.user != social_account.user:
message_template = "socialaccount/messages/account_connected_other.txt"
get_account_adapter(request).add_message(
request=request,
level=messages.ERROR,
message_template=message_template,
)
url = reverse("socialaccount_connections")
raise ImmediateHttpResponse(HttpResponseRedirect(url))
sociallogin.connect(request, social_account.user)
def _can_use_github_app(self, user):
"""
Check if the user can use the GitHub App.
Only staff users can use the GitHub App for now.
"""
return settings.RTD_ALLOW_GITHUB_APP or user.is_staff
def _block_use_of_old_github_oauth_app(self, request, sociallogin):
"""
Block the use of the old GitHub OAuth app if the user is already using the new GitHub App.
This is a temporary measure to block the use of the old GitHub OAuth app
until we switch our login to always use the new GitHub App.
If the user has its account still connected to the old GitHub OAuth app,
we allow them to use it, since there is no difference between using the two apps
for logging in.
"""
provider = sociallogin.account.get_provider()
# If the provider is not GitHub, nothing to do.
if provider.id != GitHubProvider.id:
return
# If the user is still using the old GitHub OAuth app, nothing to do.
if sociallogin.is_existing:
return
has_gh_app_social_account = SocialAccount.objects.filter(
provider=GitHubAppProvider.id,
uid=sociallogin.account.uid,
).exists()
# If there is no existing GitHub App account, nothing to do.
if not has_gh_app_social_account:
return
# Show a warning to the user and redirect them to the GitHub App login page.
messages.warning(
request,
"You already migrated from our old GitHub OAuth app. "
"Click below to sign in with the new GitHub App.",
)
url = reverse("githubapp_login")
raise ImmediateHttpResponse(HttpResponseRedirect(url))
| SocialAccountAdapter |
python | scipy__scipy | scipy/linalg/tests/test_decomp.py | {
"start": 73990,
"end": 78541
} | class ____:
def test_simple(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
r, q = rq(a)
assert_array_almost_equal(q @ q.T, eye(3))
assert_array_almost_equal(r @ q, a)
def test_r(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
r, q = rq(a)
r2 = rq(a, mode='r')
assert_array_almost_equal(r, r2)
def test_random(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n])
r, q = rq(a)
assert_array_almost_equal(q @ q.T, eye(n))
assert_array_almost_equal(r @ q, a)
def test_simple_trap(self):
a = [[8, 2, 3], [2, 9, 3]]
r, q = rq(a)
assert_array_almost_equal(q.T @ q, eye(3))
assert_array_almost_equal(r @ q, a)
def test_simple_tall(self):
a = [[8, 2], [2, 9], [5, 3]]
r, q = rq(a)
assert_array_almost_equal(q.T @ q, eye(2))
assert_array_almost_equal(r @ q, a)
def test_simple_fat(self):
a = [[8, 2, 5], [2, 9, 3]]
r, q = rq(a)
assert_array_almost_equal(q @ q.T, eye(3))
assert_array_almost_equal(r @ q, a)
def test_simple_complex(self):
a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]
r, q = rq(a)
assert_array_almost_equal(q @ q.conj().T, eye(3))
assert_array_almost_equal(r @ q, a)
def test_random_tall(self):
rng = np.random.RandomState(1234)
m = 200
n = 100
for k in range(2):
a = rng.random([m, n])
r, q = rq(a)
assert_array_almost_equal(q @ q.T, eye(n))
assert_array_almost_equal(r @ q, a)
def test_random_trap(self):
rng = np.random.RandomState(1234)
m = 100
n = 200
for k in range(2):
a = rng.random([m, n])
r, q = rq(a)
assert_array_almost_equal(q @ q.T, eye(n))
assert_array_almost_equal(r @ q, a)
def test_random_trap_economic(self):
rng = np.random.RandomState(1234)
m = 100
n = 200
for k in range(2):
a = rng.random([m, n])
r, q = rq(a, mode='economic')
assert_array_almost_equal(q @ q.T, eye(m))
assert_array_almost_equal(r @ q, a)
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (m, m))
def test_random_complex(self):
rng = np.random.RandomState(1234)
n = 20
for k in range(2):
a = rng.random([n, n]) + 1j*rng.random([n, n])
r, q = rq(a)
assert_array_almost_equal(q @ q.conj().T, eye(n))
assert_array_almost_equal(r @ q, a)
def test_random_complex_economic(self):
rng = np.random.RandomState(1234)
m = 100
n = 200
for k in range(2):
a = rng.random([m, n]) + 1j*rng.random([m, n])
r, q = rq(a, mode='economic')
assert_array_almost_equal(q @ q.conj().T, eye(m))
assert_array_almost_equal(r @ q, a)
assert_equal(q.shape, (m, n))
assert_equal(r.shape, (m, m))
def test_check_finite(self):
a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]]
r, q = rq(a, check_finite=False)
assert_array_almost_equal(q @ q.T, eye(3))
assert_array_almost_equal(r @ q, a)
@pytest.mark.parametrize("m", [0, 1, 2])
@pytest.mark.parametrize("n", [0, 1, 2])
@pytest.mark.parametrize('dtype', DTYPES)
def test_shape_dtype(self, m, n, dtype):
k = min(m, n)
a = np.zeros((m, n), dtype=dtype)
r, q = rq(a)
assert_equal(q.shape, (n, n))
assert_equal(r.shape, (m, n))
assert_equal(r.dtype, dtype)
assert_equal(q.dtype, dtype)
r = rq(a, mode='r')
assert_equal(r.shape, (m, n))
assert_equal(r.dtype, dtype)
r, q = rq(a, mode='economic')
assert_equal(r.shape, (m, k))
assert_equal(r.dtype, dtype)
assert_equal(q.shape, (k, n))
assert_equal(q.dtype, dtype)
@pytest.mark.parametrize(("m", "n"), [(0, 0), (0, 2), (2, 0)])
def test_empty(self, m, n):
k = min(m, n)
a = np.empty((m, n))
r, q = rq(a)
assert_allclose(r, np.empty((m, n)))
assert_allclose(q, np.identity(n))
r = rq(a, mode='r')
assert_allclose(r, np.empty((m, n)))
r, q = rq(a, mode='economic')
assert_allclose(r, np.empty((m, k)))
assert_allclose(q, np.empty((k, n)))
| TestRQ |
python | bokeh__bokeh | tests/unit/bokeh/document/test_document.py | {
"start": 2814,
"end": 3172
} | class ____(CDSDerivedDataModel):
prop3 = Instance(SomeDataModel, default=SomeDataModel(prop0=-1))
data = Override(default={"default_column": [7, 8, 9]})
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
| CDSDerivedDerivedDataModel |
python | ansible__ansible | lib/ansible/errors/__init__.py | {
"start": 13562,
"end": 13987
} | class ____(AnsibleAction):
"""
Imports as `_AnsibleActionDone` are deprecated. An action runtime early exit.
This exception provides a result dictionary via the ContributesToTaskResult mixin.
"""
@property
def omit_failed_key(self) -> bool:
return not self._result.get('failed')
@property
def omit_exception_key(self) -> bool:
return not self._result.get('failed')
| _ActionDone |
python | doocs__leetcode | solution/1000-1099/1099.Two Sum Less Than K/Solution2.py | {
"start": 0,
"end": 331
} | class ____:
def twoSumLessThanK(self, nums: List[int], k: int) -> int:
nums.sort()
i, j = 0, len(nums) - 1
ans = -1
while i < j:
if (s := nums[i] + nums[j]) < k:
ans = max(ans, s)
i += 1
else:
j -= 1
return ans
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/table/test_table11.py | {
"start": 518,
"end": 2781
} | class ____(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
dxf_format = Format()
dxf_format.dxf_index = 0
# Set the table properties.
worksheet.add_table(
"C2:F14",
{
"total_row": 1,
"columns": [
{"total_string": "Total"},
{},
{},
{
"total_function": "count",
"format": dxf_format,
"formula": "SUM(Table1[[#This Row],[Column1]:[Column3]])",
},
],
},
)
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C2:F14" totalsRowCount="1">
<autoFilter ref="C2:F13"/>
<tableColumns count="4">
<tableColumn id="1" name="Column1" totalsRowLabel="Total"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Column4" totalsRowFunction="count" dataDxfId="0">
<calculatedColumnFormula>SUM(Table1[[#This Row],[Column1]:[Column3]])</calculatedColumnFormula>
</tableColumn>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleTable |
python | OmkarPathak__pygorithm | tests/test_data_structure.py | {
"start": 5944,
"end": 11076
} | class ____(unittest.TestCase):
def test_topological_sort(self):
myGraph = graph.TopologicalSort()
myGraph.add_edge(5, 2)
myGraph.add_edge(5, 0)
myGraph.add_edge(4, 0)
myGraph.add_edge(4, 1)
myGraph.add_edge(2, 3)
myGraph.add_edge(3, 1)
ans = myGraph.topological_sort()
expectedResult = [5, 4, 2, 3, 1, 0]
self.assertEqual(ans, expectedResult)
def test_cycle_in_directed_graph(self):
myGraph = graph.CheckCycleDirectedGraph()
myGraph.add_edge(0, 1)
myGraph.add_edge(0, 2)
myGraph.add_edge(1, 2)
myGraph.add_edge(2, 0)
myGraph.add_edge(2, 3)
myGraph.add_edge(3, 3)
self.assertTrue(myGraph.check_cycle())
def test_add_edge_in_undirected_graph(self):
myGraph = graph.CheckCycleUndirectedGraph()
myGraph.add_edge(0, 1)
myGraph.add_edge(0, 2)
setFrom0 = myGraph.graph[0]
setFrom1 = myGraph.graph[1]
setFrom2 = myGraph.graph[2]
self.assertIsNotNone(setFrom0)
self.assertIsNotNone(setFrom1)
self.assertIsNotNone(setFrom2)
self.assertIn(1, setFrom0)
self.assertIn(0, setFrom1)
self.assertIn(2, setFrom0)
self.assertIn(0, setFrom2)
def test_cycle_in_undirected_graph(self):
myGraph = graph.CheckCycleUndirectedGraph()
myGraph.add_edge(0, 1)
myGraph.add_edge(0, 2)
myGraph.add_edge(1, 2)
myGraph.add_edge(2, 0)
myGraph.add_edge(2, 3)
myGraph.add_edge(3, 3)
self.assertTrue(myGraph.check_cycle())
def test_creating_weighted_undirected_graph(self):
myGraph = graph.WeightedUndirectedGraph()
myGraph.add_edge(0, 1, 1)
self.assertIn(0, myGraph.graph[1])
self.assertIn(1, myGraph.graph[0])
self.assertEqual(1, myGraph.get_edge_weight(0, 1))
self.assertEqual(1, myGraph.get_edge_weight(1, 0))
myGraph.add_edge(0, 2, 3)
self.assertIn(0, myGraph.graph[2])
self.assertIn(0, myGraph.graph[1])
self.assertIn(1, myGraph.graph[0])
self.assertIn(2, myGraph.graph[0])
self.assertEqual(1, myGraph.get_edge_weight(0, 1))
self.assertEqual(1, myGraph.get_edge_weight(1, 0))
self.assertEqual(3, myGraph.get_edge_weight(0, 2))
self.assertEqual(3, myGraph.get_edge_weight(2, 0))
myGraph.add_edge(2, 3, 7)
self.assertIn(0, myGraph.graph[2])
self.assertIn(3, myGraph.graph[2])
self.assertIn(2, myGraph.graph[3])
self.assertNotIn(0, myGraph.graph[3])
self.assertNotIn(3, myGraph.graph[0])
self.assertEqual(7, myGraph.get_edge_weight(2, 3))
self.assertIsNone(myGraph.get_edge_weight(0, 3))
def test_removing_from_weighted_undirected_graph(self):
myGraph = graph.WeightedUndirectedGraph()
myGraph.add_edge(0, 1, 1)
myGraph.add_edge(0, 2, 1)
myGraph.add_edge(0, 3, 1)
myGraph.add_edge(0, 4, 1)
myGraph.add_edge(4, 5, 1)
myGraph.add_edge(2, 6, 1)
self.assertEqual(1, myGraph.get_edge_weight(0, 1))
self.assertEqual(1, myGraph.get_edge_weight(0, 2))
self.assertEqual(1, myGraph.get_edge_weight(0, 3))
self.assertEqual(1, myGraph.get_edge_weight(0, 4))
self.assertEqual(1, myGraph.get_edge_weight(4, 5))
self.assertEqual(1, myGraph.get_edge_weight(2, 6))
myGraph.remove_edge(0, 1)
self.assertIsNone(myGraph.get_edge_weight(0, 1))
self.assertEqual(1, myGraph.get_edge_weight(0, 2))
self.assertEqual(1, myGraph.get_edge_weight(0, 3))
self.assertEqual(1, myGraph.get_edge_weight(0, 4))
self.assertEqual(1, myGraph.get_edge_weight(4, 5))
self.assertEqual(1, myGraph.get_edge_weight(2, 6))
myGraph.remove_edge(0, 2)
self.assertIsNone(myGraph.get_edge_weight(0, 1))
self.assertIsNone(myGraph.get_edge_weight(0, 2))
self.assertEqual(1, myGraph.get_edge_weight(0, 3))
self.assertEqual(1, myGraph.get_edge_weight(0, 4))
self.assertEqual(1, myGraph.get_edge_weight(4, 5))
self.assertEqual(1, myGraph.get_edge_weight(2, 6))
myGraph.remove_edge(0)
self.assertIsNone(myGraph.get_edge_weight(0, 1))
self.assertIsNone(myGraph.get_edge_weight(0, 2))
self.assertIsNone(myGraph.get_edge_weight(0, 3))
self.assertIsNone(myGraph.get_edge_weight(0, 4))
self.assertEqual(1, myGraph.get_edge_weight(4, 5))
self.assertEqual(1, myGraph.get_edge_weight(2, 6))
def test_gridify_weighted_undirected_graph(self):
rt2 = 1.4142135623730951
myGraph = graph.WeightedUndirectedGraph()
myGraph.gridify(4, 1)
self.assertEqual(1, myGraph.get_edge_weight((0, 0), (0, 1)))
self.assertAlmostEqual(rt2, myGraph.get_edge_weight((0, 0), (1, 1)))
self.assertIsNone(myGraph.get_edge_weight((0, 0), (2, 0)))
self.assertEqual(1, myGraph.get_edge_weight((2, 3), (3, 3)))
self.assertIsNone(myGraph.get_edge_weight((3, 3), (3, 4)))
| TestGraph |
python | doocs__leetcode | solution/2500-2599/2548.Maximum Price to Fill a Bag/Solution.py | {
"start": 0,
"end": 293
} | class ____:
def maxPrice(self, items: List[List[int]], capacity: int) -> float:
ans = 0
for p, w in sorted(items, key=lambda x: x[1] / x[0]):
v = min(w, capacity)
ans += v / w * p
capacity -= v
return -1 if capacity else ans
| Solution |
python | tornadoweb__tornado | demos/facebook/facebook.py | {
"start": 1756,
"end": 1998
} | class ____(tornado.web.RequestHandler):
def get_current_user(self):
user_json = self.get_signed_cookie("fbdemo_user")
if not user_json:
return None
return tornado.escape.json_decode(user_json)
| BaseHandler |
python | pytorch__pytorch | torch/_dynamo/output_graph.py | {
"start": 7954,
"end": 8429
} | class ____(torch.nn.Module):
"""Trick the constructor of fx.GraphModule"""
def __init__(self, nn_modules: dict[str, torch.nn.Module]):
super().__init__()
for k, v in nn_modules.items():
setattr(self, k, v)
def __repr__(self) -> str:
return "FakeRootModule(...)"
def add_nn_modules(self, nn_modules: dict[str, torch.nn.Module]) -> None:
for k, v in nn_modules.items():
setattr(self, k, v)
| FakeRootModule |
python | huggingface__transformers | tests/models/detr/test_modeling_detr.py | {
"start": 1362,
"end": 6496
} | class ____:
def __init__(
self,
parent,
batch_size=8,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=8,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
num_queries=12,
num_channels=3,
min_size=200,
max_size=200,
n_targets=8,
num_labels=91,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_queries = num_queries
self.num_channels = num_channels
self.min_size = min_size
self.max_size = max_size
self.n_targets = n_targets
self.num_labels = num_labels
# we also set the expected seq length for both encoder and decoder
self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32)
self.decoder_seq_length = self.num_queries
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size])
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size):
target = {}
target["class_labels"] = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=torch_device
)
target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device)
labels.append(target)
config = self.get_config()
return config, pixel_values, pixel_mask, labels
def get_config(self):
resnet_config = ResNetConfig(
num_channels=3,
embeddings_size=10,
hidden_sizes=[10, 20, 30, 40],
depths=[1, 1, 2, 1],
hidden_act="relu",
num_labels=3,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
)
return DetrConfig(
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
num_queries=self.num_queries,
num_labels=self.num_labels,
use_timm_backbone=False,
backbone_config=resnet_config,
backbone=None,
use_pretrained_backbone=False,
)
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def create_and_check_detr_model(self, config, pixel_values, pixel_mask, labels):
model = DetrModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)
)
def create_and_check_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
model = DetrForObjectDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
| DetrModelTester |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1330951,
"end": 1331683
} | class ____(sgqlc.types.Type, ProjectV2FieldCommon, Node):
"""A single select field inside a project."""
__schema__ = github_schema
__field_names__ = ("options",)
options = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(ProjectV2SingleSelectFieldOption))),
graphql_name="options",
args=sgqlc.types.ArgDict(
(("names", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="names", default=None)),)
),
)
"""Options for the single select field
Arguments:
* `names` (`[String!]`): Filter returned options to only those
matching these names, case insensitive.
"""
| ProjectV2SingleSelectField |
python | pypa__warehouse | warehouse/forms.py | {
"start": 467,
"end": 1078
} | class ____:
def __init__(
self,
require_scheme=True,
allowed_schemes={"http", "https"},
require_authority=True,
):
self.require_scheme = require_scheme
self.allowed_schemes = allowed_schemes
self.require_authority = require_authority
def __call__(self, form, field):
if not is_valid_uri(
field.data,
require_authority=self.require_authority,
allowed_schemes=self.allowed_schemes,
require_scheme=self.require_scheme,
):
raise ValidationError("Invalid URI")
| URIValidator |
python | justquick__django-activity-stream | actstream/tests/test_views.py | {
"start": 146,
"end": 6729
} | class ____(DataTestCase):
def setUp(self):
super(ViewsTest, self).setUp()
self.client.login(username='admin', password='admin')
def get(self, viewname, *args, **params):
return self.client.get('{}?{}'.format(
reverse(viewname, args=args),
urlencode(params)))
def assertQSEqual(self, qs1, qs2):
def attrs(item):
return {key: value
for key, value in item.__dict__.items()
if not key.startswith('_')}
self.assertEqual(len(qs1), len(qs2))
for i, item in enumerate(qs1):
self.assertDictEqual(attrs(item), attrs(qs2[i]))
def test_follow_unfollow(self):
response = self.get('actstream_follow', self.user_ct.pk, self.user3.pk)
self.assertEqual(response.status_code, 201)
self.assertEqual(len(response.templates), 0)
follow = {'user': self.user1, 'content_type': self.user_ct,
'object_id': self.user3.pk}
action = {'actor_content_type': self.user_ct, 'actor_object_id': self.user1.pk,
'target_content_type': self.user_ct, 'target_object_id': self.user3.pk,
'verb': 'started following'}
models.Follow.objects.get(**follow)
models.Action.objects.get(**action)
response = self.get('actstream_unfollow', self.user_ct.pk, self.user3.pk)
self.assertEqual(response.status_code, 204)
self.assertEqual(len(response.templates), 0)
self.assertRaises(models.Follow.DoesNotExist, models.Follow.objects.get, **follow)
response = self.get('actstream_unfollow', self.user_ct.pk, self.user3.pk, next='/redirect/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/redirect/'))
def test_follow_unfollow_with_flag(self):
response = self.get('actstream_follow', self.user_ct.pk, self.user3.pk, 'watching')
self.assertEqual(response.status_code, 201)
self.assertEqual(len(response.templates), 0)
follow = {'user': self.user1, 'content_type': self.user_ct,
'object_id': self.user3.pk, 'flag': 'watching'}
action = {'actor_content_type': self.user_ct, 'actor_object_id': self.user1.pk,
'target_content_type': self.user_ct, 'target_object_id': self.user3.pk,
'verb': 'started watching'}
models.Follow.objects.get(**follow)
models.Action.objects.get(**action)
response = self.get('actstream_unfollow', self.user_ct.pk, self.user3.pk, 'watching')
self.assertEqual(response.status_code, 204)
self.assertEqual(len(response.templates), 0)
self.assertRaises(models.Follow.DoesNotExist, models.Follow.objects.get, **follow)
response = self.get('actstream_unfollow', self.user_ct.pk, self.user3.pk, 'watching', next='/redirect/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/redirect/'))
def test_stream(self):
response = self.get('actstream')
self.assertTemplateUsed(response, 'actstream/actor.html')
self.assertTemplateUsed(response, 'base.html')
self.assertEqual(response.context['actor'], self.user1)
self.assertEqual(response.context['ctype'], self.user_ct)
self.assertQSEqual(response.context['action_list'],
models.user_stream(self.user1))
def test_followers_following(self):
response = self.get('actstream_followers', self.user_ct.pk, self.user2.pk)
self.assertTemplateUsed(response, 'actstream/followers.html')
self.assertEqual(response.context['user'], self.user1)
self.assertQSEqual(response.context['followers'],
models.followers(self.user2))
response = self.get('actstream_following', self.user2.pk)
self.assertTemplateUsed(response, 'actstream/following.html')
self.assertEqual(response.context['user'], self.user2)
self.assertQSEqual(response.context['following'],
models.following(self.user2))
def test_followers_following_with_flag(self):
response = self.get('actstream_followers', self.user_ct.pk, self.user2.pk, 'watching')
self.assertTemplateUsed(response, 'actstream/followers.html')
self.assertEqual(response.context['user'], self.user1)
self.assertQSEqual(response.context['followers'],
models.followers(self.user2, flag='watching'))
response = self.get('actstream_following', self.user2.pk, 'watching')
self.assertTemplateUsed(response, 'actstream/following.html')
self.assertEqual(response.context['user'], self.user2)
self.assertQSEqual(response.context['following'],
models.following(self.user2, flag='watching'))
def test_user(self):
response = self.get('actstream_user', self.user2.username)
self.assertTemplateUsed(response, 'actstream/actor.html')
self.assertEqual(response.context['ctype'], self.user_ct)
self.assertEqual(response.context['actor'], self.user2)
self.assertQSEqual(response.context['action_list'],
models.user_stream(self.user2))
def test_detail(self):
response = self.get('actstream_detail', self.join_action.pk)
self.assertTemplateUsed(response, 'actstream/detail.html')
self.assertTemplateUsed(response, 'actstream/action.html')
self.assertEqual(response.context['action'], self.join_action)
def test_actor(self):
response = self.get('actstream_actor', self.user_ct.pk, self.user2.pk)
self.assertTemplateUsed(response, 'actstream/actor.html')
self.assertTemplateUsed(response, 'base.html')
self.assertEqual(response.context['ctype'], self.user_ct)
self.assertEqual(response.context['actor'], self.user2)
self.assertQSEqual(response.context['action_list'],
models.actor_stream(self.user2))
def test_model(self):
response = self.get('actstream_model', self.user_ct.pk)
self.assertTemplateUsed(response, 'actstream/actor.html')
self.assertTemplateUsed(response, 'base.html')
self.assertEqual(response.context['ctype'], self.user_ct)
self.assertEqual(response.context['actor'], self.user_ct.model_class())
self.assertQSEqual(response.context['action_list'],
models.model_stream(self.user1))
| ViewsTest |
python | huggingface__transformers | src/transformers/models/clip/image_processing_clip.py | {
"start": 1481,
"end": 16776
} | class ____(BaseImageProcessor):
r"""
Constructs a CLIP image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]` *optional*, defaults to 224):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values"]
def __init__(
    self,
    do_resize: bool = True,
    size: Optional[dict[str, int]] = None,
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    do_center_crop: bool = True,
    crop_size: Optional[dict[str, int]] = None,
    do_rescale: bool = True,
    rescale_factor: Union[int, float] = 1 / 255,
    do_normalize: bool = True,
    image_mean: Optional[Union[float, list[float]]] = None,
    image_std: Optional[Union[float, list[float]]] = None,
    do_convert_rgb: bool = True,
    **kwargs,
) -> None:
    super().__init__(**kwargs)
    # Canonicalize the size specs: resizing defaults to a shortest-edge spec
    # (aspect ratio preserved), cropping defaults to an explicit 224x224 square.
    size = size if size is not None else {"shortest_edge": 224}
    size = get_size_dict(size, default_to_square=False)
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    # Fall back to the OpenAI CLIP normalization statistics when none are given.
    self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
    self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
    self.do_convert_rgb = do_convert_rgb
    # Whitelist of keyword arguments accepted by `preprocess`; anything else
    # is rejected by `validate_kwargs` there.
    self._valid_processor_keys = [
        "images",
        "do_resize",
        "size",
        "resample",
        "do_center_crop",
        "crop_size",
        "do_rescale",
        "rescale_factor",
        "do_normalize",
        "image_mean",
        "image_std",
        "do_convert_rgb",
        "return_tensors",
        "do_convert_rgb",
        "return_tensors",
    ][:13] if False else [
        "images",
        "do_resize",
        "size",
        "resample",
        "do_center_crop",
        "crop_size",
        "do_rescale",
        "rescale_factor",
        "do_normalize",
        "image_mean",
        "image_std",
        "do_convert_rgb",
        "return_tensors",
        "data_format",
        "input_data_format",
    ]
    # for backwards compatibility of KOSMOS-2
    if "use_square_size" in kwargs and kwargs["use_square_size"]:
        self.size = {"height": size["shortest_edge"], "width": size["shortest_edge"]}
        # Let's remove `use_square_size` (as it is removed from #27690), so the future Kosmos-2 image processors
        # won't have this attr. being saved. (otherwise, it will enter this if branch while there is no more
        # `shortest_edge` key.
        # NOTE(review): this relies on the base-class __init__ having stored the
        # unknown `use_square_size` kwarg as an instance attribute — confirm.
        delattr(self, "use_square_size")
def resize(
    self,
    image: np.ndarray,
    size: dict[str, int],
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """
    Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
    resized to keep the input aspect ratio.

    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`dict[str, int]`):
            Size of the output image. Either `{"shortest_edge": int}` (aspect ratio preserved) or
            `{"height": int, "width": int}` (exact output size).
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.
    """
    # Translate the size dict into the scalar/tuple form expected by
    # `get_resize_output_image_size`, tracking whether a square output is forced.
    if "shortest_edge" in size:
        requested = size["shortest_edge"]
        force_square = False
    elif "height" in size and "width" in size:
        requested = (size["height"], size["width"])
        force_square = True
    else:
        raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
    target = get_resize_output_image_size(
        image,
        size=requested,
        default_to_square=force_square,
        input_data_format=input_data_format,
    )
    return resize(
        image,
        size=target,
        resample=resample,
        data_format=data_format,
        input_data_format=input_data_format,
        **kwargs,
    )
def preprocess(
    self,
    images: ImageInput,
    do_resize: Optional[bool] = None,
    size: Optional[dict[str, int]] = None,
    resample: Optional[PILImageResampling] = None,
    do_center_crop: Optional[bool] = None,
    crop_size: Optional[int] = None,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, list[float]]] = None,
    image_std: Optional[Union[float, list[float]]] = None,
    do_convert_rgb: Optional[bool] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> PIL.Image.Image:
    """
    Preprocess an image or batch of images.

    Args:
        images (`ImageInput`):
            Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
            passing in images with pixel values between 0 and 1, set `do_rescale=False`.
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the image.
        size (`dict[str, int]`, *optional*, defaults to `self.size`):
            Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio.
        resample (`int`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
            has an effect if `do_resize` is set to `True`.
        do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
            Whether to center crop the image.
        crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
            Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the image.
        rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
            Rescale factor to rescale the image by if `do_rescale` is set to `True`.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the image.
        image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
            Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
        image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
            Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
            `True`.
        do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
            Whether to convert the image to RGB.
        return_tensors (`str` or `TensorType`, *optional*):
            The type of tensors to return. Can be one of:
            - Unset: Return a list of `np.ndarray`.
            - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
            - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input image.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image. If unset, the channel dimension format is inferred
            from the input image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
    """
    # Every unset argument falls back to the instance-level default set in __init__.
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, param_name="size", default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

    # Reject any keyword argument not in the whitelist built in __init__.
    validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

    # NOTE(review): `fetch_images` appears to resolve non-image inputs (e.g. URLs)
    # into images — defined outside this block, confirm against the base class.
    images = self.fetch_images(images)
    images = make_flat_list_of_images(images)

    if not valid_images(images):
        raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")

    # Check argument combinations (e.g. rescale without a factor) before any work.
    validate_preprocess_arguments(
        do_rescale=do_rescale,
        rescale_factor=rescale_factor,
        do_normalize=do_normalize,
        image_mean=image_mean,
        image_std=image_std,
        do_center_crop=do_center_crop,
        crop_size=crop_size,
        do_resize=do_resize,
        size=size,
        resample=resample,
    )

    if do_convert_rgb:
        images = [convert_to_rgb(image) for image in images]

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if do_rescale and is_scaled_image(images[0]):
        logger.warning_once(
            "It looks like you are trying to rescale already rescaled images. If the input"
            " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
        )

    if input_data_format is None:
        # We assume that all images have the same channel dimension format.
        input_data_format = infer_channel_dimension_format(images[0])

    # Apply the pipeline in fixed order: resize -> center-crop -> rescale -> normalize.
    all_images = []
    for image in images:
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

        if do_center_crop:
            image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_normalize:
            image = self.normalize(
                image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
            )

        all_images.append(image)

    # Convert every image to the requested output channel layout.
    images = [
        to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
        for image in all_images
    ]

    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
# Explicit public API of this module.
__all__ = ["CLIPImageProcessor"]
| CLIPImageProcessor |
python | ray-project__ray | python/ray/dag/dag_node_operation.py | {
"start": 239,
"end": 1057
} | class ____(Enum):
"""
There are three types of operations that a DAG node can perform:
1. READ: Read from an input channel.
2. COMPUTE: Execute the method corresponding to the node.
3. WRITE: Write to an output channel.
"""
READ = "READ"
COMPUTE = "COMPUTE"
WRITE = "WRITE"
def viz_str(self):
"""
A string representation of the operation type to be used in visualization.
The result string is a single character because conciseness is preferred.
"""
if self == _DAGNodeOperationType.READ:
return "R"
elif self == _DAGNodeOperationType.COMPUTE:
return "C"
elif self == _DAGNodeOperationType.WRITE:
return "W"
assert False, f"Unknown operation type: {self}"
| _DAGNodeOperationType |
python | ray-project__ray | python/ray/train/v2/_internal/metrics/base.py | {
"start": 1557,
"end": 2422
} | class ____(Metric):
"""A metric for tracking elapsed time."""
def __init__(
self,
name: str,
description: str,
base_tags: Dict[str, str],
):
self._current_value = 0.0
super().__init__(
name=name,
default=0.0,
description=description,
base_tags=base_tags,
)
def record(self, value: float):
"""Update the time metric value by accumulating the time.
Args:
value: The time value to increment the metric by.
"""
self._current_value += value
self._gauge.set(self._current_value, self._base_tags)
def get_value(self) -> float:
return self._current_value
def reset(self):
self._current_value = self._default
self._gauge.set(self._default, self._base_tags)
| TimeMetric |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.