language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | weaviate__weaviate-python-client | weaviate/validator.py | {
"start": 293,
"end": 2327
} | class ____(str, BaseEnum):
NUMPY = "numpy"
PANDAS = "pandas"
POLARS = "polars"
TF = "tensorflow"
def _validate_input(inputs: Union[List[_ValidateArgument], _ValidateArgument]) -> None:
"""Validate the values of the input arguments in comparison to the expected types defined in _ValidateArgument.
It is not completely robust so be careful supplying subscripted generics in expected as it may not function as expected.
To avoid this, only supply simply generics like Sequence[...] and List[...] as seen below in __is_valid.
"""
if isinstance(inputs, _ValidateArgument):
inputs = [inputs]
for validate in inputs:
if not any(_is_valid(exp, validate.value) for exp in validate.expected):
raise WeaviateInvalidInputError(
f"Argument '{validate.name}' must be one of: {validate.expected}, but got {type(validate.value)}"
)
def _is_valid(expected: Any, value: Any) -> bool:
if expected is None:
return value is None
# check for types that are not installed
# https://stackoverflow.com/questions/12569452/how-to-identify-numpy-types-in-python
if isinstance(expected, _ExtraTypes):
return expected.value in type(value).__module__
expected_origin = get_origin(expected)
if expected_origin is Union:
args = get_args(expected)
return any(isinstance(value, arg) for arg in args)
if expected_origin is not None and (
issubclass(expected_origin, Sequence) or expected_origin is list
):
if not isinstance(value, Sequence) and not isinstance(value, list):
return False
args = get_args(expected)
if len(args) == 1:
if get_origin(args[0]) is Union:
union_args = get_args(args[0])
return any(isinstance(val, union_arg) for val in value for union_arg in union_args)
else:
return all(isinstance(val, args[0]) for val in value)
return isinstance(value, expected)
| _ExtraTypes |
python | astropy__astropy | astropy/__init__.py | {
"start": 2827,
"end": 3589
} | class ____(ScienceState):
"""
Base class for the real version-setters below.
"""
_value = "test"
_versions = dict(test="test")
@classmethod
def validate(cls, value):
if value not in cls._versions:
raise ValueError(f"Must be one of {list(cls._versions.keys())}")
return cls._versions[value]
@classmethod
def set(cls, value):
"""
Set the current constants value.
"""
import sys
if "astropy.units" in sys.modules:
raise RuntimeError("astropy.units is already imported")
if "astropy.constants" in sys.modules:
raise RuntimeError("astropy.constants is already imported")
return super().set(value)
| base_constants_version |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 42449,
"end": 43571
} | class ____(BaseModel):
type: Literal["CursorPagination"]
cursor_value: str = Field(
...,
description="Value of the cursor defining the next page to fetch.",
examples=[
"{{ headers.link.next.cursor }}",
"{{ last_record['key'] }}",
"{{ response['nextPage'] }}",
],
title="Cursor Value",
)
page_size: Optional[int] = Field(
None,
description="The number of records to include in each pages.",
examples=[100],
title="Page Size",
)
stop_condition: Optional[str] = Field(
None,
description="Template string evaluating when to stop paginating.",
examples=[
"{{ response.data.has_more is false }}",
"{{ 'next' not in headers['link'] }}",
],
title="Stop Condition",
)
decoder: Optional[JsonDecoder] = Field(
None,
description="Component decoding the response so records can be extracted.",
title="Decoder",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| CursorPagination |
python | nryoung__algorithms | tests/test_math.py | {
"start": 357,
"end": 813
} | class ____(unittest.TestCase):
def test_cdf(self):
# Calculate cumulative distribution function for x=1
a = cdf(1)
self.assertAlmostEqual(a, 0.841344746068543)
# Calculate cumulative distribution function x=0
a = cdf(0)
self.assertAlmostEqual(a, 0.5)
# Calculate cumulative distribution function for x=(-1)
a = cdf(-1)
self.assertAlmostEqual(a, 0.15865525393145702)
| TestApproxCdf |
python | numpy__numpy | numpy/f2py/tests/test_return_integer.py | {
"start": 79,
"end": 1113
} | class ____(util.F2PyTest):
def check_function(self, t, tname):
assert t(123) == 123
assert t(123.6) == 123
assert t("123") == 123
assert t(-123) == -123
assert t([123]) == 123
assert t((123, )) == 123
assert t(array(123)) == 123
assert t(array(123, "b")) == 123
assert t(array(123, "h")) == 123
assert t(array(123, "i")) == 123
assert t(array(123, "l")) == 123
assert t(array(123, "B")) == 123
assert t(array(123, "f")) == 123
assert t(array(123, "d")) == 123
# pytest.raises(ValueError, t, array([123],'S3'))
pytest.raises(ValueError, t, "abc")
pytest.raises(IndexError, t, [])
pytest.raises(IndexError, t, ())
pytest.raises(TypeError, t, t)
pytest.raises(TypeError, t, {})
if tname in ["t8", "s8"]:
pytest.raises(OverflowError, t, 100000000000000000000000)
pytest.raises(OverflowError, t, 10000000011111111111111.23)
| TestReturnInteger |
python | huggingface__transformers | tests/quantization/gptq/test_gptq.py | {
"start": 13023,
"end": 13133
} | class ____(GPTQTestCUDA):
device_map = "auto"
@require_accelerate
@require_torch_multi_gpu
| GPTQTestDeviceMap |
python | sympy__sympy | sympy/physics/secondquant.py | {
"start": 33627,
"end": 36821
} | class ____(FermionState, FockStateBra):
"""
See Also
========
FockStateFermionKet
Examples
========
>>> from sympy.physics.secondquant import FBra
>>> FBra([1, 2])
FockStateFermionBra((1, 2))
"""
def _dagger_(self):
return FockStateFermionKet(*self.args)
BBra = FockStateBosonBra
BKet = FockStateBosonKet
FBra = FockStateFermionBra
FKet = FockStateFermionKet
def _apply_Mul(m):
"""
Take a Mul instance with operators and apply them to states.
Explanation
===========
This method applies all operators with integer state labels
to the actual states. For symbolic state labels, nothing is done.
When inner products of FockStates are encountered (like <a|b>),
they are converted to instances of InnerProduct.
This does not currently work on double inner products like,
<a|b><c|d>.
If the argument is not a Mul, it is simply returned as is.
"""
if not isinstance(m, Mul):
return m
c_part, nc_part = m.args_cnc()
n_nc = len(nc_part)
if n_nc in (0, 1):
return m
else:
last = nc_part[-1]
next_to_last = nc_part[-2]
if isinstance(last, FockStateKet):
if isinstance(next_to_last, SqOperator):
if next_to_last.is_symbolic:
return m
else:
result = next_to_last.apply_operator(last)
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
elif isinstance(next_to_last, Pow):
if isinstance(next_to_last.base, SqOperator) and \
next_to_last.exp.is_Integer:
if next_to_last.base.is_symbolic:
return m
else:
result = last
for i in range(next_to_last.exp):
result = next_to_last.base.apply_operator(result)
if result == 0:
break
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
else:
return m
elif isinstance(next_to_last, FockStateBra):
result = InnerProduct(next_to_last, last)
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
else:
return m
else:
return m
def apply_operators(e):
"""
Take a SymPy expression with operators and states and apply the operators.
Examples
========
>>> from sympy.physics.secondquant import apply_operators
>>> from sympy import sympify
>>> apply_operators(sympify(3)+4)
7
"""
e = e.expand()
muls = e.atoms(Mul)
subs_list = [(m, _apply_Mul(m)) for m in iter(muls)]
return e.subs(subs_list)
| FockStateFermionBra |
python | huggingface__transformers | src/transformers/models/dab_detr/modeling_dab_detr.py | {
"start": 3796,
"end": 4906
} | class ____(Seq2SeqModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`):
Reference points (reference points of each layer of the decoder).
"""
intermediate_hidden_states: Optional[torch.FloatTensor] = None
reference_points: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`DabDetrForObjectDetection`].
"""
)
# Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->DabDetr
| DabDetrModelOutput |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 231677,
"end": 231984
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("Commit", graphql_name="node")
| CommitEdge |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-isaacus/tests/test_isaacus_embeddings.py | {
"start": 1166,
"end": 12629
} | class ____:
"""Test IsaacusEmbedding class."""
def test_class_name(self, isaacus_embedding: IsaacusEmbedding) -> None:
"""Test class name."""
assert IsaacusEmbedding.class_name() == "IsaacusEmbedding"
assert isaacus_embedding.class_name() == "IsaacusEmbedding"
def test_init_with_parameters(self) -> None:
"""Test initialization with parameters."""
embedding = IsaacusEmbedding(
model=STUB_MODEL,
api_key=STUB_API_KEY,
base_url=STUB_BASE_URL,
dimensions=1024,
task="retrieval/document",
overflow_strategy="drop_end",
timeout=30.0,
)
assert embedding.model == STUB_MODEL
assert embedding.api_key == STUB_API_KEY
assert embedding.base_url == STUB_BASE_URL
assert embedding.dimensions == 1024
assert embedding.task == "retrieval/document"
assert embedding.overflow_strategy == "drop_end"
assert embedding.timeout == 30.0
def test_init_with_environment_variables(self) -> None:
"""Test initialization with environment variables."""
with patch.dict(
os.environ,
{
"ISAACUS_API_KEY": STUB_API_KEY,
"ISAACUS_BASE_URL": STUB_BASE_URL,
},
):
embedding = IsaacusEmbedding()
assert embedding.model == STUB_MODEL
assert embedding.api_key == STUB_API_KEY
assert embedding.base_url == STUB_BASE_URL
def test_init_missing_api_key(self) -> None:
"""Test initialization with missing API key."""
with pytest.raises(ValueError, match="API key is required"):
IsaacusEmbedding(
base_url=STUB_BASE_URL,
)
def test_get_text_embedding_success(
self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
) -> None:
"""Test successful text embedding."""
with patch.object(
isaacus_embedding._client.embeddings, "create", return_value=mock_response
):
embedding = isaacus_embedding.get_text_embedding("test text")
assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
def test_get_text_embedding_with_task(
self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
) -> None:
"""Test text embedding with task parameter."""
isaacus_embedding.task = "retrieval/document"
with patch.object(
isaacus_embedding._client.embeddings, "create", return_value=mock_response
) as mock_create:
embedding = isaacus_embedding.get_text_embedding("test text")
assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
# Verify task was passed to API
call_kwargs = mock_create.call_args.kwargs
assert call_kwargs["task"] == "retrieval/document"
def test_get_query_embedding_uses_retrieval_query_task(
self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
) -> None:
"""Test that get_query_embedding uses retrieval/query task."""
with patch.object(
isaacus_embedding._client.embeddings, "create", return_value=mock_response
) as mock_create:
embedding = isaacus_embedding.get_query_embedding("test query")
assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
# Verify task was set to retrieval/query
call_kwargs = mock_create.call_args.kwargs
assert call_kwargs["task"] == "retrieval/query"
def test_get_text_embedding_error(
self, isaacus_embedding: IsaacusEmbedding
) -> None:
"""Test text embedding with error."""
with patch.object(
isaacus_embedding._client.embeddings,
"create",
side_effect=Exception("API error"),
):
with pytest.raises(ValueError, match="Unable to embed text"):
isaacus_embedding.get_text_embedding("test text")
def test_get_text_embedding_no_embeddings_returned(
self, isaacus_embedding: IsaacusEmbedding
) -> None:
"""Test text embedding when no embeddings are returned."""
mock_response = MagicMock()
mock_response.embeddings = []
with patch.object(
isaacus_embedding._client.embeddings, "create", return_value=mock_response
):
with pytest.raises(ValueError, match="No embeddings returned from API"):
isaacus_embedding.get_text_embedding("test text")
def test_get_text_embeddings_batch(
self, isaacus_embedding: IsaacusEmbedding
) -> None:
"""Test batch text embeddings."""
# Create mock response with multiple embeddings
mock_emb1 = MagicMock()
mock_emb1.embedding = [0.1, 0.2, 0.3]
mock_emb1.index = 0
mock_emb2 = MagicMock()
mock_emb2.embedding = [0.4, 0.5, 0.6]
mock_emb2.index = 1
mock_emb3 = MagicMock()
mock_emb3.embedding = [0.7, 0.8, 0.9]
mock_emb3.index = 2
mock_response = MagicMock()
mock_response.embeddings = [mock_emb1, mock_emb2, mock_emb3]
texts = ["text1", "text2", "text3"]
with patch.object(
isaacus_embedding._client.embeddings, "create", return_value=mock_response
):
embeddings = isaacus_embedding.get_text_embedding_batch(texts)
assert len(embeddings) == 3
assert embeddings[0] == [0.1, 0.2, 0.3]
assert embeddings[1] == [0.4, 0.5, 0.6]
assert embeddings[2] == [0.7, 0.8, 0.9]
def test_get_text_embeddings_maintains_order(
self, isaacus_embedding: IsaacusEmbedding
) -> None:
"""Test that batch embeddings maintain correct order."""
# Create mock response with embeddings out of order
mock_emb1 = MagicMock()
mock_emb1.embedding = [0.1, 0.2, 0.3]
mock_emb1.index = 0
mock_emb2 = MagicMock()
mock_emb2.embedding = [0.4, 0.5, 0.6]
mock_emb2.index = 1
mock_response = MagicMock()
# Return embeddings out of order
mock_response.embeddings = [mock_emb2, mock_emb1]
texts = ["text1", "text2"]
with patch.object(
isaacus_embedding._client.embeddings, "create", return_value=mock_response
):
embeddings = isaacus_embedding.get_text_embedding_batch(texts)
# Should be sorted by index
assert embeddings[0] == [0.1, 0.2, 0.3]
assert embeddings[1] == [0.4, 0.5, 0.6]
@pytest.mark.asyncio
async def test_aget_text_embedding_success(
self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
) -> None:
"""Test successful async text embedding."""
with patch.object(
isaacus_embedding._aclient.embeddings, "create", return_value=mock_response
):
embedding = await isaacus_embedding.aget_text_embedding("test text")
assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
@pytest.mark.asyncio
async def test_aget_query_embedding_uses_retrieval_query_task(
self, isaacus_embedding: IsaacusEmbedding, mock_response: MagicMock
) -> None:
"""Test that aget_query_embedding uses retrieval/query task."""
with patch.object(
isaacus_embedding._aclient.embeddings, "create", return_value=mock_response
) as mock_create:
embedding = await isaacus_embedding.aget_query_embedding("test query")
assert embedding == [0.1, 0.2, 0.3, 0.4, 0.5]
# Verify task was set to retrieval/query
call_kwargs = mock_create.call_args.kwargs
assert call_kwargs["task"] == "retrieval/query"
@pytest.mark.asyncio
async def test_aget_text_embedding_error(
self, isaacus_embedding: IsaacusEmbedding
) -> None:
"""Test async text embedding with error."""
with patch.object(
isaacus_embedding._aclient.embeddings,
"create",
side_effect=Exception("API error"),
):
with pytest.raises(ValueError, match="Unable to embed text"):
await isaacus_embedding.aget_text_embedding("test text")
@pytest.mark.asyncio
async def test_aget_text_embeddings_batch(
self, isaacus_embedding: IsaacusEmbedding
) -> None:
"""Test async batch text embeddings."""
# Create mock response with multiple embeddings
mock_emb1 = MagicMock()
mock_emb1.embedding = [0.1, 0.2, 0.3]
mock_emb1.index = 0
mock_emb2 = MagicMock()
mock_emb2.embedding = [0.4, 0.5, 0.6]
mock_emb2.index = 1
mock_response = MagicMock()
mock_response.embeddings = [mock_emb1, mock_emb2]
texts = ["text1", "text2"]
with patch.object(
isaacus_embedding._aclient.embeddings, "create", return_value=mock_response
):
embeddings = await isaacus_embedding.aget_text_embedding_batch(texts)
assert len(embeddings) == 2
assert embeddings[0] == [0.1, 0.2, 0.3]
assert embeddings[1] == [0.4, 0.5, 0.6]
def test_prepare_request_params_basic(
self, isaacus_embedding: IsaacusEmbedding
) -> None:
"""Test _prepare_request_params with basic parameters."""
params = isaacus_embedding._prepare_request_params("test text")
assert params["model"] == STUB_MODEL
assert params["texts"] == "test text"
assert "task" not in params # No task set by default
def test_prepare_request_params_with_all_options(self) -> None:
"""Test _prepare_request_params with all options set."""
embedding = IsaacusEmbedding(
model=STUB_MODEL,
api_key=STUB_API_KEY,
base_url=STUB_BASE_URL,
dimensions=1024,
task="retrieval/document",
overflow_strategy="drop_end",
)
params = embedding._prepare_request_params("test text")
assert params["model"] == STUB_MODEL
assert params["texts"] == "test text"
assert params["task"] == "retrieval/document"
assert params["dimensions"] == 1024
assert params["overflow_strategy"] == "drop_end"
def test_prepare_request_params_task_override(
self, isaacus_embedding: IsaacusEmbedding
) -> None:
"""Test _prepare_request_params with task override."""
isaacus_embedding.task = "retrieval/document"
params = isaacus_embedding._prepare_request_params(
"test text", task_override="retrieval/query"
)
# Override should take precedence
assert params["task"] == "retrieval/query"
def test_embedding_dimensions(self, isaacus_embedding: IsaacusEmbedding) -> None:
"""Test that embeddings have the expected dimensions."""
mock_emb = MagicMock()
mock_emb.embedding = [0.1] * 1792 # Default Kanon 2 dimension
mock_emb.index = 0
mock_response = MagicMock()
mock_response.embeddings = [mock_emb]
with patch.object(
isaacus_embedding._client.embeddings, "create", return_value=mock_response
):
embedding = isaacus_embedding.get_text_embedding("test text")
assert len(embedding) == 1792
assert all(isinstance(x, float) for x in embedding)
| TestIsaacusEmbedding |
python | django__django | tests/model_inheritance/models.py | {
"start": 539,
"end": 811
} | class ____(models.Model):
name = models.CharField(max_length=50)
age = models.PositiveIntegerField()
class Meta:
abstract = True
ordering = ["name"]
def __str__(self):
return "%s %s" % (self.__class__.__name__, self.name)
| CommonInfo |
python | facebook__pyre-check | scripts/compare_pysa_models_to_json.py | {
"start": 1239,
"end": 13327
} | class ____(TypedDict):
parameters: Dict[str, TaintModel]
return_model: TaintModel
def make_default_taint_model() -> TaintModel:
return {
"sources": set(),
"sinks": set(),
"tito": set(),
}
def make_default_target_model() -> TargetModel:
return {
"parameters": defaultdict(make_default_taint_model),
"return_model": make_default_taint_model(),
}
def parse_kinds(taints: List[Dict[str, Any]]) -> Set[str]:
"""
Parse the list of sources/sinks/tito from a Pysa JSON output
dump, e.g.
[ { "decl": null, "kinds": [ { "kind": "Test" } ]
into a set consisting of just the leaf names, i.e.
{ "Test" }
"""
kinds = set()
for taint in taints:
for kind in taint.get("kinds", []):
kinds.add(kind["kind"])
for kind in taint.get("leaves", []):
kinds.add(kind["kind"])
return kinds
def json_to_parsed_model(taint_data: List[Dict[str, Any]]) -> TargetModel:
"""
Parse the list of taint models from a Pysa JSON output dump, e.g.
[{
"kind": "model",
"data": {
"callable": "foo.bar.some_callable",
"sources": [
{
"port": "formal(data)",
"taint": [...]
}
]
"sinks": [...]
"tito": [...]
}
}]
into the form
{
'parameters': {'x': {'sources': {'A'}, 'sinks': {}, 'titos': {} }, ...},
'return_model': {'sources': {}, 'sinks': {'B'}, 'tito': {}}
}
"""
result: TargetModel = make_default_target_model()
for data in taint_data:
if "data" not in data:
continue
model = data["data"]
for model_type in ANNOTATION_TO_MODEL_TYPE.values():
if model_type in model:
for entry in model[model_type]:
port = entry["port"]
taints = parse_kinds(entry["taint"])
if port == "result":
result["return_model"][model_type].update(taints)
else:
# TODO(sym): This currently does not support 'AppliesTo'
# models.
port = entry["port"].replace("formal(", "").replace(")", "")
result["parameters"][port][model_type].update(taints)
return result
def get_models_from_json_file(path: str) -> Dict[str, TargetModel]:
"""
Process a JSON file and return a dictionary of callables and their models,
in the form:
{
'parameters': {'x': {'TaintSource[A]'}},
'return_model': {'TaintSink[B]'}
}
"""
json_models: Dict[str, TargetModel] = defaultdict(make_default_target_model)
with Path(path).open() as json_file:
for entry in json.loads(json_file.read()):
for _, models in entry.items():
for json_model in models:
callable_name = json_model["callable"]
model = json_to_parsed_model(json_model["model"])
json_models[callable_name]["parameters"].update(model["parameters"])
json_models[callable_name]["return_model"].update(
model["return_model"]
)
return json_models
def get_callable_model_from_line(line: str) -> Optional[Tuple[str, TargetModel]]:
match = PYSA_CALLABLE_MODEL_PATTERN.match(line)
if not match:
return None
result = make_default_target_model()
callable_name = match.group("callable_name")
parameters = match.group("parameters")
return_model = match.group("return_model")
if not callable_name and (not parameters and not return_model):
return None
annotated_parameters = PARAMETERS_ANNOTATION_PATTERN.findall(parameters)
for (
parameter_name,
model_annotation,
leaves,
_,
_,
) in annotated_parameters:
if not parameter_name or not model_annotation or not leaves:
continue
model_type = ANNOTATION_TO_MODEL_TYPE[model_annotation]
parameter_model = {annotation.strip() for annotation in leaves.split(",")}
# pyre-fixme[26]: TypedDict key must be a string literal.
result["parameters"][parameter_name][model_type].update(parameter_model)
if return_model:
annotation_match = RETURN_ANNOTATION_PATTERN.match(return_model)
if not annotation_match or None in annotation_match.groups():
return None
model_type = ANNOTATION_TO_MODEL_TYPE[
annotation_match.group("model_type").strip()
]
return_model = {
annotation.strip()
for annotation in annotation_match.group("model_leaves").split(",")
}
# pyre-fixme[26]: TypedDict key must be a string literal.
result["return_model"][model_type].update(return_model)
return (callable_name, result)
def get_attribute_model_from_line(line: str) -> Optional[Tuple[str, TargetModel]]:
match = PYSA_ATTRIBUTE_MODEL_PATTERN.match(line)
if not match:
return None
result = make_default_target_model()
attribute_name = "Obj{{{}}}".format(match.group("attribute_name"))
attribute_model = match.group("attribute_model")
if not attribute_name or not attribute_model:
return None
annotation_match = RETURN_ANNOTATION_PATTERN.match(attribute_model)
if not annotation_match or None in annotation_match.groups():
return None
model_type = ANNOTATION_TO_MODEL_TYPE[annotation_match.group("model_type").strip()]
attribute_model_leaves = {
annotation.strip()
for annotation in annotation_match.group("model_leaves").split(", ")
}
if model_type == "sources":
result["return_model"][model_type].update(attribute_model_leaves)
else:
result["parameters"]["$global"][
# pyre-fixme[26]: TypedDict key must be a string literal.
model_type
].update(attribute_model_leaves)
return (attribute_name, result)
def get_models_from_pysa_file(path: str) -> Dict[str, TargetModel]:
"""
Process a .pysa file with models in the form of:
def foo.bar(x: TaintSource[A], b) -> TaintSink[B]: ...
and return a dictionary of callables and their models in the form:
{
'parameters': {'x': {'sources': {'A', ...}, 'sinks': ... }, ...},
'return_model': {'sources': {}, 'sinks': {'B'}, 'tito': {}}
}
IMPORTANT: Note that this only works on .pysa files where:
1. All the models are self-contained on a single line.
2. Models do not contain ViaTag[...], AppliesTo[...] syntax
This script was originally intended to compare models that were generated
by the existing Python model generators, so it should be noted that
this will likely not work with most user-defined .pysa files.
"""
pysa_models: Dict[str, TargetModel] = defaultdict(make_default_target_model)
skipped = 0
with Path(path).open() as pysa_file:
for line in pysa_file:
# This is a quick hack/heuristic to skip lines with no models in them
# since regex matching can be more costly.
if "[" not in line:
skipped += 1
continue
if "def " in line:
result = get_callable_model_from_line(line)
else:
result = get_attribute_model_from_line(line)
if result:
name, model = result
for parameter in model["parameters"]:
for model_type in model["parameters"][parameter]:
# pyre-fixme[26]: TypedDict key must be a string literal.
pysa_models[name]["parameters"][parameter][model_type].update(
# pyre-fixme[26]: TypedDict key must be a string literal.
model["parameters"][parameter][model_type]
)
pysa_models[name]["parameters"].update(model["parameters"])
for model_type in model["return_model"]:
# pyre-fixme[26]: TypedDict key must be a string literal.
pysa_models[name]["return_model"][model_type].update(
# pyre-fixme[26]: TypedDict key must be a string literal.
model["return_model"][model_type]
)
else:
skipped += 1
LOG.warning(f"Skipped {skipped} lines in .pysa (no models found or were invalid).")
return pysa_models
def main() -> None:
parser = argparse.ArgumentParser(
description="A script to compare models in a .pysa file "
"to the JSON model dump generated by Pysa."
)
parser.add_argument(
"-j",
"--json",
required=True,
type=str,
help="Path of the JSON file containing Pysa's taint output dump.",
)
parser.add_argument(
"-p",
"--pysa",
required=True,
type=str,
help=("Path of the .pysa model file."),
)
arguments = parser.parse_args()
logging.basicConfig(
format="[%(asctime)s][%(levelname)s]: %(message)s", level=logging.INFO
)
json_models: Dict[str, TargetModel] = get_models_from_json_file(arguments.json)
pysa_models: Dict[str, TargetModel] = get_models_from_pysa_file(arguments.pysa)
# Models in .json that differ from the .pysa
diff_json = {
k: v
for k, v in json_models.items()
if not (k in pysa_models and json_models[k] == pysa_models[k])
}
# Models in the .pysa that differ from the .json
diff_pysa = {
k: v
for k, v in pysa_models.items()
if not (k in json_models and pysa_models[k] == json_models[k])
}
# Pysa skips analyzing things that inherit from e.g. testing.unittest.UnitTest by
# default, which is why the model query results are missing a few models compared to
# the Python model generator. Here we move assume all callables with 'test' in their
# name are tests and move them to a separate section to not clutter the diff
# results.
diff_pysa_test = {k: v for k, v in diff_pysa.items() if "test" in k}
diff_pysa_non_test = {k: v for k, v in diff_pysa.items() if "test" not in k}
# Print the results.
diff_json_message = "\n".join(
[
"{}\nIn JSON: {}\nIn .pysa: {}\n".format(
callable_name,
json_models[callable_name],
pysa_models[callable_name] if callable_name in pysa_models else {},
)
for callable_name in sorted(diff_json.keys())
]
)
diff_pysa_test_message = "\n".join(
[
"{}\nIn .pysa: {}\nIn JSON: {}\n".format(
callable_name,
pysa_models[callable_name],
json_models[callable_name] if callable_name in json_models else {},
)
for callable_name in sorted(diff_pysa_test.keys())
]
)
diff_pysa_non_test_message = "\n".join(
[
"{}\nIn .pysa: {}\nIn JSON: {}\n".format(
callable_name,
pysa_models[callable_name],
json_models[callable_name] if callable_name in json_models else {},
)
for callable_name in sorted(diff_pysa_non_test.keys())
]
)
LOG.info(
f""""
-- RESULTS --
Total models in JSON: {len(json_models)}
Total models in .pysa: {len(pysa_models)}
-------
Models in JSON but not in .pysa: {len(diff_json)}
{diff_json_message}
-------
Models in .pysa but not in JSON (test): {len(diff_pysa_test)}
{diff_pysa_test_message}
-------
Models in .pysa but not in JSON (non-test): {len(diff_pysa_non_test)}
{diff_pysa_non_test_message}
"""
)
if __name__ == "__main__":
main()
| TargetModel |
python | facebook__pyre-check | tools/generate_taint_models/tests/get_request_specific_data_test.py | {
"start": 384,
"end": 1367
} | class ____(unittest.TestCase):
def test_compute_models(self) -> None:
source = "TaintSource[RequestSpecificData]"
self.assertEqual(
[
*map(
str,
RequestSpecificDataGenerator(
django_urls=MagicMock()
).compute_models(all_functions),
)
],
[
f"def {qualifier}.TestClass.methodA(self: {source}, x: {source}): ...",
f"def {qualifier}.TestClass.methodB(self: {source}, *args: {source})"
": ...",
f"def {qualifier}.testA(): ...",
f"def {qualifier}.testB(x: {source}): ...",
f"def {qualifier}.testC(x: {source}): ...",
f"def {qualifier}.testD(x: {source}, *args: {source}): ...",
f"def {qualifier}.testE(x: {source}, **kwargs: {source}): ...",
],
)
| GetRequestSpecificDataTest |
python | ionelmc__pytest-benchmark | tests/test_storage.py | {
"start": 1718,
"end": 21262
} | class ____(BenchmarkSession):
def __init__(self, name_format):
self.histogram = True
self.verbose = False
self.quiet = False
self.benchmarks = []
self.performance_regressions = []
self.sort = 'min'
self.compare = '0001'
logger = logging.getLogger(__name__)
self.logger = Namespace(
debug=lambda *args, **_kwargs: logger.debug(*args),
info=lambda *args, **_kwargs: logger.info(*args),
warning=lambda *args, **_kwargs: logger.warning(*args),
error=lambda *args, **_kwargs: logger.error(*args),
)
self.machine_id = 'FoobarOS'
self.machine_info = {'foo': 'bar'}
self.save = self.autosave = self.json = False
self.name_format = NAME_FORMATTERS[name_format]
self.options = {
'min_rounds': 123,
'min_time': 234,
'max_time': 345,
'cprofile': False,
}
self.cprofile_sort_by = 'cumtime'
self.cprofile_loops = 1
self.cprofile_top = 25
self.compare_fail = []
self.config = Namespace(
hook=Namespace(
pytest_benchmark_scale_unit=pytest_benchmark_scale_unit,
pytest_benchmark_group_stats=pytest_benchmark_group_stats,
pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
pytest_benchmark_update_machine_info=lambda **kwargs: None,
pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
pytest_benchmark_generate_json=pytest_benchmark_generate_json,
pytest_benchmark_update_json=lambda **kwargs: None,
pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
pytest_benchmark_update_commit_info=lambda **kwargs: None,
)
)
self.storage = FileStorage(str(STORAGE), default_machine_id=get_machine_id(), logger=self.logger)
self.group_by = 'group'
self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr', 'outliers', 'rounds', 'iterations', 'ops']
for bench_file, data in reversed(list(self.storage.load('[0-9][0-9][0-9][0-9]_*'))):
self.benchmarks.extend(
Namespace(
as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench, cprofile='cumtime': dict(
_bench, **_bench['stats']
)
if flat
else dict(_bench),
name=bench['name'],
fullname=bench['fullname'],
group=bench['group'],
options=bench['options'],
has_error=False,
params=None,
**bench['stats'],
)
for bench in data['benchmarks']
)
break
text_type = str
def force_text(text):
if isinstance(text, text_type):
return text
else:
return text.decode('utf-8')
def force_bytes(text):
if isinstance(text, text_type):
return text.encode('utf-8')
else:
return text
@pytest.fixture(params=['short', 'normal', 'long', 'trial'])
def name_format(request):
return request.param
@pytest.fixture
def sess(request, name_format):
return MockSession(name_format)
def make_logger(sess):
output = StringIO()
sess.logger = sess.storage.logger = Namespace(
warning=lambda text, **opts: output.write(force_text(text) + '\n'),
info=lambda text, **opts: output.write(force_text(text) + '\n'),
error=lambda text: output.write(force_text(text) + '\n'),
)
return output
def test_rendering(sess):
output = make_logger(sess)
sess.histogram = os.path.join('docs', 'sample')
sess.compare = '*/*'
sess.sort = 'name'
sess.handle_loading()
sess.finish()
sess.display(
Namespace(
ensure_newline=lambda: None,
write_line=lambda line, **opts: output.write(force_text(line) + '\n'),
write=lambda text, **opts: output.write(force_text(text)),
rewrite=lambda text, **opts: output.write(force_text(text)),
)
)
def test_regression_checks(sess, name_format):
output = make_logger(sess)
sess.handle_loading()
sess.performance_regressions = []
sess.compare_fail = [PercentageRegressionCheck('stddev', 5), DifferenceRegressionCheck('max', 0.000001)]
sess.finish()
pytest.raises(
PerformanceRegression,
sess.display,
Namespace(
ensure_newline=lambda: None,
write_line=lambda line, **opts: output.write(force_text(line) + '\n'),
write=lambda text, **opts: output.write(force_text(text)),
rewrite=lambda text, **opts: output.write(force_text(text)),
),
)
print(output.getvalue())
assert (
sess.performance_regressions
== {
'normal': [
(
'test_xfast_parametrized[0] (0001_b87b9aa)',
"Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000",
),
(
'test_xfast_parametrized[0] (0001_b87b9aa)',
"Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000",
),
],
'short': [
('xfast_parametrized[0] (0001)', "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
('xfast_parametrized[0] (0001)', "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000"),
],
'long': [
(
'tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
"Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000",
),
(
'tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
"Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000",
),
],
'trial': [
('0001', "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
('0001', "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000"),
],
}[name_format]
)
output = make_logger(sess)
pytest.raises(PerformanceRegression, sess.check_regressions)
print(output.getvalue())
assert (
output.getvalue()
== {
'short': """Performance has regressed:
\txfast_parametrized[0] (0001) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\txfast_parametrized[0] (0001) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
'trial': """Performance has regressed:
\t0001 - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\t0001 - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
}[name_format]
)
@pytest.mark.skipif(sys.version_info[:2] < (2, 7), reason='Something weird going on, see: https://bugs.python.org/issue4482')
def test_regression_checks_inf(sess, name_format):
output = make_logger(sess)
sess.compare = '0002'
sess.handle_loading()
sess.performance_regressions = []
sess.compare_fail = [PercentageRegressionCheck('stddev', 5), DifferenceRegressionCheck('max', 0.000001)]
sess.finish()
pytest.raises(
PerformanceRegression,
sess.display,
Namespace(
ensure_newline=lambda: None,
write_line=lambda line, **opts: output.write(force_text(line) + '\n'),
write=lambda text, **opts: output.write(force_text(text)),
rewrite=lambda text, **opts: output.write(force_text(text)),
),
)
print(output.getvalue())
assert (
sess.performance_regressions
== {
'normal': [
('test_xfast_parametrized[0] (0002_b87b9aa)', "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
(
'test_xfast_parametrized[0] (0002_b87b9aa)',
"Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000",
),
],
'short': [
('xfast_parametrized[0] (0002)', "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
('xfast_parametrized[0] (0002)', "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000"),
],
'long': [
(
'tests/test_normal.py::test_xfast_parametrized[0] '
'(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
"Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000",
),
(
'tests/test_normal.py::test_xfast_parametrized[0] '
'(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
"Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000",
),
],
'trial': [
('0002', "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
('0002', "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000"),
],
}[name_format]
)
output = make_logger(sess)
pytest.raises(PerformanceRegression, sess.check_regressions)
print(output.getvalue())
assert (
output.getvalue()
== {
'short': """Performance has regressed:
\txfast_parametrized[0] (0002) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\txfast_parametrized[0] (0002) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
'trial': """Performance has regressed:
\t0002 - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\t0002 - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
}[name_format]
)
def test_compare_1(sess, LineMatcher):
output = make_logger(sess)
sess.handle_loading()
sess.finish()
sess.display(
Namespace(
ensure_newline=lambda: None,
write_line=lambda line, **opts: output.write(force_text(line) + '\n'),
write=lambda text, **opts: output.write(force_text(text)),
rewrite=lambda text, **opts: output.write(force_text(text)),
)
)
print(output.getvalue())
LineMatcher(output.getvalue().splitlines()).fnmatch_lines(
[
'Benchmark machine_info is different. Current: {"foo": "bar"} VS saved: {"machine": "x86_64", "node": "minibox", "processor": "x86_64", "python_compiler": "GCC 4.6.3", "python_implementation": "CPython", "python_version": "2.7.3", "release": "3.13.0-55-generic", "system": "Linux"} (location: tests/test_storage).',
'Comparing against benchmarks from: 0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes.json',
'',
'*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
'Name (time in ns) * Min * Max Mean StdDev Median IQR Outliers Rounds Iterations OPS (Mops/s) *',
'-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
'*0001* 217.3145 (1.0) 11*447.3891 (1.0) 262.2408 (1.00) 214.0442 (1.0) 220.1664 (1.00) 38.2154 (2.03) 90;1878 9987 418 3.8133 (1.00)*',
'*NOW* 217.9511 (1.00) 13*290.0380 (1.16) 261.2051 (1.0) 263.9842 (1.23) 220.1638 (1.0) 18.8080 (1.0) 160;1726 9710 431 3.8284 (1.0)*',
'--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
'Legend:',
' Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
' OPS: Operations Per Second, computed as 1 / Mean',
]
)
def test_compare_2(sess, LineMatcher):
output = make_logger(sess)
sess.compare = '0002'
sess.handle_loading()
sess.finish()
sess.display(
Namespace(
ensure_newline=lambda: None,
write_line=lambda line, **opts: output.write(force_text(line) + '\n'),
section=lambda line, **opts: output.write(force_text(line) + '\n'),
write=lambda text, **opts: output.write(force_text(text)),
rewrite=lambda text, **opts: output.write(force_text(text)),
)
)
print(output.getvalue())
LineMatcher(output.getvalue().splitlines()).fnmatch_lines(
[
'Benchmark machine_info is different. Current: {"foo": "bar"} VS saved: {"machine": "x86_64", "node": "minibox", "processor": "x86_64", "python_compiler": "GCC 4.6.3", "python_implementation": "CPython", "python_version": "2.7.3", "release": "3.13.0-55-generic", "system": "Linux"} (location: tests/test_storage).',
'Comparing against benchmarks from: 0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes.json',
'',
'*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
'Name (time in ns) * Min *Max Mean StdDev Median IQR Outliers Rounds Iterations OPS (Mops/s)*',
'--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
'*0002* 216.9028 (1.0) 7*739.2997 (1.0) 254.0585 (1.0) 0.0000 (1.0) 219.8103 (1.0) 27.3309 (1.45) 235;1688 11009 410 3.9361 (1.0)*',
'*NOW* 217.9511 (1.00) 13*290.0380 (1.72) 261.2051 (1.03) 263.9842 (inf) 220.1638 (1.00) 18.8080 (1.0) 160;1726 9710 431 3.8284 (0.97)*',
'--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
'Legend:',
' Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
' OPS: Operations Per Second, computed as 1 / Mean',
]
)
@freeze_time('2015-08-15T00:04:18.687119')
def test_save_json(sess, tmpdir, monkeypatch):
monkeypatch.setattr(plugin, '__version__', '2.5.0')
sess.save = False
sess.autosave = False
sess.json = LooseFileLike()
sess.save_data = False
sess.handle_saving()
assert tmpdir.listdir() == []
assert json.loads(sess.json.getvalue().decode()) == JSON_DATA
@freeze_time('2015-08-15T00:04:18.687119')
def test_save_with_name(sess, tmpdir, monkeypatch):
monkeypatch.setattr(plugin, '__version__', '2.5.0')
sess.save = 'foobar'
sess.autosave = True
sess.json = None
sess.save_data = False
sess.storage.path = Path(str(tmpdir))
sess.handle_saving()
files = list(Path(str(tmpdir)).rglob('*.json'))
print(files)
assert len(files) == 1
assert json.loads(files[0].read_text(encoding='utf8')) == JSON_DATA
@freeze_time('2015-08-15T00:04:18.687119')
def test_save_no_name(sess, tmpdir, monkeypatch):
monkeypatch.setattr(plugin, '__version__', '2.5.0')
sess.save = True
sess.autosave = True
sess.json = None
sess.save_data = False
sess.storage.path = Path(str(tmpdir))
sess.handle_saving()
files = list(Path(str(tmpdir)).rglob('*.json'))
assert len(files) == 1
assert json.loads(files[0].read_text(encoding='utf8')) == JSON_DATA
@freeze_time('2015-08-15T00:04:18.687119')
def test_save_with_error(sess, tmpdir, monkeypatch):
monkeypatch.setattr(plugin, '__version__', '2.5.0')
sess.save = True
sess.autosave = True
sess.json = None
sess.save_data = False
sess.storage.path = Path(str(tmpdir))
for bench in sess.benchmarks:
bench.has_error = True
sess.handle_saving()
files = list(Path(str(tmpdir)).rglob('*.json'))
assert len(files) == 1
assert json.loads(files[0].read_text(encoding='utf8')) == {
'benchmarks': [],
'commit_info': {'foo': 'bar'},
'datetime': '2015-08-15T00:04:18.687119+00:00',
'machine_info': {'foo': 'bar'},
'version': '2.5.0',
}
@freeze_time('2015-08-15T00:04:18.687119')
def test_autosave(sess, tmpdir, monkeypatch):
monkeypatch.setattr(plugin, '__version__', '2.5.0')
sess.save = False
sess.autosave = True
sess.json = None
sess.save_data = False
sess.storage.path = Path(str(tmpdir))
sess.handle_saving()
files = list(Path(str(tmpdir)).rglob('*.json'))
assert len(files) == 1
assert json.loads(files[0].read_text(encoding='utf8')) == JSON_DATA
| MockSession |
python | ray-project__ray | rllib/models/tf/tf_action_dist.py | {
"start": 687,
"end": 1714
} | class ____(ActionDistribution):
"""TF-specific extensions for building action distributions."""
@override(ActionDistribution)
def __init__(self, inputs: List[TensorType], model: ModelV2):
super().__init__(inputs, model)
self.sample_op = self._build_sample_op()
self.sampled_action_logp_op = self.logp(self.sample_op)
def _build_sample_op(self) -> TensorType:
"""Implement this instead of sample(), to enable op reuse.
This is needed since the sample op is non-deterministic and is shared
between sample() and sampled_action_logp().
"""
raise NotImplementedError
@override(ActionDistribution)
def sample(self) -> TensorType:
"""Draw a sample from the action distribution."""
return self.sample_op
@override(ActionDistribution)
def sampled_action_logp(self) -> TensorType:
"""Returns the log probability of the sampled action."""
return self.sampled_action_logp_op
@OldAPIStack
| TFActionDistribution |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 11647,
"end": 12493
} | class ____(BaseActionTranslator):
@property
def action_type(self) -> ActionType:
return ActionType.PAGERDUTY
field_mappings = {
"priority": FieldMapping(
source_field="severity", default_value=str(PAGERDUTY_DEFAULT_SEVERITY)
)
}
@property
def required_fields(self) -> list[str]:
return [
ACTION_FIELD_MAPPINGS[ActionType.PAGERDUTY][
ActionFieldMappingKeys.INTEGRATION_ID_KEY.value
],
ACTION_FIELD_MAPPINGS[ActionType.PAGERDUTY][
ActionFieldMappingKeys.TARGET_IDENTIFIER_KEY.value
],
]
@property
def target_type(self) -> int:
return ActionTarget.SPECIFIC.value
@property
def blob_type(self) -> type[DataBlob]:
return OnCallDataBlob
| PagerDutyActionTranslator |
python | pydantic__pydantic | pydantic-core/tests/test_errors.py | {
"start": 37697,
"end": 49841
} | class ____:
def __str__(self):
return 'custom str'
def test_error_json_unknown():
s = SchemaValidator(core_schema.str_schema())
with pytest.raises(ValidationError) as exc_info:
s.validate_python(Foobar())
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'string_type',
'loc': (),
'msg': 'Input should be a valid string',
'input': HasRepr(IsStr(regex='<.+.test_errors.Foobar object at 0x[a-f0-9]{5,}>', regex_flags=re.I)),
}
]
# insert_assert(exc_info.value.json(include_url=False))
assert exc_info.value.json(include_url=False) == IsJson(
[
{
'type': 'string_type',
'loc': [],
'msg': 'Input should be a valid string',
'input': IsStr(regex='<.+.test_errors.Foobar object at 0x[a-f0-9]{5,}>', regex_flags=re.I),
}
]
)
with pytest.raises(ValidationError) as exc_info:
s.validate_python(CustomStr())
# insert_assert(json.loads(exc_info.value.json(include_url=False)))
assert exc_info.value.json(include_url=False) == IsJson(
[{'type': 'string_type', 'loc': [], 'msg': 'Input should be a valid string', 'input': 'custom str'}]
)
def test_error_json_loc():
s = SchemaValidator(
core_schema.dict_schema(core_schema.str_schema(), core_schema.list_schema(core_schema.int_schema()))
)
with pytest.raises(ValidationError) as exc_info:
s.validate_python({'a': [0, 1, 'x'], 'b': [0, 'y']})
# insert_assert(exc_info.value.json())
assert exc_info.value.json(include_url=False) == IsJson(
[
{
'type': 'int_parsing',
'loc': ['a', 2],
'msg': 'Input should be a valid integer, unable to parse string as an integer',
'input': 'x',
},
{
'type': 'int_parsing',
'loc': ['b', 1],
'msg': 'Input should be a valid integer, unable to parse string as an integer',
'input': 'y',
},
]
)
def test_raise_validation_error():
with pytest.raises(ValidationError, match='1 validation error for Foobar\n') as exc_info:
raise ValidationError.from_exception_data(
'Foobar', [{'type': 'greater_than', 'loc': ('a', 2), 'input': 4, 'ctx': {'gt': 5}}]
)
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{'type': 'greater_than', 'loc': ('a', 2), 'msg': 'Input should be greater than 5', 'input': 4, 'ctx': {'gt': 5}}
]
with pytest.raises(TypeError, match="GreaterThan: 'gt' required in context"):
raise ValidationError.from_exception_data('Foobar', [{'type': 'greater_than', 'loc': ('a', 2), 'input': 4}])
@pytest.mark.parametrize(
'hide_input_in_errors,input_str',
((False, 'type=greater_than, input_value=4, input_type=int'), (True, 'type=greater_than')),
)
def test_raise_validation_error_hide_input(hide_input_in_errors, input_str):
with pytest.raises(ValidationError, match=re.escape(f'Input should be greater than 5 [{input_str}]')):
raise ValidationError.from_exception_data(
'Foobar',
[{'type': 'greater_than', 'loc': ('a', 2), 'input': 4, 'ctx': {'gt': 5}}],
hide_input=hide_input_in_errors,
)
def test_raise_validation_error_json():
with pytest.raises(ValidationError) as exc_info:
raise ValidationError.from_exception_data('Foobar', [{'type': 'none_required', 'loc': [-42], 'input': 'x'}])
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{'type': 'none_required', 'loc': (-42,), 'msg': 'Input should be None', 'input': 'x'}
]
with pytest.raises(ValidationError) as exc_info:
raise ValidationError.from_exception_data(
'Foobar', [{'type': 'none_required', 'loc': (), 'input': 'x'}], 'json'
)
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{'type': 'none_required', 'loc': (), 'msg': 'Input should be null', 'input': 'x'}
]
def test_raise_validation_error_custom():
custom_error = PydanticCustomError(
'my_error', 'this is a custom error {missed} {foo} {bar}', {'foo': 'X', 'bar': 42}
)
with pytest.raises(ValidationError) as exc_info:
raise ValidationError.from_exception_data('Foobar', [{'type': custom_error, 'input': 'x'}])
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'my_error',
'loc': (),
'msg': 'this is a custom error {missed} X 42',
'input': 'x',
'ctx': {'foo': 'X', 'bar': 42},
}
]
@pytest.mark.parametrize(
'msg,result_msg', [('my custom error', 'my custom error'), ('my custom error {foo}', "my custom error {'bar': []}")]
)
def test_raise_validation_error_custom_nested_ctx(msg: str, result_msg: str):
ctx = {'foo': {'bar': []}}
custom_error = PydanticCustomError('my_error', msg, ctx)
with pytest.raises(ValidationError) as exc_info:
raise ValidationError.from_exception_data('Foobar', [{'type': custom_error, 'input': 'x'}])
expected_error_detail = {'type': 'my_error', 'loc': (), 'msg': result_msg, 'input': 'x', 'ctx': ctx}
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [expected_error_detail]
assert exc_info.value.json(include_url=False) == IsJson([{**expected_error_detail, 'loc': []}])
def test_raise_validation_error_known_class_ctx():
custom_data = Foobar()
ctx = {'gt': 10, 'foo': {'bar': custom_data}}
with pytest.raises(ValidationError) as exc_info:
raise ValidationError.from_exception_data('MyTitle', [{'type': 'greater_than', 'input': 9, 'ctx': ctx}])
expected_error_detail = {
'type': 'greater_than',
'loc': (),
'msg': 'Input should be greater than 10',
'input': 9,
'ctx': ctx,
}
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [expected_error_detail]
assert exc_info.value.json(include_url=False) == IsJson(
[{**expected_error_detail, 'loc': [], 'ctx': {'gt': 10, 'foo': {'bar': str(custom_data)}}}]
)
def test_raise_validation_error_custom_class_ctx():
custom_data = Foobar()
ctx = {'foo': {'bar': custom_data}}
custom_error = PydanticCustomError('my_error', 'my message', ctx)
assert custom_error.context == ctx
with pytest.raises(ValidationError) as exc_info:
raise ValidationError.from_exception_data('MyTitle', [{'type': custom_error, 'input': 'x'}])
expected_error_detail = {'type': 'my_error', 'loc': (), 'msg': 'my message', 'input': 'x', 'ctx': ctx}
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [expected_error_detail]
assert exc_info.value.json(include_url=False) == IsJson(
[{**expected_error_detail, 'loc': [], 'ctx': {'foo': {'bar': str(custom_data)}}}]
)
def test_loc_with_dots(pydantic_version):
v = SchemaValidator(
core_schema.typed_dict_schema(
{
'a': core_schema.typed_dict_field(
core_schema.tuple_positional_schema([core_schema.int_schema(), core_schema.int_schema()]),
validation_alias='foo.bar',
)
}
)
)
assert v.validate_python({'foo.bar': (41, 42)}) == {'a': (41, 42)}
with pytest.raises(ValidationError) as exc_info:
v.validate_python({'foo.bar': ('x', 42)})
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=True) == [
{
'type': 'int_parsing',
'loc': ('foo.bar', 0),
'msg': 'Input should be a valid integer, unable to parse string as an integer',
'input': 'x',
'url': f'https://errors.pydantic.dev/{pydantic_version}/v/int_parsing',
}
]
# insert_assert(str(exc_info.value))
assert str(exc_info.value) == (
'1 validation error for typed-dict\n'
'`foo.bar`.0\n'
' Input should be a valid integer, unable to parse string as an integer '
"[type=int_parsing, input_value='x', input_type=str]"
+ (
f'\n For further information visit https://errors.pydantic.dev/{pydantic_version}/v/int_parsing'
if os.environ.get('PYDANTIC_ERRORS_INCLUDE_URL', '1') != 'false'
else ''
)
)
def test_hide_input_in_error() -> None:
s = SchemaValidator(core_schema.int_schema())
with pytest.raises(ValidationError) as exc_info:
s.validate_python('definitely not an int')
for error in exc_info.value.errors(include_input=False):
assert 'input' not in error
def test_hide_input_in_json() -> None:
s = SchemaValidator(core_schema.int_schema())
with pytest.raises(ValidationError) as exc_info:
s.validate_python('definitely not an int')
for error in exc_info.value.errors(include_input=False):
assert 'input' not in error
@pytest.mark.skipif(
sys.version_info < (3, 9) and sys.implementation.name == 'pypy',
reason='PyPy before 3.9 cannot pickle this correctly',
)
def test_validation_error_pickle() -> None:
s = SchemaValidator(core_schema.int_schema())
with pytest.raises(ValidationError) as exc_info:
s.validate_python('definitely not an int')
original = exc_info.value
roundtripped = pickle.loads(pickle.dumps(original))
assert original.errors() == roundtripped.errors()
def test_errors_include_url() -> None:
if 'PYDANTIC_ERRORS_INCLUDE_URL' in os.environ:
raise pytest.skip('cannot test when envvar is set')
s = SchemaValidator(core_schema.int_schema())
with pytest.raises(ValidationError) as exc_info:
s.validate_python('definitely not an int')
assert 'https://errors.pydantic.dev' in repr(exc_info.value)
@pytest.mark.skipif(sys.platform == 'emscripten', reason='no subprocesses on emscripten')
@pytest.mark.parametrize(
('env_var', 'env_var_value', 'expected_to_have_url'),
[
('PYDANTIC_ERRORS_INCLUDE_URL', None, True),
('PYDANTIC_ERRORS_INCLUDE_URL', '1', True),
('PYDANTIC_ERRORS_INCLUDE_URL', 'True', True),
('PYDANTIC_ERRORS_INCLUDE_URL', 'no', False),
('PYDANTIC_ERRORS_INCLUDE_URL', '0', False),
# Legacy environment variable, will raise a deprecation warning:
('PYDANTIC_ERRORS_OMIT_URL', '1', False),
('PYDANTIC_ERRORS_OMIT_URL', None, True),
],
)
def test_errors_include_url_envvar(env_var, env_var_value, expected_to_have_url) -> None:
"""
Test the `PYDANTIC_ERRORS_INCLUDE_URL` environment variable.
Since it can only be set before `ValidationError.__repr__()` is first called,
we need to spawn a subprocess to test it.
"""
code = "import pydantic_core; from pydantic_core import core_schema; pydantic_core.SchemaValidator(core_schema.int_schema()).validate_python('ooo')"
env = os.environ.copy()
# in case the ambient environment has env vars set
env.pop('PYDANTIC_ERRORS_INCLUDE_URL', None)
env.pop('PYDANTIC_ERRORS_OMIT_URL', None)
if env_var_value is not None:
env[env_var] = env_var_value
result = subprocess.run(
[sys.executable, '-W', 'default', '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='utf-8',
env=env,
)
assert result.returncode == 1
if 'PYDANTIC_ERRORS_OMIT_URL' in env:
assert 'PYDANTIC_ERRORS_OMIT_URL is deprecated' in result.stdout
assert ('https://errors.pydantic.dev' in result.stdout) == expected_to_have_url
| CustomStr |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/prefect_kubernetes/jobs.py | {
"start": 17830,
"end": 20353
} | class ____(JobBlock):
"""A block representing a Kubernetes job configuration."""
v1_job: Dict[str, Any] = Field(
default=...,
title="Job Manifest",
description=(
"The Kubernetes job manifest to run. This dictionary can be produced "
"using `yaml.safe_load`."
),
)
api_kwargs: Dict[str, Any] = Field(
default_factory=dict,
title="Additional API Arguments",
description="Additional arguments to include in Kubernetes API calls.",
examples=[{"pretty": "true"}],
)
credentials: KubernetesCredentials = Field(
default_factory=KubernetesCredentials,
description="The credentials to configure a client from.",
)
delete_after_completion: bool = Field(
default=True,
description="Whether to delete the job after it has completed.",
)
interval_seconds: int = Field(
default=5,
description="The number of seconds to wait between job status checks.",
)
namespace: str = Field(
default="default",
description="The namespace to create and run the job in.",
)
timeout_seconds: Optional[int] = Field(
default=None,
description="The number of seconds to wait for the job run before timing out.",
)
_block_type_name = "Kubernetes Job"
_block_type_slug = "k8s-job"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/2d0b896006ad463b49c28aaac14f31e00e32cfab-250x250.png" # noqa: E501
_documentation_url = "https://docs.prefect.io/integrations/prefect-kubernetes" # noqa
@sync_compatible
async def trigger(self):
"""Create a Kubernetes job and return a `KubernetesJobRun` object."""
await create_namespaced_job.fn(
kubernetes_credentials=self.credentials,
new_job=self.v1_job,
namespace=self.namespace,
**self.api_kwargs,
)
return KubernetesJobRun(kubernetes_job=self, v1_job_model=self.v1_job)
@classmethod
def from_yaml_file(
cls: Type[Self], manifest_path: Union[Path, str], **kwargs
) -> Self:
"""Create a `KubernetesJob` from a YAML file.
Args:
manifest_path: The YAML file to create the `KubernetesJob` from.
Returns:
A KubernetesJob object.
"""
with open(manifest_path, "r") as yaml_stream:
yaml_dict = yaml.safe_load(yaml_stream)
return cls(v1_job=yaml_dict, **kwargs)
| KubernetesJob |
python | weaviate__weaviate-python-client | weaviate/collections/aggregations/near_image/sync.py | {
"start": 195,
"end": 258
} | class ____(_NearImageExecutor[ConnectionSync]):
pass
| _NearImage |
python | realpython__materials | inheritance-and-composition/inheritance/employees.py | {
"start": 425,
"end": 618
} | class ____(Employee, SecretaryRole, SalaryPolicy):
def __init__(self, id, name, weekly_salary):
SalaryPolicy.__init__(self, weekly_salary)
super().__init__(id, name)
| Secretary |
python | getsentry__sentry | src/sentry/integrations/slack/message_builder/issues.py | {
"start": 17091,
"end": 29760
} | class ____(BlockSlackMessageBuilder):
"""Build an issue alert notification for Slack"""
def __init__(
self,
group: Group,
event: Event | GroupEvent | None = None,
tags: set[str] | None = None,
identity: RpcIdentity | None = None,
actions: Sequence[MessageAction | BlockKitMessageAction] | None = None,
rules: list[Rule] | None = None,
link_to_event: bool = False,
issue_details: bool = False,
notification: ProjectNotification | None = None,
recipient: Actor | None = None,
is_unfurl: bool = False,
skip_fallback: bool = False,
notes: str | None = None,
) -> None:
super().__init__()
self.group = group
self.event = event
self.tags = tags
self.identity = identity
self.actions = actions
self.rules = rules
self.link_to_event = link_to_event
self.issue_details = issue_details
self.notification = notification
self.recipient = recipient
self.is_unfurl = is_unfurl
self.skip_fallback = skip_fallback
self.notes = notes
self.issue_summary: dict[str, Any] | None = None
def get_title_block(
self,
event_or_group: Event | GroupEvent | Group,
has_action: bool,
title_link: str | None = None,
) -> SlackBlock:
summary_headline = self.get_issue_summary_headline(event_or_group)
title = summary_headline or build_attachment_title(event_or_group)
title_emojis = self.get_title_emoji(has_action)
title_text = f"{title_emojis} <{title_link}|*{escape_slack_text(title)}*>"
return self.get_markdown_block(title_text)
def get_title_emoji(self, has_action: bool) -> str:
is_error_issue = self.group.issue_category == GroupCategory.ERROR
title_emojis: list[str] = []
if has_action:
# if issue is resolved, archived, or assigned, replace circle emojis with white circle
title_emojis = (
ACTION_EMOJI
if is_error_issue
else ACTIONED_CATEGORY_TO_EMOJI.get(self.group.issue_category, [])
)
elif is_error_issue:
level_text = LOG_LEVELS[self.group.level]
title_emojis = LEVEL_TO_EMOJI.get(level_text, [])
else:
title_emojis = CATEGORY_TO_EMOJI.get(self.group.issue_category, [])
return " ".join(title_emojis)
def get_issue_summary_headline(self, event_or_group: Event | GroupEvent | Group) -> str | None:
if self.issue_summary is None:
return None
# issue summary headline is formatted like ErrorType: message...
error_type = build_attachment_title(event_or_group)
text = build_attachment_text(self.group, self.event) or ""
text = text.strip(" \r\n\u2028\u2029")
text = escape_slack_markdown_text(text)
text = text.lstrip(" ")
linebreak_match = re.search(r"\r?\n|\u2028|\u2029", text)
if linebreak_match:
text = text[: linebreak_match.start()].strip() + "..."
if len(text) > MAX_SUMMARY_HEADLINE_LENGTH:
text = text[:MAX_SUMMARY_HEADLINE_LENGTH] + "..."
headline = f"{error_type}: {text}" if text else error_type
return headline
def get_issue_summary_text(self) -> str | None:
"""Generate formatted text from issue summary fields."""
if self.issue_summary is None:
return None
parts = []
if possible_cause := self.issue_summary.get("possibleCause"):
parts.append(escape_slack_markdown_asterisks(possible_cause))
if not parts:
return None
return escape_slack_markdown_text("\n\n".join(parts))
def get_culprit_block(self, event_or_group: Event | GroupEvent | Group) -> SlackBlock | None:
    """Return a context block for the culprit, or None when it is missing or not a string."""
    if event_or_group.culprit and isinstance(event_or_group.culprit, str):
        return self.get_context_block(event_or_group.culprit)
    return None
def get_text_block(self, text, small: bool = False) -> SlackBlock:
    """Render *text* as a quoted markdown block, or a smaller context block."""
    is_feedback = self.group.issue_category == GroupCategory.FEEDBACK
    # User-feedback issues get a larger character budget than other categories.
    limit = USER_FEEDBACK_MAX_BLOCK_TEXT_LENGTH if is_feedback else MAX_BLOCK_TEXT_LENGTH
    if small:
        return self.get_context_block(text)
    return self.get_markdown_quote_block(text, limit)
def get_suggested_assignees_block(self, suggested_assignees: list[str]) -> SlackBlock:
    """Return a context block listing the suggested assignees.

    The only caller (``build``) invokes this with a non-empty list, for which
    this is byte-identical to the previous append-then-chop loop.
    """
    # str.join replaces the manual "append + trailing-comma slice" pattern.
    return self.get_context_block("Suggested Assignees: " + ", ".join(suggested_assignees))
def get_footer(self) -> SlackBlock:
    """Build the footer context block: Project / Alert / Short ID (plus replay
    link) for rule-based messages, or the notification-provided footer with a
    timestamp for workflow notifications.
    """
    # This link does not contain user input (it's a static label and a url), must not escape it.
    replay_link = build_attachment_replay_link(
        group=self.group,
        url_format=SLACK_URL_FORMAT,
        event=self.event,
    )
    # Timestamp is only shown outside the issue-details context: the later of
    # the group's last_seen and the event's datetime (when an event exists).
    timestamp = None
    if not self.issue_details:
        ts = self.group.last_seen
        timestamp = max(ts, self.event.datetime) if self.event else ts
    project = Project.objects.get_from_cache(id=self.group.project_id)
    footer = (
        self.notification.build_notification_footer(self.recipient, ExternalProviders.SLACK)
        if self.notification and self.recipient
        else build_slack_footer(
            group=self.group,
            project=project,
            rules=self.rules,
        )
    )
    if not self.notification:
        # the footer content differs if it's a workflow notification, so we must check for that
        footer_data = {
            "Project": f"<{project.get_absolute_url()}|{escape_slack_text(project.slug)}>",
            "Alert": footer,
            "Short ID": self.group.qualified_short_id,
        }
        footer_text = ""
        for k, v in footer_data.items():
            footer_text += f"{k}: {v}    "
        if replay_link:
            footer_text += replay_link
        else:
            footer_text = footer_text[:-4]  # chop off the empty space
        return self.get_context_block(text=footer_text)
    else:
        return self.get_context_block(text=footer, timestamp=timestamp)
def build(self, notification_uuid: str | None = None) -> SlackBlock:
    """Assemble the full Slack Block Kit payload for this issue.

    Block order: title, culprit, summary/description text, action text, tags,
    context line, action buttons, suggested assignees, suspect commit, notes,
    footer, divider, and (optionally) a chart image.
    """
    self.issue_summary = fetch_issue_summary(self.group)
    # XXX(dcramer): options are limited to 100 choices, even when nested
    text = build_attachment_text(self.group, self.event) or ""
    text = text.strip(" \n")
    text = escape_slack_markdown_text(text)
    project = Project.objects.get_from_cache(id=self.group.project_id)
    # If an event is unspecified, use the tags of the latest event (if one exists).
    event_for_tags = self.event or self.group.get_latest_event()
    event_or_group: Group | Event | GroupEvent = (
        self.event if self.event is not None else self.group
    )
    action_text = ""
    # Interactive actions are built unless this is an issue-details view for a
    # non-team recipient.
    if not self.issue_details or (self.recipient and self.recipient.is_team):
        payload_actions, action_text, has_action = build_actions(
            self.group, project, text, self.actions, self.identity
        )
    else:
        payload_actions = []
        has_action = False
    # Resolve the rule id used in links and block ids. rule_environment_id is
    # never assigned in this method and stays None (passed through below).
    rule_id = None
    rule_environment_id = None
    if self.rules:
        if features.has("organizations:workflow-engine-ui-links", self.group.organization):
            rule_id = int(get_key_from_rule_data(self.rules[0], "workflow_id"))
        elif should_fire_workflow_actions(self.group.organization, self.group.type):
            rule_id = int(get_key_from_rule_data(self.rules[0], "legacy_rule_id"))
        else:
            rule_id = self.rules[0].id
    # build up actions text
    if self.actions and self.identity and not action_text:
        # this means somebody is interacting with the message
        action_text = get_action_text(self.actions, self.identity)
        has_action = True
    title_link = None
    if features.has("organizations:workflow-engine-ui-links", self.group.organization):
        title_link = get_title_link_workflow_engine_ui(
            self.group,
            self.event,
            self.link_to_event,
            self.issue_details,
            self.notification,
            ExternalProviders.SLACK,
            rule_id,
            rule_environment_id,
            notification_uuid=notification_uuid,
        )
    else:
        title_link = get_title_link(
            self.group,
            self.event,
            self.link_to_event,
            self.issue_details,
            self.notification,
            ExternalProviders.SLACK,
            rule_id,
            rule_environment_id,
            notification_uuid=notification_uuid,
        )
    blocks = [self.get_title_block(event_or_group, has_action, title_link)]
    if culprit_block := self.get_culprit_block(event_or_group):
        blocks.append(culprit_block)
    # Use issue summary if available, otherwise use the default text
    if summary_text := self.get_issue_summary_text():
        blocks.append(self.get_text_block(summary_text, small=True))
    else:
        text = text.lstrip(" ")
        # XXX(CEO): sometimes text is " " and slack will error if we pass an empty string (now "")
        if text:
            blocks.append(self.get_text_block(text))
    if self.actions:
        blocks.append(self.get_markdown_block(action_text))
    # set up block id
    block_id = {"issue": self.group.id}
    if rule_id:
        block_id["rule"] = rule_id
    # build tags block
    tags = get_tags(event_for_tags=event_for_tags, tags=self.tags)
    if tags:
        blocks.append(self.get_tags_block(tags, block_id))
    # add event count, user count, substate, first seen
    context = get_context(self.group, self.rules)
    if context:
        blocks.append(self.get_context_block(context))
    # build actions
    actions = []
    try:
        assignee = self.group.get_assignee()
    except Actor.InvalidActor:
        # No (valid) assignee — the assign selector starts empty.
        assignee = None
    for action in payload_actions:
        if action.label in (
            "Archive",
            "Ignore",
            "Mark as Ongoing",
            "Stop Ignoring",
            "Resolve",
            "Unresolve",
            "Resolve...",
        ):
            actions.append(self.get_button_action(action))
        elif action.name == "assign":
            actions.append(
                self.get_external_select_action(
                    action, format_actor_option_slack(assignee) if assignee else None
                )
            )
    if actions:
        action_block = {"type": "actions", "elements": [action for action in actions]}
        blocks.append(action_block)
    # suggested assignees
    suggested_assignees = []
    if event_for_tags:
        suggested_assignees = get_suggested_assignees(
            self.group.project, event_for_tags, assignee
        )
    if len(suggested_assignees) > 0:
        blocks.append(self.get_suggested_assignees_block(suggested_assignees))
    # add suspect commit info
    suspect_commit_text = get_suspect_commit_text(self.group)
    if suspect_commit_text:
        blocks.append(self.get_context_block(suspect_commit_text))
    # add notes
    if self.notes:
        notes_text = f"notes: {self.notes}"
        blocks.append(self.get_markdown_block(notes_text))
    # build footer block
    blocks.append(self.get_footer())
    blocks.append(self.get_divider())
    chart_block = ImageBlockBuilder(group=self.group).build_image_block()
    if chart_block:
        blocks.append(chart_block)
    return self._build_blocks(
        *blocks,
        fallback_text=self.build_fallback_text(event_or_group, project.slug),
        block_id=orjson.dumps(block_id).decode(),
        skip_fallback=self.skip_fallback,
    )
| SlackIssuesMessageBuilder |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/main.py | {
"start": 32398,
"end": 58756
} | class ____:
    def __init__(self, yaml, transform=None):
        # type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None
        """Streaming-dump state for a ``YAML`` instance used as a context manager.

        Binds to the YAML instance's configured output; when *transform* is
        given, output is buffered in memory so the transform can post-process
        the whole text before it reaches the real stream.
        """
        self._yaml = yaml
        self._output_inited = False
        self._output_path = None
        self._output = self._yaml._output
        self._transform = transform
        # self._input_inited = False
        # self._input = input
        # self._input_path = None
        # self._transform = yaml.transform
        # self._fstream = None
        if not hasattr(self._output, 'write') and hasattr(self._output, 'open'):
            # pathlib.Path() instance, open with the same mode
            self._output_path = self._output
            self._output = self._output_path.open('w')
        # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
        # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
        # # pathlib.Path() instance, open with the same mode
        # self._input_path = self._input
        # self._input = self._input_path.open('r')
        if self._transform is not None:
            self._fstream = self._output
            if self._yaml.encoding is None:
                # Text-mode transform: buffer str output.
                self._output = StringIO()
            else:
                # Encoded transform: buffer bytes output.
                self._output = BytesIO()
    def teardown_output(self):
        # type: () -> None
        """Close serializer/emitter, apply any transform, and close owned files.

        No-op when output was never initialised (nothing was dumped).
        """
        if self._output_inited:
            self._yaml.serializer.close()
        else:
            return
        # NOTE(review): the except-and-re-raise wrappers below look like
        # debugging hooks kept from upstream; they do not change behavior.
        try:
            self._yaml.emitter.dispose()
        except AttributeError:
            raise
        # self.dumper.dispose() # cyaml
        try:
            delattr(self._yaml, '_serializer')
            delattr(self._yaml, '_emitter')
        except AttributeError:
            raise
        if self._transform:
            # Hand the buffered output to the transform; write its result to
            # the real stream when one exists.
            val = self._output.getvalue()
            if self._yaml.encoding:
                val = val.decode(self._yaml.encoding)
            if self._fstream is None:
                self._transform(val)
            else:
                self._fstream.write(self._transform(val))
                self._fstream.flush()
                self._output = self._fstream # maybe not necessary
        if self._output_path is not None:
            # We opened this file from a pathlib.Path in __init__, so close it.
            self._output.close()
    def init_output(self, first_data):
        # type: (Any) -> None
        """Lazily create serializer/representer/emitter on the first dump().

        *first_data* is only inspected to compute top-level colon alignment.
        """
        if self._yaml.top_level_colon_align is True:
            # Align on the widest top-level key of the first document.
            tlca = max([len(str(x)) for x in first_data]) # type: Any
        else:
            tlca = self._yaml.top_level_colon_align
        self._yaml.get_serializer_representer_emitter(self._output, tlca)
        self._yaml.serializer.open()
        self._output_inited = True
    def dump(self, data):
        # type: (Any) -> None
        """Represent one document *data* on the shared output stream."""
        if not self._output_inited:
            self.init_output(data)
        try:
            self._yaml.representer.represent(data)
        except AttributeError:
            # nprint(dir(dumper._representer))
            raise
# def teardown_input(self):
# pass
#
# def init_input(self):
# # set the constructor and parser on YAML() instance
# self._yaml.get_constructor_parser(stream)
#
# def load(self):
# if not self._input_inited:
# self.init_input()
# try:
# while self._yaml.constructor.check_data():
# yield self._yaml.constructor.get_data()
# finally:
# parser.dispose()
# try:
# self._reader.reset_reader() # type: ignore
# except AttributeError:
# pass
# try:
# self._scanner.reset_scanner() # type: ignore
# except AttributeError:
# pass
def yaml_object(yml):
    # type: (Any) -> Any
    """ decorator for classes that need to dump/load objects
    The tag for such objects is taken from the class attribute yaml_tag (or
    '!' + the class name in case that attribute is unavailable).
    If methods to_yaml and/or from_yaml are available, these are called for dumping resp.
    loading, default routines (dumping a mapping of the attributes) used otherwise.
    """
    def yo_deco(cls):
        # type: (Any) -> Any
        tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
        try:
            # Prefer the class's own to_yaml hook when it defines one.
            yml.representer.add_representer(cls, cls.to_yaml)
        except AttributeError:
            def t_y(representer, data):
                # type: (Any, Any) -> Any
                return representer.represent_yaml_object(
                    tag, data, cls, flow_style=representer.default_flow_style
                )
            yml.representer.add_representer(cls, t_y)
        try:
            # Prefer the class's own from_yaml hook when it defines one.
            yml.constructor.add_constructor(tag, cls.from_yaml)
        except AttributeError:
            def f_y(constructor, node):
                # type: (Any, Any) -> Any
                return constructor.construct_yaml_object(node, cls)
            yml.constructor.add_constructor(tag, f_y)
        return cls
    return yo_deco
########################################################################################
def warn_deprecation(fun, method, arg=''):
# type: (Any, Any, str) -> None
from spack.vendor.ruamel.yaml.compat import _F
warnings.warn(
_F(
'\n{fun} will be removed, use\n\n yaml=YAML({arg})\n yaml.{method}(...)\n\ninstead', # NOQA
fun=fun,
method=method,
arg=arg,
),
PendingDeprecationWarning, # this will show when testing with pytest/tox
stacklevel=3,
)
########################################################################################
def scan(stream, Loader=Loader):
# type: (StreamTextType, Any) -> Any
"""
Scan a YAML stream and produce scanning tokens.
"""
warn_deprecation('scan', 'scan', arg="typ='unsafe', pure=True")
loader = Loader(stream)
try:
while loader.scanner.check_token():
yield loader.scanner.get_token()
finally:
loader._parser.dispose()
def parse(stream, Loader=Loader):
# type: (StreamTextType, Any) -> Any
"""
Parse a YAML stream and produce parsing events.
"""
warn_deprecation('parse', 'parse', arg="typ='unsafe', pure=True")
loader = Loader(stream)
try:
while loader._parser.check_event():
yield loader._parser.get_event()
finally:
loader._parser.dispose()
def compose(stream, Loader=Loader):
# type: (StreamTextType, Any) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding representation tree.
"""
warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True")
loader = Loader(stream)
try:
return loader.get_single_node()
finally:
loader.dispose()
def compose_all(stream, Loader=Loader):
# type: (StreamTextType, Any) -> Any
"""
Parse all YAML documents in a stream
and produce corresponding representation trees.
"""
warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True")
loader = Loader(stream)
try:
while loader.check_node():
yield loader._composer.get_node()
finally:
loader._parser.dispose()
def load(stream, Loader=None, version=None, preserve_quotes=None):
    # type: (Any, Any, Any, Any) -> Any
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    """
    warn_deprecation('load', 'load', arg="typ='unsafe', pure=True")
    if Loader is None:
        warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
        Loader = UnsafeLoader
    loader = Loader(stream, version, preserve_quotes=preserve_quotes) # type: Any
    try:
        return loader._constructor.get_single_data()
    finally:
        # BUG FIX: the reader/scanner resets used to sit *after* this
        # try/finally, which was unreachable because the try always returns.
        # Run them as part of cleanup, exactly as load_all() already does.
        loader._parser.dispose()
        try:
            loader._reader.reset_reader()
        except AttributeError:
            pass
        try:
            loader._scanner.reset_scanner()
        except AttributeError:
            pass
def load_all(stream, Loader=None, version=None, preserve_quotes=None):
# type: (Any, Any, Any, Any) -> Any # NOQA
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
"""
warn_deprecation('load_all', 'load_all', arg="typ='unsafe', pure=True")
if Loader is None:
warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
Loader = UnsafeLoader
loader = Loader(stream, version, preserve_quotes=preserve_quotes) # type: Any
try:
while loader._constructor.check_data():
yield loader._constructor.get_data()
finally:
loader._parser.dispose()
try:
loader._reader.reset_reader()
except AttributeError:
pass
try:
loader._scanner.reset_scanner()
except AttributeError:
pass
def safe_load(stream, version=None):
# type: (StreamTextType, Optional[VersionType]) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve only basic YAML tags.
"""
warn_deprecation('safe_load', 'load', arg="typ='safe', pure=True")
return load(stream, SafeLoader, version)
def safe_load_all(stream, version=None):
# type: (StreamTextType, Optional[VersionType]) -> Any
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags.
"""
warn_deprecation('safe_load_all', 'load_all', arg="typ='safe', pure=True")
return load_all(stream, SafeLoader, version)
def round_trip_load(stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve only basic YAML tags.
"""
warn_deprecation('round_trip_load_all', 'load')
return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
def round_trip_load_all(stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags.
"""
warn_deprecation('round_trip_load_all', 'load_all')
return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
def emit(
events,
stream=None,
Dumper=Dumper,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
):
# type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA
"""
Emit YAML parsing events into a stream.
If stream is None, return the produced string instead.
"""
warn_deprecation('emit', 'emit', arg="typ='safe', pure=True")
getvalue = None
if stream is None:
stream = StringIO()
getvalue = stream.getvalue
dumper = Dumper(
stream,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
)
try:
for event in events:
dumper.emit(event)
finally:
try:
dumper._emitter.dispose()
except AttributeError:
raise
dumper.dispose() # cyaml
if getvalue is not None:
return getvalue()
enc = None
def serialize_all(
nodes,
stream=None,
Dumper=Dumper,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=enc,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
):
# type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA
"""
Serialize a sequence of representation trees into a YAML stream.
If stream is None, return the produced string instead.
"""
warn_deprecation('serialize_all', 'serialize_all', arg="typ='safe', pure=True")
getvalue = None
if stream is None:
if encoding is None:
stream = StringIO()
else:
stream = BytesIO()
getvalue = stream.getvalue
dumper = Dumper(
stream,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
encoding=encoding,
version=version,
tags=tags,
explicit_start=explicit_start,
explicit_end=explicit_end,
)
try:
dumper._serializer.open()
for node in nodes:
dumper.serialize(node)
dumper._serializer.close()
finally:
try:
dumper._emitter.dispose()
except AttributeError:
raise
dumper.dispose() # cyaml
if getvalue is not None:
return getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
# type: (Any, Optional[StreamType], Any, Any) -> Any
"""
Serialize a representation tree into a YAML stream.
If stream is None, return the produced string instead.
"""
warn_deprecation('serialize', 'serialize', arg="typ='safe', pure=True")
return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(
documents,
stream=None,
Dumper=Dumper,
default_style=None,
default_flow_style=None,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=enc,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
block_seq_indent=None,
top_level_colon_align=None,
prefix_colon=None,
):
# type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Any # NOQA
"""
Serialize a sequence of Python objects into a YAML stream.
If stream is None, return the produced string instead.
"""
warn_deprecation('dump_all', 'dump_all', arg="typ='unsafe', pure=True")
getvalue = None
if top_level_colon_align is True:
top_level_colon_align = max([len(str(x)) for x in documents[0]])
if stream is None:
if encoding is None:
stream = StringIO()
else:
stream = BytesIO()
getvalue = stream.getvalue
dumper = Dumper(
stream,
default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags,
block_seq_indent=block_seq_indent,
top_level_colon_align=top_level_colon_align,
prefix_colon=prefix_colon,
)
try:
dumper._serializer.open()
for data in documents:
try:
dumper._representer.represent(data)
except AttributeError:
# nprint(dir(dumper._representer))
raise
dumper._serializer.close()
finally:
try:
dumper._emitter.dispose()
except AttributeError:
raise
dumper.dispose() # cyaml
if getvalue is not None:
return getvalue()
return None
def dump(
data,
stream=None,
Dumper=Dumper,
default_style=None,
default_flow_style=None,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=enc,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
block_seq_indent=None,
):
# type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[Any] # NOQA
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
default_style ∈ None, '', '"', "'", '|', '>'
"""
warn_deprecation('dump', 'dump', arg="typ='unsafe', pure=True")
return dump_all(
[data],
stream,
Dumper=Dumper,
default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags,
block_seq_indent=block_seq_indent,
)
def safe_dump_all(documents, stream=None, **kwds):
# type: (Any, Optional[StreamType], Any) -> Optional[Any]
"""
Serialize a sequence of Python objects into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
warn_deprecation('safe_dump_all', 'dump_all', arg="typ='safe', pure=True")
return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
# type: (Any, Optional[StreamType], Any) -> Optional[Any]
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
warn_deprecation('safe_dump', 'dump', arg="typ='safe', pure=True")
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
def round_trip_dump(
data,
stream=None,
Dumper=RoundTripDumper,
default_style=None,
default_flow_style=None,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=enc,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
block_seq_indent=None,
top_level_colon_align=None,
prefix_colon=None,
):
# type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[Any] # NOQA
allow_unicode = True if allow_unicode is None else allow_unicode
warn_deprecation('round_trip_dump', 'dump')
return dump_all(
[data],
stream,
Dumper=Dumper,
default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags,
block_seq_indent=block_seq_indent,
top_level_colon_align=top_level_colon_align,
prefix_colon=prefix_colon,
)
# Loader/Dumper are no longer composites, to get to the associated
# Resolver()/Representer(), etc., you need to instantiate the class
def add_implicit_resolver(
tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
):
# type: (Any, Any, Any, Any, Any, Any) -> None
"""
Add an implicit scalar detector.
If an implicit scalar value matches the given regexp,
the corresponding tag is assigned to the scalar.
first is a sequence of possible initial characters or None.
"""
if Loader is None and Dumper is None:
resolver.add_implicit_resolver(tag, regexp, first)
return
if Loader:
if hasattr(Loader, 'add_implicit_resolver'):
Loader.add_implicit_resolver(tag, regexp, first)
elif issubclass(
Loader, (BaseLoader, SafeLoader, spack.vendor.ruamel.yaml.loader.Loader, RoundTripLoader)
):
Resolver.add_implicit_resolver(tag, regexp, first)
else:
raise NotImplementedError
if Dumper:
if hasattr(Dumper, 'add_implicit_resolver'):
Dumper.add_implicit_resolver(tag, regexp, first)
elif issubclass(
Dumper, (BaseDumper, SafeDumper, spack.vendor.ruamel.yaml.dumper.Dumper, RoundTripDumper)
):
Resolver.add_implicit_resolver(tag, regexp, first)
else:
raise NotImplementedError
# this code currently not tested
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver):
# type: (Any, Any, Any, Any, Any, Any) -> None
"""
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
"""
if Loader is None and Dumper is None:
resolver.add_path_resolver(tag, path, kind)
return
if Loader:
if hasattr(Loader, 'add_path_resolver'):
Loader.add_path_resolver(tag, path, kind)
elif issubclass(
Loader, (BaseLoader, SafeLoader, spack.vendor.ruamel.yaml.loader.Loader, RoundTripLoader)
):
Resolver.add_path_resolver(tag, path, kind)
else:
raise NotImplementedError
if Dumper:
if hasattr(Dumper, 'add_path_resolver'):
Dumper.add_path_resolver(tag, path, kind)
elif issubclass(
Dumper, (BaseDumper, SafeDumper, spack.vendor.ruamel.yaml.dumper.Dumper, RoundTripDumper)
):
Resolver.add_path_resolver(tag, path, kind)
else:
raise NotImplementedError
def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
    # type: (Any, Any, Any, Any) -> None
    """
    Add an object constructor for the given tag.
    object_constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        constructor.add_constructor(tag, object_constructor)
    else:
        if hasattr(Loader, 'add_constructor'):
            Loader.add_constructor(tag, object_constructor)
            return
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, spack.vendor.ruamel.yaml.loader.Loader):
            # BUG FIX: was issubclass(Loader, Loader) — the parameter compared
            # against itself, which is always True and made the RoundTripLoader
            # branch below unreachable (cf. add_multi_constructor).
            Constructor.add_constructor(tag, object_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_constructor(tag, object_constructor)
        else:
            raise NotImplementedError
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    if Loader is None:
        constructor.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        # NOTE(review): this shortcut is deliberately disabled ("if False").
        # BUG FIX (latent): it used to register `constructor` instead of
        # `multi_constructor`; corrected so the path is right if re-enabled.
        if False and hasattr(Loader, 'add_multi_constructor'):
            Loader.add_multi_constructor(tag_prefix, multi_constructor)
            return
        if issubclass(Loader, BaseLoader):
            BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, SafeLoader):
            SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, spack.vendor.ruamel.yaml.loader.Loader):
            Constructor.add_multi_constructor(tag_prefix, multi_constructor)
        elif issubclass(Loader, RoundTripLoader):
            RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
        else:
            raise NotImplementedError
def add_representer(data_type, object_representer, Dumper=None, representer=Representer):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a representer for the given type.
    object_representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    if Dumper is None:
        representer.add_representer(data_type, object_representer)
    else:
        if hasattr(Dumper, 'add_representer'):
            Dumper.add_representer(data_type, object_representer)
            return
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_representer(data_type, object_representer)
        elif issubclass(Dumper, spack.vendor.ruamel.yaml.dumper.Dumper):
            # BUG FIX: was issubclass(Dumper, Dumper) — the parameter compared
            # against itself (always True), so the RoundTripDumper branch below
            # was unreachable. Mirrors the class list in add_implicit_resolver.
            Representer.add_representer(data_type, object_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_representer(data_type, object_representer)
        else:
            raise NotImplementedError
# this code currently not tested
def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer):
    # type: (Any, Any, Any, Any) -> None
    """
    Add a representer for the given type.
    multi_representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    if Dumper is None:
        representer.add_multi_representer(data_type, multi_representer)
    else:
        if hasattr(Dumper, 'add_multi_representer'):
            Dumper.add_multi_representer(data_type, multi_representer)
            return
        if issubclass(Dumper, BaseDumper):
            BaseRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, SafeDumper):
            SafeRepresenter.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, spack.vendor.ruamel.yaml.dumper.Dumper):
            # BUG FIX: was issubclass(Dumper, Dumper) — always-True
            # self-comparison that made the RoundTripDumper branch unreachable
            # (same defect as add_representer).
            Representer.add_multi_representer(data_type, multi_representer)
        elif issubclass(Dumper, RoundTripDumper):
            RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
        else:
            raise NotImplementedError
| YAMLContextManager |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/llama_index/vector_stores/azureaisearch/base.py | {
"start": 64267,
"end": 64623
} | class ____(AzureQueryResultSearchBase):
def _create_search_query(self) -> str:
if self._query.query_str is None:
raise ValueError("Query missing query string")
search_query = self._query.query_str
logger.info(f"Hybrid search with search text: {search_query}")
return search_query
| AzureQueryResultSearchSparse |
python | tensorflow__tensorflow | tensorflow/python/data/ops/zip_op.py | {
"start": 1046,
"end": 2353
} | class ____(dataset_ops.DatasetV2):
"""A `Dataset` that zips its inputs together."""
    def __init__(self, datasets, name=None):
        """See `Dataset.zip()` for details."""
        # Validate every leaf of the (possibly nested) structure up front so
        # the error message can name the offending element type.
        for ds in nest.flatten(datasets):
            if not isinstance(ds, data_types.DatasetV2):
                if isinstance(ds, list):
                    raise TypeError(
                        "Invalid input to `zip`. Inputs are expected to be (nested)"
                        " structures of `tf.data.Dataset` objects. Python `list` is"
                        " not supported and you should use `tuple` instead."
                    )
                else:
                    raise TypeError(
                        "Invalid input to `zip`. Inputs are expected to be (nested)"
                        " structures of `tf.data.Dataset` objects but"
                        f" encountered object of type {type(ds)}."
                    )
        self._datasets = datasets
        # The element spec mirrors the input nesting: one spec per component.
        self._structure = nest.pack_sequence_as(
            self._datasets, [ds.element_spec for ds in nest.flatten(self._datasets)]
        )
        self._name = name
        variant_tensor = gen_dataset_ops.zip_dataset(
            [ds._variant_tensor for ds in nest.flatten(self._datasets)],
            **self._common_args,
        )
        super().__init__(variant_tensor)
    def _inputs(self):
        # Upstream datasets feeding this op, in flattened order.
        return nest.flatten(self._datasets)
    @property
    def element_spec(self):
        # Matches the nesting of the `datasets` argument passed to __init__.
        return self._structure
| _ZipDataset |
python | django-guardian__django-guardian | guardian/exceptions.py | {
"start": 571,
"end": 718
} | class ____(GuardianError):
"""Raised when content type for the provided permissions and/or class do not match."""
pass
| MixedContentTypeError |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 83491,
"end": 83744
} | class ____(Response):
"""
Response of workers.status_report endpoint.
"""
_service = "workers"
_action = "status_report"
_version = "2.13"
_schema = {"definitions": {}, "properties": {}, "type": "object"}
| StatusReportResponse |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/editorstack/editorstack.py | {
"start": 2469,
"end": 3041
} | class ____:
    # Path actions for the current file's tab.
    CopyAbsolutePath = "copy_absolute_path_action"
    CopyRelativePath = "copy_relative_path_action"
    # Bulk tab-closing actions.
    # NOTE(review): the "rigth" misspelling below is preserved on purpose —
    # these strings are action identifiers, and renaming one would break any
    # registration/lookup elsewhere that uses the existing id.
    CloseAllRight = "close_all_rigth_action"
    CloseAllLeft = "close_all_left_action"
    CloseAllButThis = "close_all_but_this_action"
    SortTabs = "sort_tabs_action"
    ShowInExternalFileExplorer = "show in external file explorer"
    NewWindow = "new_window_action"
    # Split-view management.
    SplitVertically = "split vertically"
    SplitHorizontally = "split horizontally"
    CloseSplitPanel = "close split panel"
    CloseWindow = "close_window_action"
python | huggingface__transformers | src/transformers/models/vipllava/modular_vipllava.py | {
"start": 2348,
"end": 7197
} | class ____(LlavaModel):
def get_image_features(
self, pixel_values: torch.FloatTensor, vision_feature_layers: Optional[Union[int, list[int]]] = None
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
The tensors corresponding to the input images.
vision_feature_layers (`Union[int, list[int]]`):
The vision feature layer, or the list of indexes of the layers to select
the vision feature.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
vision_feature_layers = (
vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
)
image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)
# If multiple feature layers are provided (which is usually the case)
# then the image features are concatenated after the CLS is removed.
if isinstance(vision_feature_layers, int):
image_features = image_outputs.hidden_states[vision_feature_layers][:, 1:]
else:
# Usually, we select the features from index 1: the layers -2, -5, -8, -11 and 6
image_features = [image_outputs.hidden_states[index][:, 1:] for index in vision_feature_layers]
image_features = torch.cat(image_features, dim=-1)
image_features = self.multi_modal_projector(image_features)
return image_features
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layers: Optional[Union[int, list[int]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**lm_kwargs,
) -> Union[tuple, VipLlavaModelOutputWithPast]:
r"""
vision_feature_layers (`Union[int, list[int]]`, *optional*):
The vision feature layer, or the list of indexes of the layers to select
the vision feature.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layers = (
vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values, vision_feature_layers=vision_feature_layers
)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**lm_kwargs,
)
output = VipLlavaModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
return output if return_dict else output.to_tuple()
| VipLlavaModel |
python | getsentry__sentry | tests/sentry/receivers/test_featureadoption.py | {
"start": 821,
"end": 23317
} | class ____(TestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.now = timezone.now()
self.owner = self.create_user()
self.organization = self.create_organization(owner=self.owner)
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(teams=[self.team])
def test_bad_feature_slug(self) -> None:
FeatureAdoption.objects.record(self.organization.id, "xxx")
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert feature_complete is None
def test_all_passed_feature_slugs_are_complete(self) -> None:
event1 = self.store_event(
data={"tags": {"environment": "prod"}}, project_id=self.project.id
)
event2 = self.store_event(
data={"tags": {"environment": "prod"}}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event1, sender=type(self.project))
event_processed.send(project=self.project, event=event2, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert feature_complete.complete
def test_first_event(self) -> None:
event = self.store_event(
data={"platform": "javascript", "message": "javascript error message"},
project_id=self.project.id,
)
first_event_received.send(project=self.project, event=event, sender=type(self.project))
first_event = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert first_event.complete
def test_javascript(self) -> None:
event = self.store_event(data={"platform": "javascript"}, project_id=self.project.id)
event_processed.send(project=self.project, event=event, sender=type(self.project))
js = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="javascript")
assert js.complete
def test_python(self) -> None:
event = self.store_event(
data={"platform": "python", "message": "python error message"},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
python = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="python")
assert python.complete
def test_node(self) -> None:
event = self.store_event(
data={"platform": "node", "message": "node error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
node = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="node")
assert node.complete
def test_ruby(self) -> None:
event = self.store_event(
data={"platform": "ruby", "message": "ruby error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
ruby = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="ruby")
assert ruby.complete
def test_java(self) -> None:
event = self.store_event(
data={"platform": "java", "message": "java error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
java = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="java")
assert java.complete
def test_cocoa(self) -> None:
event = self.store_event(
data={"platform": "cocoa", "message": "cocoa error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
cocoa = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="cocoa")
assert cocoa.complete
def test_objc(self) -> None:
event = self.store_event(
data={"platform": "objc", "message": "objc error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
objc = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="objc")
assert objc.complete
def test_php(self) -> None:
event = self.store_event(
data={"platform": "php", "message": "php error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
php = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="php")
assert php.complete
def test_go(self) -> None:
event = self.store_event(
data={"platform": "go", "message": "go error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
go = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="go")
assert go.complete
def test_csharp(self) -> None:
event = self.store_event(
data={"platform": "csharp", "message": "csharp error message"},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
csharp = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="csharp")
assert csharp.complete
def test_perl(self) -> None:
event = self.store_event(
data={"platform": "perl", "message": "perl error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
perl = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="perl")
assert perl.complete
def test_elixir(self) -> None:
event = self.store_event(
data={"platform": "elixir", "message": "elixir error message"},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
elixir = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="elixir")
assert elixir.complete
def test_cfml(self) -> None:
event = self.store_event(
data={"platform": "cfml", "message": "cfml error message"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
cfml = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="cfml")
assert cfml.complete
def test_groovy(self) -> None:
event = self.store_event(
data={"platform": "groovy", "message": "groovy error message"},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
groovy = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="groovy")
assert groovy.complete
def test_release_tracking(self) -> None:
event = self.store_event(data={"tags": {"sentry:release": "1"}}, project_id=self.project.id)
event_processed.send(project=self.project, event=event, sender=type(self.project))
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
def test_environment_tracking(self) -> None:
event = self.store_event(data={"environment": "prod"}, project_id=self.project.id)
event_processed.send(project=self.project, event=event, sender=type(self.project))
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
def test_bulk_create(self) -> None:
event = self.store_event(
data={
"platform": "javascript",
"environment": "prod",
"tags": {"sentry:release": "abc"},
"user": {"id": "123"},
},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
javascript = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="javascript"
)
assert javascript
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
def test_user_tracking(self) -> None:
event = self.store_event(data={"user": {"id": "123"}}, project_id=self.project.id)
event_processed.send(project=self.project, event=event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
def test_no_user_tracking_for_ip_address_only(self) -> None:
"""test to see if just sending ip address doesn't check the user tracking box"""
userless_event = self.store_event(
data={"user": {"ip_address": "0.0.0.0"}}, project_id=self.project.id
)
event_processed.send(project=self.project, event=userless_event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete is None
def test_no_env_tracking(self) -> None:
envless_event = self.store_event(
data={"platform": "javascript"}, project_id=self.project.id
)
event_processed.send(project=self.project, event=envless_event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert feature_complete is None
def test_custom_tags(self) -> None:
event = self.store_event(data={}, project_id=self.project.id)
event.data["tags"].append(("foo", "bar"))
assert event.get_tag("foo") == "bar"
event_processed.send(project=self.project, event=event, sender=type(self.project))
custom_tags = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="custom_tags"
)
assert custom_tags
def test_source_maps(self) -> None:
event = self.store_event(
data={
"platform": "javascript",
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
},
},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
source_maps = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="source_maps"
)
assert source_maps
def test_breadcrumbs(self) -> None:
event = self.store_event(
data={
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST",
},
}
]
}
},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=event, sender=type(self.project))
breadcrumbs = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="breadcrumbs"
)
assert breadcrumbs
def test_multiple_events(self) -> None:
simple_event = self.store_event(
data={"message": "javascript error message", "platform": "javascript"},
project_id=self.project.id,
)
first_event_received.send(
project=self.project, event=simple_event, sender=type(self.project)
)
event_processed.send(project=self.project, event=simple_event, sender=type(self.project))
first_event = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert first_event.complete
js = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="javascript")
assert js.complete
full_event = self.store_event(
data={
"message": "javascript error message",
"platform": "javascript",
"environment": "prod",
"tags": {"sentry:release": "abc"},
"user": {"id": "123"},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"data": {
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map"
}
}
]
},
"type": "TypeError",
}
]
},
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST",
},
}
]
},
},
project_id=self.project.id,
)
event_processed.send(project=self.project, event=full_event, sender=type(self.project))
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
source_maps = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="source_maps"
)
assert source_maps
breadcrumbs = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="breadcrumbs"
)
assert breadcrumbs
def test_user_feedback(self) -> None:
user_feedback_received.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_feedback"
)
assert feature_complete
def test_project_created(self) -> None:
project_created.send(project=self.project, user=self.owner, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_project"
)
assert feature_complete
def test_member_joined(self) -> None:
member = self.create_member(
organization=self.organization, teams=[self.team], user=self.create_user()
)
member_joined.send(
organization_member_id=member.id,
organization_id=self.organization.id,
user_id=member.user_id,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="invite_team"
)
assert feature_complete
def test_assignment(self) -> None:
GroupAssignee.objects.create(
group_id=self.group.id, user_id=self.user.id, project_id=self.project.id
)
issue_assigned.send(
project=self.project, group=self.group, user=self.user, sender="something"
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="assignment"
)
assert feature_complete
def test_resolved_in_release(self) -> None:
issue_resolved.send(
organization_id=self.organization.id,
project=self.project,
group=self.group,
user=self.user,
resolution_type="in_next_release",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="resolved_in_release"
)
assert feature_complete
def test_resolved_manually(self) -> None:
issue_resolved.send(
organization_id=self.organization.id,
project=self.project,
group=self.group,
user=self.user,
resolution_type="now",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="resolved_in_release"
)
assert not feature_complete
def test_advanced_search(self) -> None:
advanced_search.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="advanced_search"
)
assert feature_complete
def test_save_search(self) -> None:
save_search_created.send(project=self.project, user=self.user, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="saved_search"
)
assert feature_complete
def test_inbound_filters(self) -> None:
inbound_filter_toggled.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="inbound_filters"
)
assert feature_complete
def test_alert_rules(self) -> None:
rule = Rule.objects.create(
project=self.project, label="Trivially modified rule", data=DEFAULT_RULE_DATA
)
alert_rule_created.send(
user=self.owner,
project=self.project,
rule_id=rule.id,
rule_type="issue",
sender=type(self.project),
is_api_token=False,
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="alert_rules"
)
assert feature_complete
def test_issue_tracker_plugin(self) -> None:
plugin_enabled.send(
plugin=IssueTrackingPlugin2(),
project=self.project,
user=self.owner,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="issue_tracker_integration"
)
assert feature_complete
def test_notification_plugin(self) -> None:
plugin_enabled.send(
plugin=NotificationPlugin(),
project=self.project,
user=self.owner,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="notification_integration"
)
assert feature_complete
def test_sso(self) -> None:
sso_enabled.send(
organization_id=self.organization.id,
user_id=self.user.id,
provider="google",
sender=type(self.organization),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="sso"
)
assert feature_complete
def test_data_scrubber(self) -> None:
data_scrubber_enabled.send(organization=self.organization, sender=type(self.organization))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="data_scrubbers"
)
assert feature_complete
def test_delete_and_discard(self) -> None:
GroupTombstone.objects.create(previous_group_id=self.group.id, project=self.project)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="delete_and_discard"
)
assert feature_complete
| FeatureAdoptionTest |
python | pydantic__pydantic | pydantic/v1/fields.py | {
"start": 48882,
"end": 50366
} | class ____(Representation):
__slots__ = ('default', 'default_factory')
def __init__(self, default: Any = Undefined, *, default_factory: Optional[NoArgAnyCallable] = None) -> None:
self.default = default
self.default_factory = default_factory
def get_default(self) -> Any:
return smart_deepcopy(self.default) if self.default_factory is None else self.default_factory()
def __eq__(self, other: Any) -> bool:
return isinstance(other, self.__class__) and (self.default, self.default_factory) == (
other.default,
other.default_factory,
)
def PrivateAttr(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
) -> Any:
"""
Indicates that attribute is only used internally and never mixed with regular fields.
Types or values of private attrs are not checked by pydantic and it's up to you to keep them relevant.
Private attrs are stored in model __slots__.
:param default: the attribute’s default value
:param default_factory: callable that will be called when a default value is needed for this attribute
If both `default` and `default_factory` are set, an error is raised.
"""
if default is not Undefined and default_factory is not None:
raise ValueError('cannot specify both default and default_factory')
return ModelPrivateAttr(
default,
default_factory=default_factory,
)
| ModelPrivateAttr |
python | ansible__ansible | test/units/parsing/vault/test_vault.py | {
"start": 16615,
"end": 17654
} | class ____(unittest.TestCase):
def test_bytes_not_encrypted(self):
b_data = b"foobar"
self.assertFalse(vault.is_encrypted(b_data))
def test_bytes_encrypted(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
self.assertTrue(vault.is_encrypted(b_data))
def test_text_not_encrypted(self):
b_data = to_text(b"foobar")
self.assertFalse(vault.is_encrypted(b_data))
def test_text_encrypted(self):
b_data = to_text(b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible"))
self.assertTrue(vault.is_encrypted(b_data))
def test_invalid_text_not_ascii(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
self.assertFalse(vault.is_encrypted(data))
def test_invalid_bytes_not_ascii(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
b_data = to_bytes(data, encoding='utf-8')
self.assertFalse(vault.is_encrypted(b_data))
| TestVaultIsEncrypted |
python | getsentry__sentry | src/sentry/integrations/models/integration_feature.py | {
"start": 4402,
"end": 4673
} | class ____(Enum):
SENTRY_APP = 0
DOC_INTEGRATION = 1
INTEGRATION_MODELS_BY_TYPE: dict[int, type[SentryApp] | type[DocIntegration]] = {
IntegrationTypes.SENTRY_APP.value: SentryApp,
IntegrationTypes.DOC_INTEGRATION.value: DocIntegration,
}
| IntegrationTypes |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linalg_ops_test.py | {
"start": 16156,
"end": 17230
} | class ____(object):
dtype = np.float32
use_static_shape = True
def test_non_batch(self):
x_ = np.array([[1, 2], [3, 4]], dtype=self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = linalg.lu_matrix_inverse(*linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3)
def test_batch(self):
x_ = np.array([
[[1, 2], [3, 4]],
[[7, 8], [3, 4]],
[[0.25, 0.5], [0.75, -2.]],
],
dtype=self.dtype)
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
y = linalg.lu_matrix_inverse(*linalg.lu(x), validate_args=True)
y_ = self.evaluate(y)
if self.use_static_shape:
self.assertAllEqual(x_.shape, y.shape)
self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3)
@test_util.run_all_in_graph_and_eager_modes
| _LUMatrixInverse |
python | pypa__setuptools | setuptools/_vendor/backports/tarfile/__init__.py | {
"start": 24261,
"end": 24427
} | class ____(FilterError):
def __init__(self, tarinfo):
self.tarinfo = tarinfo
super().__init__(f'{tarinfo.name!r} is a special file')
| SpecialFileError |
python | kamyu104__LeetCode-Solutions | Python/tree-diameter.py | {
"start": 1774,
"end": 2892
} | class ____(object):
def treeDiameter(self, edges):
"""
:type edges: List[List[int]]
:rtype: int
"""
def bfs():
result = 0
dp = [0]*len(adj)
degree = map(len, adj)
q = [u for u in xrange(len(degree)) if degree[u] == 1]
while q:
new_q = []
for u in q:
if degree[u] == 0:
continue
degree[u] -= 1
for v in adj[u]:
if degree[v] == 0:
continue
result = max(result, dp[v]+(dp[u]+1))
dp[v] = max(dp[v], (dp[u]+1))
degree[v] -= 1
if degree[v] == 1:
new_q.append(v)
q = new_q
return result
adj = [[] for _ in range(len(edges)+1)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
return bfs()
# Time: O(|V| + |E|)
# Space: O(|E|)
# bfs
| Solution3 |
python | pyqtgraph__pyqtgraph | pyqtgraph/dockarea/Container.py | {
"start": 6717,
"end": 6967
} | class ____(QtWidgets.QStackedWidget):
def __init__(self, *, container):
super().__init__()
self.container = container
def childEvent(self, ev):
super().childEvent(ev)
self.container.childEvent_(ev)
| StackedWidget |
python | PyCQA__bandit | bandit/core/manager.py | {
"start": 767,
"end": 17283
} | class ____:
scope = []
def __init__(
self,
config,
agg_type,
debug=False,
verbose=False,
quiet=False,
profile=None,
ignore_nosec=False,
):
"""Get logger, config, AST handler, and result store ready
:param config: config options object
:type config: bandit.core.BanditConfig
:param agg_type: aggregation type
:param debug: Whether to show debug messages or not
:param verbose: Whether to show verbose output
:param quiet: Whether to only show output in the case of an error
:param profile_name: Optional name of profile to use (from cmd line)
:param ignore_nosec: Whether to ignore #nosec or not
:return:
"""
self.debug = debug
self.verbose = verbose
self.quiet = quiet
if not profile:
profile = {}
self.ignore_nosec = ignore_nosec
self.b_conf = config
self.files_list = []
self.excluded_files = []
self.b_ma = b_meta_ast.BanditMetaAst()
self.skipped = []
self.results = []
self.baseline = []
self.agg_type = agg_type
self.metrics = metrics.Metrics()
self.b_ts = b_test_set.BanditTestSet(config, profile)
self.scores = []
def get_skipped(self):
ret = []
# "skip" is a tuple of name and reason, decode just the name
for skip in self.skipped:
if isinstance(skip[0], bytes):
ret.append((skip[0].decode("utf-8"), skip[1]))
else:
ret.append(skip)
return ret
def get_issue_list(
self, sev_level=b_constants.LOW, conf_level=b_constants.LOW
):
return self.filter_results(sev_level, conf_level)
def populate_baseline(self, data):
"""Populate a baseline set of issues from a JSON report
This will populate a list of baseline issues discovered from a previous
run of bandit. Later this baseline can be used to filter out the result
set, see filter_results.
"""
items = []
try:
jdata = json.loads(data)
items = [issue.issue_from_dict(j) for j in jdata["results"]]
except Exception as e:
LOG.warning("Failed to load baseline data: %s", e)
self.baseline = items
def filter_results(self, sev_filter, conf_filter):
"""Returns a list of results filtered by the baseline
This works by checking the number of results returned from each file we
process. If the number of results is different to the number reported
for the same file in the baseline, then we return all results for the
file. We can't reliably return just the new results, as line numbers
will likely have changed.
:param sev_filter: severity level filter to apply
:param conf_filter: confidence level filter to apply
"""
results = [
i for i in self.results if i.filter(sev_filter, conf_filter)
]
if not self.baseline:
return results
unmatched = _compare_baseline_results(self.baseline, results)
# if it's a baseline we'll return a dictionary of issues and a list of
# candidate issues
return _find_candidate_matches(unmatched, results)
def results_count(
self, sev_filter=b_constants.LOW, conf_filter=b_constants.LOW
):
"""Return the count of results
:param sev_filter: Severity level to filter lower
:param conf_filter: Confidence level to filter
:return: Number of results in the set
"""
return len(self.get_issue_list(sev_filter, conf_filter))
def output_results(
self,
lines,
sev_level,
conf_level,
output_file,
output_format,
template=None,
):
"""Outputs results from the result store
:param lines: How many surrounding lines to show per result
:param sev_level: Which severity levels to show (LOW, MEDIUM, HIGH)
:param conf_level: Which confidence levels to show (LOW, MEDIUM, HIGH)
:param output_file: File to store results
:param output_format: output format plugin name
:param template: Output template with non-terminal tags <N>
(default: {abspath}:{line}:
{test_id}[bandit]: {severity}: {msg})
:return: -
"""
try:
formatters_mgr = extension_loader.MANAGER.formatters_mgr
if output_format not in formatters_mgr:
output_format = (
"screen"
if (
sys.stdout.isatty()
and os.getenv("NO_COLOR") is None
and os.getenv("TERM") != "dumb"
)
else "txt"
)
formatter = formatters_mgr[output_format]
report_func = formatter.plugin
if output_format == "custom":
report_func(
self,
fileobj=output_file,
sev_level=sev_level,
conf_level=conf_level,
template=template,
)
else:
report_func(
self,
fileobj=output_file,
sev_level=sev_level,
conf_level=conf_level,
lines=lines,
)
except Exception as e:
raise RuntimeError(
f"Unable to output report using "
f"'{output_format}' formatter: {str(e)}"
)
def discover_files(self, targets, recursive=False, excluded_paths=""):
"""Add tests directly and from a directory to the test set
:param targets: The command line list of files and directories
:param recursive: True/False - whether to add all files from dirs
:return:
"""
# We'll mantain a list of files which are added, and ones which have
# been explicitly excluded
files_list = set()
excluded_files = set()
excluded_path_globs = self.b_conf.get_option("exclude_dirs") or []
included_globs = self.b_conf.get_option("include") or ["*.py"]
# if there are command line provided exclusions add them to the list
if excluded_paths:
for path in excluded_paths.split(","):
if os.path.isdir(path):
path = os.path.join(path, "*")
excluded_path_globs.append(path)
# build list of files we will analyze
for fname in targets:
# if this is a directory and recursive is set, find all files
if os.path.isdir(fname):
if recursive:
new_files, newly_excluded = _get_files_from_dir(
fname,
included_globs=included_globs,
excluded_path_strings=excluded_path_globs,
)
files_list.update(new_files)
excluded_files.update(newly_excluded)
else:
LOG.warning(
"Skipping directory (%s), use -r flag to "
"scan contents",
fname,
)
else:
# if the user explicitly mentions a file on command line,
# we'll scan it, regardless of whether it's in the included
# file types list
if _is_file_included(
fname,
included_globs,
excluded_path_globs,
enforce_glob=False,
):
if fname != "-":
fname = os.path.join(".", fname)
files_list.add(fname)
else:
excluded_files.add(fname)
self.files_list = sorted(files_list)
self.excluded_files = sorted(excluded_files)
def run_tests(self):
"""Runs through all files in the scope
:return: -
"""
# if we have problems with a file, we'll remove it from the files_list
# and add it to the skipped list instead
new_files_list = list(self.files_list)
if (
len(self.files_list) > PROGRESS_THRESHOLD
and LOG.getEffectiveLevel() <= logging.INFO
):
files = progress.track(self.files_list)
else:
files = self.files_list
for count, fname in enumerate(files):
LOG.debug("working on file : %s", fname)
try:
if fname == "-":
open_fd = os.fdopen(sys.stdin.fileno(), "rb", 0)
fdata = io.BytesIO(open_fd.read())
new_files_list = [
"<stdin>" if x == "-" else x for x in new_files_list
]
self._parse_file("<stdin>", fdata, new_files_list)
else:
with open(fname, "rb") as fdata:
self._parse_file(fname, fdata, new_files_list)
except OSError as e:
self.skipped.append((fname, e.strerror))
new_files_list.remove(fname)
# reflect any files which may have been skipped
self.files_list = new_files_list
# do final aggregation of metrics
self.metrics.aggregate()
def _parse_file(self, fname, fdata, new_files_list):
try:
# parse the current file
data = fdata.read()
lines = data.splitlines()
self.metrics.begin(fname)
self.metrics.count_locs(lines)
# nosec_lines is a dict of line number -> set of tests to ignore
# for the line
nosec_lines = dict()
try:
fdata.seek(0)
tokens = tokenize.tokenize(fdata.readline)
if not self.ignore_nosec:
for toktype, tokval, (lineno, _), _, _ in tokens:
if toktype == tokenize.COMMENT:
nosec_lines[lineno] = _parse_nosec_comment(tokval)
except tokenize.TokenError:
pass
score = self._execute_ast_visitor(fname, fdata, data, nosec_lines)
self.scores.append(score)
self.metrics.count_issues([score])
except KeyboardInterrupt:
sys.exit(2)
except SyntaxError:
self.skipped.append(
(fname, "syntax error while parsing AST from file")
)
new_files_list.remove(fname)
except Exception as e:
LOG.error(
"Exception occurred when executing tests against %s.", fname
)
if not LOG.isEnabledFor(logging.DEBUG):
LOG.error(
'Run "bandit --debug %s" to see the full traceback.', fname
)
self.skipped.append((fname, "exception while scanning file"))
new_files_list.remove(fname)
LOG.debug(" Exception string: %s", e)
LOG.debug(" Exception traceback: %s", traceback.format_exc())
def _execute_ast_visitor(self, fname, fdata, data, nosec_lines):
"""Execute AST parse on each file
:param fname: The name of the file being parsed
:param data: Original file contents
:param lines: The lines of code to process
:return: The accumulated test score
"""
score = []
res = b_node_visitor.BanditNodeVisitor(
fname,
fdata,
self.b_ma,
self.b_ts,
self.debug,
nosec_lines,
self.metrics,
)
score = res.process(data)
self.results.extend(res.tester.results)
return score
def _get_files_from_dir(
files_dir, included_globs=None, excluded_path_strings=None
):
if not included_globs:
included_globs = ["*.py"]
if not excluded_path_strings:
excluded_path_strings = []
files_list = set()
excluded_files = set()
for root, _, files in os.walk(files_dir):
for filename in files:
path = os.path.join(root, filename)
if _is_file_included(path, included_globs, excluded_path_strings):
files_list.add(path)
else:
excluded_files.add(path)
return files_list, excluded_files
def _is_file_included(
path, included_globs, excluded_path_strings, enforce_glob=True
):
"""Determine if a file should be included based on filename
This utility function determines if a file should be included based
on the file name, a list of parsed extensions, excluded paths, and a flag
specifying whether extensions should be enforced.
:param path: Full path of file to check
:param parsed_extensions: List of parsed extensions
:param excluded_paths: List of paths (globbing supported) from which we
should not include files
:param enforce_glob: Can set to false to bypass extension check
:return: Boolean indicating whether a file should be included
"""
return_value = False
# if this is matches a glob of files we look at, and it isn't in an
# excluded path
if _matches_glob_list(path, included_globs) or not enforce_glob:
if not _matches_glob_list(path, excluded_path_strings) and not any(
x in path for x in excluded_path_strings
):
return_value = True
return return_value
def _matches_glob_list(filename, glob_list):
for glob in glob_list:
if fnmatch.fnmatch(filename, glob):
return True
return False
def _compare_baseline_results(baseline, results):
"""Compare a baseline list of issues to list of results
This function compares a baseline set of issues to a current set of issues
to find results that weren't present in the baseline.
:param baseline: Baseline list of issues
:param results: Current list of issues
:return: List of unmatched issues
"""
return [a for a in results if a not in baseline]
def _find_candidate_matches(unmatched_issues, results_list):
"""Returns a dictionary with issue candidates
For example, let's say we find a new command injection issue in a file
which used to have two. Bandit can't tell which of the command injection
issues in the file are new, so it will show all three. The user should
be able to pick out the new one.
:param unmatched_issues: List of issues that weren't present before
:param results_list: main list of current Bandit findings
:return: A dictionary with a list of candidates for each issue
"""
issue_candidates = collections.OrderedDict()
for unmatched in unmatched_issues:
issue_candidates[unmatched] = [
i for i in results_list if unmatched == i
]
return issue_candidates
def _find_test_id_from_nosec_string(extman, match):
test_id = extman.check_id(match)
if test_id:
return match
# Finding by short_id didn't work, let's check the test name
test_id = extman.get_test_id(match)
if not test_id:
# Name and short id didn't work:
LOG.warning(
"Test in comment: %s is not a test name or id, ignoring", match
)
return test_id # We want to return None or the string here regardless
def _parse_nosec_comment(comment):
found_no_sec_comment = NOSEC_COMMENT.search(comment)
if not found_no_sec_comment:
# there was no nosec comment
return None
matches = found_no_sec_comment.groupdict()
nosec_tests = matches.get("tests", set())
# empty set indicates that there was a nosec comment without specific
# test ids or names
test_ids = set()
if nosec_tests:
extman = extension_loader.MANAGER
# lookup tests by short code or name
for test in NOSEC_COMMENT_TESTS.finditer(nosec_tests):
test_match = test.group(1)
test_id = _find_test_id_from_nosec_string(extman, test_match)
if test_id:
test_ids.add(test_id)
return test_ids
| BanditManager |
python | dagster-io__dagster | python_modules/dagster/dagster/components/lib/shim_components/job.py | {
"start": 237,
"end": 565
} | class ____(ShimScaffolder):
def get_text(self, request: ScaffoldRequest) -> str:
return textwrap.dedent(
f"""\
import dagster as dg
@dg.job
def {request.target_path.stem}():
pass
"""
)
scaffold_with(JobScaffolder)(job)
| JobScaffolder |
python | django__django | django/db/models/expressions.py | {
"start": 51093,
"end": 52303
} | class ____(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols()
# For non-expressions e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
@property
def allowed_default(self):
return self.expression.allowed_default
| ExpressionWrapper |
python | getsentry__responses | responses/tests/test_responses.py | {
"start": 23873,
"end": 27451
} | class ____:
class CustomAdapter(requests.adapters.HTTPAdapter):
"""Classic custom adapter."""
def send(self, *a, **k):
return super().send(*a, **k)
class PositionalArgsAdapter(requests.adapters.HTTPAdapter):
"""Custom adapter that sends only positional args.
See https://github.com/getsentry/responses/issues/642 for more into.
"""
def send(
self,
request,
stream=False,
timeout=None,
verify=True,
cert=None,
proxies=None,
):
return super().send(request, stream, timeout, verify, cert, proxies)
class PositionalArgsIncompleteAdapter(requests.adapters.HTTPAdapter):
"""Custom adapter that sends only positional args.
Not all arguments are forwarded to the send method.
See https://github.com/getsentry/responses/issues/642 for more into.
"""
def send(
self,
request,
stream=False,
timeout=None,
verify=True,
# following args are intentionally not forwarded
cert=None,
proxies=None,
):
return super().send(request, stream, timeout, verify)
@pytest.mark.parametrize(
"adapter_class",
(CustomAdapter, PositionalArgsAdapter, PositionalArgsIncompleteAdapter),
)
def test_custom_adapter(self, adapter_class): # type: ignore[misc]
"""Test basic adapter implementation and that responses can patch them properly."""
@responses.activate
def run():
url = "http://example.com"
responses.add(responses.GET, url, body=b"test adapter")
# Test that the adapter is actually used
session = requests.Session()
adapter = adapter_class()
session.mount("http://", adapter)
with patch.object(adapter, "send", side_effect=adapter.send) as mock_send:
resp = session.get(url, allow_redirects=False)
assert mock_send.call_count == 1
assert_response(resp, "test adapter")
run()
def test_responses_as_context_manager():
def run():
with responses.mock:
responses.add(responses.GET, "http://example.com", body=b"test")
resp = requests.get("http://example.com")
assert_response(resp, "test")
assert len(responses.calls) == 1
assert responses.calls[0].request.url == "http://example.com/"
assert responses.calls[0].response.content == b"test"
resp = requests.get("http://example.com?foo=bar")
assert_response(resp, "test")
assert len(responses.calls) == 2
assert responses.calls[1].request.url == "http://example.com/?foo=bar"
assert responses.calls[1].response.content == b"test"
run()
assert_reset()
def test_activate_doesnt_change_signature():
def test_function(a, b=None):
return a, b
decorated_test_function = responses.activate(test_function)
assert inspect.signature(test_function) == inspect.signature(
decorated_test_function
)
assert decorated_test_function(1, 2) == test_function(1, 2)
assert decorated_test_function(3) == test_function(3)
@pytest.fixture
def my_fruit() -> str:
return "apple"
@pytest.fixture
def fruit_basket(my_fruit: str) -> "list[str]":
return ["banana", my_fruit]
@pytest.mark.usefixtures("my_fruit", "fruit_basket")
| TestAdapters |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/toolbars.py | {
"start": 1907,
"end": 5954
} | class ____:
"""
Toolbar for a system prompt.
:param prompt: Prompt to be displayed to the user.
"""
def __init__(
self,
prompt: AnyFormattedText = "Shell command: ",
enable_global_bindings: FilterOrBool = True,
) -> None:
self.prompt = prompt
self.enable_global_bindings = to_filter(enable_global_bindings)
self.system_buffer = Buffer(name=SYSTEM_BUFFER)
self._bindings = self._build_key_bindings()
self.buffer_control = BufferControl(
buffer=self.system_buffer,
lexer=SimpleLexer(style="class:system-toolbar.text"),
input_processors=[
BeforeInput(lambda: self.prompt, style="class:system-toolbar")
],
key_bindings=self._bindings,
)
self.window = Window(
self.buffer_control, height=1, style="class:system-toolbar"
)
self.container = ConditionalContainer(
content=self.window, filter=has_focus(self.system_buffer)
)
def _get_display_before_text(self) -> StyleAndTextTuples:
return [
("class:system-toolbar", "Shell command: "),
("class:system-toolbar.text", self.system_buffer.text),
("", "\n"),
]
def _build_key_bindings(self) -> KeyBindingsBase:
focused = has_focus(self.system_buffer)
# Emacs
emacs_bindings = KeyBindings()
handle = emacs_bindings.add
@handle("escape", filter=focused)
@handle("c-g", filter=focused)
@handle("c-c", filter=focused)
def _cancel(event: E) -> None:
"Hide system prompt."
self.system_buffer.reset()
event.app.layout.focus_last()
@handle("enter", filter=focused)
async def _accept(event: E) -> None:
"Run system command."
await event.app.run_system_command(
self.system_buffer.text,
display_before_text=self._get_display_before_text(),
)
self.system_buffer.reset(append_to_history=True)
event.app.layout.focus_last()
# Vi.
vi_bindings = KeyBindings()
handle = vi_bindings.add
@handle("escape", filter=focused)
@handle("c-c", filter=focused)
def _cancel_vi(event: E) -> None:
"Hide system prompt."
event.app.vi_state.input_mode = InputMode.NAVIGATION
self.system_buffer.reset()
event.app.layout.focus_last()
@handle("enter", filter=focused)
async def _accept_vi(event: E) -> None:
"Run system command."
event.app.vi_state.input_mode = InputMode.NAVIGATION
await event.app.run_system_command(
self.system_buffer.text,
display_before_text=self._get_display_before_text(),
)
self.system_buffer.reset(append_to_history=True)
event.app.layout.focus_last()
# Global bindings. (Listen to these bindings, even when this widget is
# not focussed.)
global_bindings = KeyBindings()
handle = global_bindings.add
@handle(Keys.Escape, "!", filter=~focused & emacs_mode, is_global=True)
def _focus_me(event: E) -> None:
"M-'!' will focus this user control."
event.app.layout.focus(self.window)
@handle("!", filter=~focused & vi_mode & vi_navigation_mode, is_global=True)
def _focus_me_vi(event: E) -> None:
"Focus."
event.app.vi_state.input_mode = InputMode.INSERT
event.app.layout.focus(self.window)
return merge_key_bindings(
[
ConditionalKeyBindings(emacs_bindings, emacs_mode),
ConditionalKeyBindings(vi_bindings, vi_mode),
ConditionalKeyBindings(global_bindings, self.enable_global_bindings),
]
)
def __pt_container__(self) -> Container:
return self.container
| SystemToolbar |
python | walkccc__LeetCode | solutions/1380. Lucky Numbers in a Matrix/1380.py | {
"start": 0,
"end": 246
} | class ____:
def luckyNumbers(self, matrix: list[list[int]]) -> list[int]:
for row in matrix:
minIndex = row.index(min(row))
if row[minIndex] == max(list(zip(*matrix))[minIndex]):
return [row[minIndex]]
return []
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py | {
"start": 15138,
"end": 20265
} | class ____(test.TestCase):
def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes known at graph construction time.
with self.assertRaises(error):
_ = array_ops.space_to_batch_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)
def _testDynamicShape(self, input_shape, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings)
# Try with sizes unknown at graph construction time.
input_placeholder = array_ops.placeholder(dtypes.float32)
block_shape_placeholder = array_ops.placeholder(
dtypes.int32, shape=block_shape.shape)
paddings_placeholder = array_ops.placeholder(dtypes.int32)
t = array_ops.space_to_batch_nd(input_placeholder, block_shape_placeholder,
paddings_placeholder)
with self.assertRaises(ValueError):
_ = t.eval({
input_placeholder: np.zeros(input_shape, np.float32),
block_shape_placeholder: block_shape,
paddings_placeholder: paddings
})
def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
self._testDynamicShape(input_shape, block_shape, paddings)
@test_util.run_deprecated_v1
def testBlockSize0(self):
# The block size is 0.
self._testShape([1, 2, 2], [0, 2], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeNegative(self):
self._testShape([1, 2, 2], [-1, 2], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testNegativePadding(self):
# The padding is negative.
self._testShape([1, 2, 2], [1, 1], [[0, -1], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockSizeNotDivisible(self):
# The padded size is not divisible by the block size.
self._testShape([1, 2, 3, 1], [3, 3], [[0, 0], [0, 0]], ValueError)
@test_util.run_deprecated_v1
def testBlockDimsMismatch(self):
# Shape of block_shape does not match shape of paddings.
self._testStaticShape([1, 3, 3, 1], [3, 3], [[0, 0]], ValueError)
@test_util.run_deprecated_v1
def testUnknown(self):
# Verify that input shape and paddings shape can be unknown.
_ = array_ops.space_to_batch_nd(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
# Only number of input dimensions is known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, None)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual(4, t.get_shape().ndims)
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, 2)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, None, None, 2)), [2, 3],
array_ops.placeholder(dtypes.int32))
self.assertEqual([3 * 2 * 3, None, None, 2], t.get_shape().as_list())
# Dimensions are partially known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, None, 2, 2)), [2, 3], [[1, 1], [0, 1]])
self.assertEqual([3 * 2 * 3, None, 1, 2], t.get_shape().as_list())
# Dimensions are fully known.
t = array_ops.space_to_batch_nd(
array_ops.placeholder(
dtypes.float32, shape=(3, 2, 3, 2)), [2, 3], [[1, 1], [0, 0]])
self.assertEqual([3 * 2 * 3, 2, 1, 2], t.get_shape().as_list())
@test_util.run_in_graph_and_eager_modes
def testInvalidBlockShape(self):
tf_in = constant_op.constant(
-3.5e+35, shape=[10, 20, 20], dtype=dtypes.float32)
block_shape = constant_op.constant(-10, shape=[2], dtype=dtypes.int64)
paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"block_shape must be positive"):
array_ops.space_to_batch_nd(tf_in, block_shape, paddings)
@test_util.run_in_graph_and_eager_modes
def testOutputSizeOutOfBounds(self):
tf_in = constant_op.constant(
-3.5e+35, shape=[10, 19, 22], dtype=dtypes.float32)
block_shape = constant_op.constant(
1879048192, shape=[2], dtype=dtypes.int64)
paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Negative.* dimension size caused by overflow"):
array_ops.space_to_batch_nd(tf_in, block_shape, paddings)
| SpaceToBatchNDErrorHandlingTest |
python | anthropics__anthropic-sdk-python | src/anthropic/types/server_tool_use_block.py | {
"start": 218,
"end": 368
} | class ____(BaseModel):
id: str
input: Dict[str, object]
name: Literal["web_search"]
type: Literal["server_tool_use"]
| ServerToolUseBlock |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 708,
"end": 2332
} | class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
import builtins
import contextlib
import copy
import gc
import operator
import pickle
import re
from random import randrange, shuffle
import struct
import sys
import unittest
import weakref
from collections.abc import MutableMapping
from test import mapping_tests, support
from test.support import import_helper, suppress_immortalization
py_coll = import_helper.import_fresh_module('collections',
blocked=['_collections'])
c_coll = import_helper.import_fresh_module('collections',
fresh=['_collections'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
| RedirectImportFinder |
python | apache__airflow | airflow-ctl/tests/airflow_ctl/ctl/commands/test_pool_command.py | {
"start": 4341,
"end": 7520
} | class ____:
"""Test cases for pool export command."""
def test_export_json_to_file(self, mock_client, tmp_path, capsys):
"""Test successful pool export to file with json output."""
export_file = tmp_path / "export.json"
# Create a proper pool object with dictionary attributes instead of MagicMock
pool = {
"name": "test_pool",
"slots": 1,
"description": "Test pool",
"include_deferred": True,
"occupied_slots": 0,
"running_slots": 0,
"queued_slots": 0,
"scheduled_slots": 0,
"open_slots": 1,
"deferred_slots": 0,
}
# Create a mock response with proper dictionary attributes
mock_pools = mock.MagicMock()
mock_pools.pools = [type("Pool", (), pool)()]
mock_pools.total_entries = 1
mock_client.pools.list.return_value = mock_pools
pool_command.export(args=mock.MagicMock(file=export_file, output="json"))
# Verify the exported file content
exported_data = json.loads(export_file.read_text())
assert len(exported_data) == 1
assert exported_data[0]["name"] == "test_pool"
assert exported_data[0]["slots"] == 1
assert exported_data[0]["description"] == "Test pool"
assert exported_data[0]["include_deferred"] is True
# Verify output message
captured = capsys.readouterr()
expected_output = f"Exported {len(exported_data)} pool(s) to {export_file}"
assert expected_output in captured.out.replace("\n", "")
def test_export_non_json_output(self, mock_client, tmp_path, capsys):
"""Test pool export with non-json output format."""
# Create a proper dictionary structure
mock_pool = {
"name": "test_pool",
"slots": 1,
"description": "Test pool",
"include_deferred": True,
"occupied_slots": 0,
"running_slots": 0,
"queued_slots": 0,
"scheduled_slots": 0,
"open_slots": 1,
"deferred_slots": 0,
}
# Create a mock response with a proper pools attribute
mock_pools = mock.MagicMock()
mock_pools.pools = [mock.MagicMock(**mock_pool)]
mock_pools.total_entries = 1
mock_client.pools.list.return_value = mock_pools
pool_command.export(args=mock.MagicMock(file=tmp_path / "unused.json", output="table"))
# Verify console output contains the raw dict
captured = capsys.readouterr()
assert "test_pool" in captured.out
assert "slots" in captured.out
assert "description" in captured.out
assert "include_deferred" in captured.out
def test_export_failure(self, mock_client, tmp_path):
"""Test pool export with API failure."""
export_file = tmp_path / "export.json"
mock_client.pools.list.side_effect = Exception("API Error")
with pytest.raises(SystemExit, match="Failed to export pools: API Error"):
pool_command.export(args=mock.MagicMock(file=export_file, output="json"))
| TestPoolExportCommand |
python | google__jax | tests/transfer_guard_test.py | {
"start": 3545,
"end": 8642
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
# Nearly all test methods use the deprecated device argument to JIT.
self.enter_context(jtu.ignore_warning(category=DeprecationWarning,
message="backend and device argument"))
@contextlib.contextmanager
def assertAllows(self, func_name):
"""Asserts that a transfer in the context is allowed."""
try:
yield
except Exception as e: # pylint: disable=broad-except
raise RuntimeError(
f"Expected a transfer to be allowed while running: {func_name}"
) from e
@contextlib.contextmanager
def assertLogs(self, func_name):
"""Asserts that a transfer in the context is logged and allowed."""
# Only check if the transfer is allowed until Abseil provides an interface
# to capture logs.
with self.assertAllows(func_name):
yield
@contextlib.contextmanager
def assertDisallows(self, func_name):
"""Asserts that a transfer in the context is disallowed."""
try:
with self.assertRaises(Exception):
yield
except Exception as e: # pylint: disable=broad-except
raise RuntimeError(
f"Expected a transfer to be disallowed while running: {func_name}"
) from e
def test_simple(self):
"""Simple transfer guard tests."""
with jax.transfer_guard("allow"):
with self.assertAllows("host_to_device_jnp_ones"):
jnp.ones(1)
with jax.transfer_guard("log"):
with self.assertLogs("host_to_device_jnp_ones"):
jnp.ones(1)
with jax.transfer_guard("disallow"):
with self.assertDisallows("host_to_device_jnp_ones"):
jnp.ones(1)
def test_nesting(self):
with jax.transfer_guard("disallow"):
with jax.transfer_guard("allow"):
with self.assertAllows("host_to_device_jnp_ones"):
jnp.ones(1)
with self.assertDisallows("host_to_device_jnp_ones"):
jnp.ones(1)
def test_mixed_nesting(self):
with jax.transfer_guard_host_to_device("disallow"):
with jax.transfer_guard("allow"):
with self.assertAllows("host_to_device_jnp_ones"):
jnp.ones(1)
with self.assertDisallows("host_to_device_jnp_ones"):
jnp.ones(1)
with jax.transfer_guard("disallow"):
with jax.transfer_guard_host_to_device("allow"):
with self.assertAllows("host_to_device_jnp_ones"):
jnp.ones(1)
with self.assertDisallows("host_to_device_jnp_ones"):
jnp.ones(1)
@parameterized.named_parameters(*_COMMON_TEST_PARAMETERS)
def test_allow_by_default(self, func_generator, _):
for func_name, _, func in func_generator():
with self.assertAllows(func_name):
func()
@parameterized.named_parameters(*_COMMON_TEST_PARAMETERS)
def test_allow(self, func_generator, jax_transfer_guard):
for func_name, _, func in func_generator():
with jax_transfer_guard("allow"):
with self.assertAllows(func_name):
func()
@parameterized.named_parameters(*_COMMON_TEST_PARAMETERS)
def test_log(self, func_generator, jax_transfer_guard):
for func_name, explicit, func in func_generator():
with jax_transfer_guard("log"):
if explicit:
with self.assertAllows(func_name):
func()
else:
with self.assertLogs(func_name):
func()
@parameterized.named_parameters(*_COMMON_TEST_PARAMETERS)
def test_disallow(self, func_generator, jax_transfer_guard):
for func_name, explicit, func in func_generator():
with jax_transfer_guard("disallow"):
if explicit:
with self.assertAllows(func_name):
func()
else:
with self.assertDisallows(func_name):
func()
@parameterized.named_parameters(
("device_to_host", _device_to_host_funcs,
jax.transfer_guard_device_to_host),
("all", _device_to_host_funcs, jax.transfer_guard),
)
def test_disallow_ignores_arrays_on_cpu(self, func_generator,
jax_transfer_guard):
for func_name, _, func in func_generator():
with jax_transfer_guard("allow"):
# Transfer the device array to host.
func()
with jax_transfer_guard("disallow"):
with self.assertAllows(func_name):
# No error because the array has a value on host and no new transfer
# will occur.
func()
@parameterized.named_parameters(*_COMMON_TEST_PARAMETERS)
def test_log_explicit(self, func_generator, jax_transfer_guard):
for func_name, _, func in func_generator():
with jax_transfer_guard("log_explicit"):
with self.assertLogs(func_name):
func()
@parameterized.named_parameters(*_COMMON_TEST_PARAMETERS)
def test_disallow_explicit(self, func_generator, jax_transfer_guard):
for func_name, _, func in func_generator():
with jax_transfer_guard("disallow_explicit"):
with self.assertDisallows(func_name):
func()
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| TransferGuardTest |
python | getsentry__sentry | tests/sentry/deletions/test_sentry_installation_tokens.py | {
"start": 369,
"end": 1295
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user()
self.org = self.create_organization(owner=self.user)
self.create_project(organization=self.org)
self.sentry_app = self.create_internal_integration(name="nulldb", organization=self.org)
self.sentry_app_installation = SentryAppInstallation.objects.get(sentry_app=self.sentry_app)
self.api_token = self.create_internal_integration_token(
install=self.sentry_app_installation, user=self.user
)
def test_delete_token_without_audit(self) -> None:
deletions.exec_sync(SentryAppInstallationToken.objects.get(api_token=self.api_token))
assert not ApiToken.objects.filter(id=self.api_token.id).exists()
assert not SentryAppInstallationToken.objects.filter(
api_token_id=self.api_token.id
).exists()
| TestSentryInstallationTokenDeletionTask |
python | sympy__sympy | sympy/functions/special/hyper.py | {
"start": 29967,
"end": 30711
} | class ____(HyperRep):
""" Return a representative for hyper([a, a - 1/2], [2*a], z). """
@classmethod
def _expr_small(cls, a, x):
return 2**(2*a - 1)*(1 + sqrt(1 - x))**(1 - 2*a)
@classmethod
def _expr_small_minus(cls, a, x):
return 2**(2*a - 1)*(1 + sqrt(1 + x))**(1 - 2*a)
@classmethod
def _expr_big(cls, a, x, n):
sgn = -1
if n.is_odd:
sgn = 1
n -= 1
return 2**(2*a - 1)*(1 + sgn*I*sqrt(x - 1))**(1 - 2*a) \
*exp(-2*n*pi*I*a)
@classmethod
def _expr_big_minus(cls, a, x, n):
sgn = 1
if n.is_odd:
sgn = -1
return sgn*2**(2*a - 1)*(sqrt(1 + x) + sgn)**(1 - 2*a)*exp(-2*pi*I*a*n)
| HyperRep_power2 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1248470,
"end": 1248721
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a org.config.disable_collaborators_only event."""
__schema__ = github_schema
__field_names__ = ()
| OrgConfigDisableCollaboratorsOnlyAuditEntry |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/base.py | {
"start": 7015,
"end": 7157
} | class ____(Enum):
"""Symbols indicating the type of extension that a
:class:`.InspectionAttr` is part of."""
| InspectionAttrExtensionType |
python | django__django | django/db/backends/oracle/introspection.py | {
"start": 494,
"end": 15939
} | class ____(BaseDatabaseIntrospection):
cache_bust_counter = 1
# Maps type objects to Django Field types.
data_types_reverse = {
oracledb.DB_TYPE_DATE: "DateField",
oracledb.DB_TYPE_BINARY_DOUBLE: "FloatField",
oracledb.DB_TYPE_BLOB: "BinaryField",
oracledb.DB_TYPE_CHAR: "CharField",
oracledb.DB_TYPE_CLOB: "TextField",
oracledb.DB_TYPE_INTERVAL_DS: "DurationField",
oracledb.DB_TYPE_NCHAR: "CharField",
oracledb.DB_TYPE_NCLOB: "TextField",
oracledb.DB_TYPE_NVARCHAR: "CharField",
oracledb.DB_TYPE_NUMBER: "DecimalField",
oracledb.DB_TYPE_TIMESTAMP: "DateTimeField",
oracledb.DB_TYPE_VARCHAR: "CharField",
}
def get_field_type(self, data_type, description):
if data_type == oracledb.NUMBER:
precision, scale = description[4:6]
if scale == 0:
if precision > 11:
return (
"BigAutoField"
if description.is_autofield
else "BigIntegerField"
)
elif 1 < precision < 6 and description.is_autofield:
return "SmallAutoField"
elif precision == 1:
return "BooleanField"
elif description.is_autofield:
return "AutoField"
else:
return "IntegerField"
elif scale == -127:
return "FloatField"
elif data_type == oracledb.NCLOB and description.is_json:
return "JSONField"
return super().get_field_type(data_type, description)
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute(
"""
SELECT
user_tables.table_name,
't',
user_tab_comments.comments
FROM user_tables
LEFT OUTER JOIN
user_tab_comments
ON user_tab_comments.table_name = user_tables.table_name
WHERE
NOT EXISTS (
SELECT 1
FROM user_mviews
WHERE user_mviews.mview_name = user_tables.table_name
)
UNION ALL
SELECT view_name, 'v', NULL FROM user_views
UNION ALL
SELECT mview_name, 'v', NULL FROM user_mviews
"""
)
return [
TableInfo(self.identifier_converter(row[0]), row[1], row[2])
for row in cursor.fetchall()
]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
# A default collation for the given table/view/materialized view.
cursor.execute(
"""
SELECT user_tables.default_collation
FROM user_tables
WHERE
user_tables.table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_mviews
WHERE user_mviews.mview_name = user_tables.table_name
)
UNION ALL
SELECT user_views.default_collation
FROM user_views
WHERE user_views.view_name = UPPER(%s)
UNION ALL
SELECT user_mviews.default_collation
FROM user_mviews
WHERE user_mviews.mview_name = UPPER(%s)
""",
[table_name, table_name, table_name],
)
row = cursor.fetchone()
default_table_collation = row[0] if row else ""
# user_tab_columns gives data default for columns
cursor.execute(
"""
SELECT
user_tab_cols.column_name,
user_tab_cols.data_default,
CASE
WHEN user_tab_cols.collation = %s
THEN NULL
ELSE user_tab_cols.collation
END collation,
CASE
WHEN user_tab_cols.char_used IS NULL
THEN user_tab_cols.data_length
ELSE user_tab_cols.char_length
END as display_size,
CASE
WHEN user_tab_cols.identity_column = 'YES' THEN 1
ELSE 0
END as is_autofield,
CASE
WHEN EXISTS (
SELECT 1
FROM user_json_columns
WHERE
user_json_columns.table_name = user_tab_cols.table_name AND
user_json_columns.column_name = user_tab_cols.column_name
)
THEN 1
ELSE 0
END as is_json,
user_col_comments.comments as col_comment
FROM user_tab_cols
LEFT OUTER JOIN
user_col_comments ON
user_col_comments.column_name = user_tab_cols.column_name AND
user_col_comments.table_name = user_tab_cols.table_name
WHERE user_tab_cols.table_name = UPPER(%s)
""",
[default_table_collation, table_name],
)
field_map = {
column: (
display_size,
default.rstrip() if default and default != "NULL" else None,
collation,
is_autofield,
is_json,
comment,
)
for (
column,
default,
collation,
display_size,
is_autofield,
is_json,
comment,
) in cursor.fetchall()
}
self.cache_bust_counter += 1
cursor.execute(
"SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format(
self.connection.ops.quote_name(table_name), self.cache_bust_counter
)
)
description = []
for desc in cursor.description:
name = desc[0]
(
display_size,
default,
collation,
is_autofield,
is_json,
comment,
) = field_map[name]
name %= {} # oracledb, for some reason, doubles percent signs.
if desc[1] == oracledb.NUMBER and desc[5] == -127 and desc[4] == 0:
# DecimalField with no precision.
precision = None
scale = None
else:
precision = desc[4] or 0
scale = desc[5] or 0
description.append(
FieldInfo(
self.identifier_converter(name),
desc[1],
display_size,
desc[3],
precision,
scale,
*desc[6:],
default,
collation,
is_autofield,
is_json,
comment,
)
)
return description
def identifier_converter(self, name):
"""Identifier comparison is case insensitive under Oracle."""
return name.lower()
def get_sequences(self, cursor, table_name, table_fields=()):
cursor.execute(
"""
SELECT
user_tab_identity_cols.sequence_name,
user_tab_identity_cols.column_name
FROM
user_tab_identity_cols,
user_constraints,
user_cons_columns cols
WHERE
user_constraints.constraint_name = cols.constraint_name
AND user_constraints.table_name = user_tab_identity_cols.table_name
AND cols.column_name = user_tab_identity_cols.column_name
AND user_constraints.constraint_type = 'P'
AND user_tab_identity_cols.table_name = UPPER(%s)
""",
[table_name],
)
# Oracle allows only one identity column per table.
row = cursor.fetchone()
if row:
return [
{
"name": self.identifier_converter(row[0]),
"table": self.identifier_converter(table_name),
"column": self.identifier_converter(row[1]),
}
]
# To keep backward compatibility for AutoFields that aren't Oracle
# identity columns.
for f in table_fields:
if isinstance(f, models.AutoField):
return [{"table": table_name, "column": f.column}]
return []
def get_relations(self, cursor, table_name):
"""
Return a dictionary of
{
field_name: (field_name_other_table, other_table, db_on_delete)
}
representing all foreign keys in the given table.
"""
table_name = table_name.upper()
cursor.execute(
"""
SELECT ca.column_name, cb.table_name, cb.column_name, user_constraints.delete_rule
FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb
WHERE user_constraints.table_name = %s AND
user_constraints.constraint_name = ca.constraint_name AND
user_constraints.r_constraint_name = cb.constraint_name AND
ca.position = cb.position""",
[table_name],
)
return {
self.identifier_converter(field_name): (
self.identifier_converter(rel_field_name),
self.identifier_converter(rel_table_name),
self.on_delete_types.get(on_delete),
)
for (
field_name,
rel_table_name,
rel_field_name,
on_delete,
) in cursor.fetchall()
}
def get_primary_key_columns(self, cursor, table_name):
cursor.execute(
"""
SELECT
cols.column_name
FROM
user_constraints,
user_cons_columns cols
WHERE
user_constraints.constraint_name = cols.constraint_name AND
user_constraints.constraint_type = 'P' AND
user_constraints.table_name = UPPER(%s)
ORDER BY
cols.position
""",
[table_name],
)
return [self.identifier_converter(row[0]) for row in cursor.fetchall()]
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Loop over the constraints, getting PKs, uniques, and checks
cursor.execute(
"""
SELECT
user_constraints.constraint_name,
LISTAGG(LOWER(cols.column_name), ',')
WITHIN GROUP (ORDER BY cols.position),
CASE user_constraints.constraint_type
WHEN 'P' THEN 1
ELSE 0
END AS is_primary_key,
CASE
WHEN user_constraints.constraint_type IN ('P', 'U') THEN 1
ELSE 0
END AS is_unique,
CASE user_constraints.constraint_type
WHEN 'C' THEN 1
ELSE 0
END AS is_check_constraint
FROM
user_constraints
LEFT OUTER JOIN
user_cons_columns cols
ON user_constraints.constraint_name = cols.constraint_name
WHERE
user_constraints.constraint_type = ANY('P', 'U', 'C')
AND user_constraints.table_name = UPPER(%s)
GROUP BY user_constraints.constraint_name, user_constraints.constraint_type
""",
[table_name],
)
for constraint, columns, pk, unique, check in cursor.fetchall():
constraint = self.identifier_converter(constraint)
constraints[constraint] = {
"columns": columns.split(","),
"primary_key": pk,
"unique": unique,
"foreign_key": None,
"check": check,
"index": unique, # All uniques come with an index
}
# Foreign key constraints
cursor.execute(
"""
SELECT
cons.constraint_name,
LISTAGG(LOWER(cols.column_name), ',')
WITHIN GROUP (ORDER BY cols.position),
LOWER(rcols.table_name),
LOWER(rcols.column_name)
FROM
user_constraints cons
INNER JOIN
user_cons_columns rcols
ON rcols.constraint_name = cons.r_constraint_name AND rcols.position = 1
LEFT OUTER JOIN
user_cons_columns cols
ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'R' AND
cons.table_name = UPPER(%s)
GROUP BY cons.constraint_name, rcols.table_name, rcols.column_name
""",
[table_name],
)
for constraint, columns, other_table, other_column in cursor.fetchall():
constraint = self.identifier_converter(constraint)
constraints[constraint] = {
"primary_key": False,
"unique": False,
"foreign_key": (other_table, other_column),
"check": False,
"index": False,
"columns": columns.split(","),
}
# Now get indexes
cursor.execute(
"""
SELECT
ind.index_name,
LOWER(ind.index_type),
LOWER(ind.uniqueness),
LISTAGG(LOWER(cols.column_name), ',')
WITHIN GROUP (ORDER BY cols.column_position),
LISTAGG(cols.descend, ',') WITHIN GROUP (ORDER BY cols.column_position)
FROM
user_ind_columns cols, user_indexes ind
WHERE
cols.table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_constraints cons
WHERE ind.index_name = cons.index_name
) AND cols.index_name = ind.index_name
GROUP BY ind.index_name, ind.index_type, ind.uniqueness
""",
[table_name],
)
for constraint, type_, unique, columns, orders in cursor.fetchall():
constraint = self.identifier_converter(constraint)
constraints[constraint] = {
"primary_key": False,
"unique": unique == "unique",
"foreign_key": None,
"check": False,
"index": True,
"type": "idx" if type_ == "normal" else type_,
"columns": columns.split(","),
"orders": orders.split(","),
}
return constraints
| DatabaseIntrospection |
python | numba__numba | numba/cuda/tests/cudadrv/test_cuda_devicerecord.py | {
"start": 3808,
"end": 5766
} | class ____(CUDATestCase):
'''
Test operation of device arrays on structured arrays.
'''
def _createSampleArrays(self):
self.sample1d = cuda.device_array(3, dtype=recordtype)
self.samplerec1darr = cuda.device_array(1, dtype=recordwitharray)[0]
self.samplerecmat = cuda.device_array(1,dtype=recwithmat)[0]
def setUp(self):
super().setUp()
self._createSampleArrays()
ary = self.sample1d
for i in range(ary.size):
x = i + 1
ary[i]['a'] = x / 2
ary[i]['b'] = x
ary[i]['c'] = x * 1j
ary[i]['d'] = str(x) * N_CHARS
def test_structured_array1(self):
ary = self.sample1d
for i in range(self.sample1d.size):
x = i + 1
self.assertEqual(ary[i]['a'], x / 2)
self.assertEqual(ary[i]['b'], x)
self.assertEqual(ary[i]['c'], x * 1j)
self.assertEqual(ary[i]['d'], str(x) * N_CHARS)
def test_structured_array2(self):
ary = self.samplerec1darr
ary['g'] = 2
ary['h'][0] = 3.0
ary['h'][1] = 4.0
self.assertEqual(ary['g'], 2)
self.assertEqual(ary['h'][0], 3.0)
self.assertEqual(ary['h'][1], 4.0)
def test_structured_array3(self):
ary = self.samplerecmat
mat = np.array([[5.0, 10.0, 15.0],
[20.0, 25.0, 30.0],
[35.0, 40.0, 45.0]],
dtype=np.float32).reshape(3,3)
ary['j'][:] = mat
np.testing.assert_equal(ary['j'], mat)
def test_structured_array4(self):
arr = np.zeros(1, dtype=recwithrecwithmat)
d_arr = cuda.to_device(arr)
d_arr[0]['y']['i'] = 1
self.assertEqual(d_arr[0]['y']['i'], 1)
d_arr[0]['y']['j'][0, 0] = 2.0
self.assertEqual(d_arr[0]['y']['j'][0, 0], 2.0)
if __name__ == '__main__':
unittest.main()
| TestRecordDtypeWithStructArrays |
python | sanic-org__sanic | sanic/headers.py | {
"start": 5508,
"end": 7979
} | class ____:
"""A matching result of a MIME string against a header.
This class is a representation of a matching result of a MIME string
against a header. It encapsulates the MIME string, the header, and
provides methods for matching against other MIME strings.
Args:
mime (str): The MIME string to match.
header (MediaType): The header to match against, if any.
"""
def __init__(self, mime: str, header: Optional[MediaType]):
self.mime = mime
self.header = header
def __repr__(self):
return f"<{self} matched {self.header}>" if self else "<no match>"
def __str__(self):
return self.mime
def __bool__(self):
return self.header is not None
def __eq__(self, other: Any) -> bool:
try:
comp, other_accept = self._compare(other)
except TypeError:
return False
return bool(
comp
and (
(self.header and other_accept.header)
or (not self.header and not other_accept.header)
)
)
def _compare(self, other) -> tuple[bool, Matched]:
if isinstance(other, str):
parsed = Matched.parse(other)
if self.mime == other:
return True, parsed
other = parsed
if isinstance(other, Matched):
return self.header == other.header, other
raise TypeError(
"Comparison not supported between unequal "
f"mime types of '{self.mime}' and '{other}'"
)
def match(self, other: Union[str, Matched]) -> Optional[Matched]:
"""Match this MIME string against another MIME string.
Check if this MIME string matches the given MIME string. Wildcards are supported both ways on both type and subtype.
Args:
other (str): A MIME string to match.
Returns:
Matched: Returns `self` if the MIME strings are compatible.
None: Returns `None` if the MIME strings are not compatible.
""" # noqa: E501
accept = Matched.parse(other) if isinstance(other, str) else other
if not self.header or not accept.header:
return None
if self.header.match(accept.header):
return accept
return None
@classmethod
def parse(cls, raw: str) -> Matched:
media_type = MediaType._parse(raw)
return cls(raw, media_type)
| Matched |
python | ansible__ansible | test/units/plugins/cache/test_cache.py | {
"start": 3305,
"end": 4620
} | class ____(TestCachePluginAdjudicator):
cache_prefix = ''
def setUp(self):
self.cache_dir = tempfile.mkdtemp(prefix='ansible-plugins-cache-')
self.cache = self.get_cache(self.cache_prefix)
self.cache['cache_key'] = {'key1': 'value1', 'key2': 'value2'}
self.cache['cache_key_2'] = {'key': 'value'}
def get_cache(self, prefix):
return CachePluginAdjudicator(
plugin_name='jsonfile', _uri=self.cache_dir,
_prefix=prefix)
def test_keys(self):
# A cache without a prefix will consider all files in the cache
# directory as valid cache entries.
cache_writer = self.get_cache(self.cache_prefix)
cache_writer["no_prefix"] = dict(a=1)
cache_writer["special_test"] = dict(b=2)
cache_writer.update_cache_if_changed()
# The plugin does not know the CachePluginAdjudicator entries.
assert sorted(self.cache._plugin.keys()) == [
'no_prefix', 'special_test']
assert 'no_prefix' in self.cache
assert 'special_test' in self.cache
assert 'test' not in self.cache
assert self.cache['no_prefix'] == dict(a=1)
assert self.cache['special_test'] == dict(b=2)
def tearDown(self):
shutil.rmtree(self.cache_dir)
| TestJsonFileCache |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-webflow/source_webflow/source.py | {
"start": 1429,
"end": 2425
} | class ____(HttpStream, ABC):
"""
This class represents a stream output by the connector.
This is an abstract base class meant to contain all the common functionality at the API level e.g: the API base URL,
pagination strategy, parsing responses etc..
Each stream should extend this class (or another abstract subclass of it) to specify behavior unique to that stream.
"""
url_base = "https://api.webflow.com/"
# The following call is need to fix what appears to be a bug in http.py line 119
# Bug reported at: https://github.com/airbytehq/airbyte/issues/13283
@property
def authenticator(self) -> WebflowTokenAuthenticator:
return self._session.auth
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
"""
Common params e.g. pagination size etc.
"""
return {}
| WebflowStream |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py | {
"start": 25062,
"end": 25406
} | class ____(MetadataValue[float]):
"""Container class for metadata value that's a unix timestamp.
Args:
value (float): Seconds since the unix epoch.
"""
value: PublicAttr[float] # type: ignore
@whitelist_for_serdes(storage_name="DagsterPipelineRunMetadataEntryData")
@public
@record(kw_only=False)
| TimestampMetadataValue |
python | ray-project__ray | rllib/examples/envs/classes/mock_env.py | {
"start": 171,
"end": 842
} | class ____(gym.Env):
"""Mock environment for testing purposes.
Observation=0, reward=1.0, episode-len is configurable.
Actions are ignored.
"""
def __init__(self, episode_length, config=None):
self.episode_length = episode_length
self.config = config
self.i = 0
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
def reset(self, *, seed=None, options=None):
self.i = 0
return 0, {}
def step(self, action):
self.i += 1
terminated = truncated = self.i >= self.episode_length
return 0, 1.0, terminated, truncated, {}
| MockEnv |
python | django__django | tests/queries/tests.py | {
"start": 173450,
"end": 174560
} | class ____(TestCase):
def test_values_no_promotion_for_existing(self):
qs = Node.objects.filter(parent__parent__isnull=False)
self.assertIn(" INNER JOIN ", str(qs.query))
qs = qs.values("parent__parent__id")
self.assertIn(" INNER JOIN ", str(qs.query))
# Make sure there is a left outer join without the filter.
qs = Node.objects.values("parent__parent__id")
self.assertIn(" LEFT OUTER JOIN ", str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = ObjectB.objects.values("objecta__name")
self.assertIn(" INNER JOIN ", str(qs.query))
def test_ticket_21376(self):
a = ObjectA.objects.create()
ObjectC.objects.create(objecta=a)
qs = ObjectC.objects.filter(
Q(objecta=a) | Q(objectb__objecta=a),
)
qs = qs.filter(
Q(objectb=1) | Q(objecta=a),
)
self.assertEqual(qs.count(), 1)
tblname = connection.ops.quote_name(ObjectB._meta.db_table)
self.assertIn(" LEFT OUTER JOIN %s" % tblname, str(qs.query))
| ValuesJoinPromotionTests |
python | tqdm__tqdm | tests/tests_synchronisation.py | {
"start": 783,
"end": 2208
} | class ____(Event):
"""patched `threading.Event` where `wait()` uses `Time.fake_sleep()`"""
def wait(self, timeout=None):
"""uses Time.fake_sleep"""
if timeout is not None:
Time.fake_sleep(timeout)
return self.is_set()
def patch_sleep(func):
"""Temporarily makes TMonitor use Time.fake_sleep"""
@wraps(func)
def inner(*args, **kwargs):
"""restores TMonitor on completion regardless of Exceptions"""
TMonitor._test["time"] = Time.time
TMonitor._test["Event"] = FakeEvent
if tqdm.monitor:
assert not tqdm.monitor.get_instances()
tqdm.monitor.exit()
del tqdm.monitor
tqdm.monitor = None
try:
return func(*args, **kwargs)
finally:
# Check that class var monitor is deleted if no instance left
tqdm.monitor_interval = 10
if tqdm.monitor:
assert not tqdm.monitor.get_instances()
tqdm.monitor.exit()
del tqdm.monitor
tqdm.monitor = None
TMonitor._test.pop("Event")
TMonitor._test.pop("time")
return inner
def cpu_timify(t, timer=Time):
"""Force tqdm to use the specified timer instead of system-wide time"""
t._time = timer.time
t._sleep = timer.fake_sleep
t.start_t = t.last_print_t = t._time()
return timer
| FakeEvent |
python | pytorch__pytorch | torch/_higher_order_ops/utils.py | {
"start": 41064,
"end": 47079
} | class ____:
def __init__(self, op: HigherOrderOperator, schema: HopSchema):
assert isinstance(op, HigherOrderOperator), op
self._op = op
# Using "_" to be consistent with how we access _schema of OpOverload
self._schema = schema
def __call__(self, *args, **kwargs):
return self._op(*args, **kwargs)
@staticmethod
def create(hop: HigherOrderOperator, *args, **kwargs):
return HopInstance(hop, hop.gen_schema(*args, **kwargs))
# This call_op can be used to call a HopInstance with
# flat args and kwargs. We need to make use of the hop's schema's tree_spec
# to unflatten the args and kwargs before calling the hop.
def call_op(op: Union[OpOverload, HopInstance], args, kwargs):
if isinstance(op, OpOverload):
return op(*args, **kwargs)
assert isinstance(op, HopInstance), op
schema = op._schema
bound_args = list(args)
bound_kwargs = {}
for arg in schema.arguments[len(bound_args) :]:
assert arg.name in kwargs, (arg.name, kwargs)
val = kwargs[arg.name]
if not arg.kwarg_only:
bound_args.append(val)
else:
bound_kwargs[arg.name] = val
if schema.tree_spec is not None:
assert len(bound_args) == len(schema.arguments) and len(bound_kwargs) == 0
args, kwargs = pytree.tree_unflatten(bound_args, schema.tree_spec)
return op(*args, **kwargs)
else:
assert len(bound_args) + len(bound_kwargs) == len(schema.arguments)
return op(*bound_args, **bound_kwargs)
def materialize_as_graph(
fn: Callable,
args: tuple[Any, ...],
include_key_set: Optional[torch._C.DispatchKeySet] = None,
exclude_key_set: Optional[torch._C.DispatchKeySet] = None,
force_enable_grad=False,
) -> torch.fx.GraphModule:
if include_key_set is None:
include_key_set = torch._C._dispatch_tls_local_include_set()
if exclude_key_set is None:
exclude_key_set = torch._C._dispatch_tls_local_exclude_set()
@torch._dynamo.disable(recursive=True, reason=None)
def _materialize_as_graph_inner():
with suspend_functionalization(), disable_functional_mode():
with disable_proxy_modes_tracing():
unfunc_t = [_from_fun(arg) for arg in args]
with contextlib.ExitStack() as stack:
stack.enter_context(
torch.utils._python_dispatch._disable_current_modes()
)
stack.enter_context(
torch._C._ForceDispatchKeyGuard(include_key_set, exclude_key_set),
)
if force_enable_grad:
stack.enter_context(torch.enable_grad())
# fake_mode is needed because parent tracer's fake_mode might
# be None but the associated inputs have fake mode or there
# is a global tracing context with fake mode. We nneed to
# make sure the fake mode when tracing subgraph is consistent.
if fake_mode := detect_fake_mode(unfunc_t):
stack.enter_context(fake_mode)
return _maybe_reenter_make_fx(fn)(*unfunc_t)
gm = _materialize_as_graph_inner()
assert gm is not None
return gm
def materialize_callable_in_args(op: HopInstance, args, kwargs):
schema = op._schema
hop = op._op
flat_args, flat_spec = pytree.tree_flatten((args, kwargs))
def wrapped_fn(*flat_args):
return call_op(op, args, kwargs)
# We need to trace the higher order op in order to materilaize the callable inputs that
# are a callable (e.g. after functionalization key)
gm = reenter_make_fx(wrapped_fn)(*flat_args)
hop_node = gm.graph.find_nodes(op="call_function", target=hop)[0]
arg_proxies = pytree.tree_leaves((hop_node.args, hop_node.kwargs))
assert isinstance(schema, torch._C.FunctionSchema) and len(arg_proxies) == len(
schema.arguments
)
# call_op preserves ordering of proxies via schema
materialized_args = []
for i, proxy in enumerate(arg_proxies):
if (
isinstance(proxy, torch.fx.Node)
and proxy.op == "get_attr"
and isinstance(getattr(gm, proxy.target), torch.fx.GraphModule) # type: ignore[arg-type]
):
assert callable(flat_args[i]), (schema, args, kwargs)
materialized_args.append(getattr(gm, proxy.target)) # type: ignore[arg-type]
else:
materialized_args.append(flat_args[i])
return pytree.tree_unflatten(materialized_args, flat_spec)
def has_user_subclass(args, allowed_subclasses):
"""Check if any tensor arguments are user subclasses.
This is used to determine if tensor subclasses should get a chance to run
their own implementation first before falling back to the default implementation.
Args:
args: Arguments to check (will be flattened with pytree)
allowed_subclasses: Tuple of allowed subclass types
Returns:
True if user tensor subclasses are found, False otherwise
"""
flat_args, _ = pytree.tree_flatten(args)
val = any(
isinstance(a, torch.Tensor)
and type(a) is not torch.Tensor
and not isinstance(a, allowed_subclasses)
for a in flat_args
)
return val
def _has_gen_schema(op: HigherOrderOperator):
# There is an InvokeQuant argument we cannot gen_schema.
if op is torch.ops.higher_order.invoke_quant_packed:
return False
method = "gen_schema"
return hasattr(type(op), method) and getattr(type(op), method) is not getattr(
HigherOrderOperator, method
)
def filter_with_masks(data: list[Optional[torch.Tensor]], masks: list[bool]):
assert len(data) == len(masks)
return [item for item, keep in zip(data, masks) if keep]
def fill_none_with_masks(data: list[Optional[torch.Tensor]], masks: list[bool]):
data_iter = iter(data)
return [next(data_iter) if kept else None for kept in masks]
| HopInstance |
python | python-visualization__folium | folium/plugins/overlapping_marker_spiderfier.py | {
"start": 195,
"end": 3543
} | class ____(JSCSSMixin, MacroElement):
"""
A plugin that handles overlapping markers on a map by spreading them out in a spiral or circle pattern when clicked.
This plugin is useful when you have multiple markers in close proximity that would otherwise be difficult to interact with.
When a user clicks on a cluster of overlapping markers, they spread out in a 'spider' pattern, making each marker
individually accessible.
Markers are automatically identified and managed by the plugin, so there is no need to add them separately.
Simply add the plugin to the map using `oms.add_to(map)`.
Parameters
----------
keep_spiderfied : bool, default True
If true, markers stay spiderfied after clicking.
nearby_distance : int, default 20
Pixels away from a marker that is considered overlapping.
leg_weight : float, default 1.5
Weight of the spider legs.
circle_spiral_switchover : int, default 9
Number of markers at which to switch from circle to spiral pattern.
Example
-------
>>> oms = OverlappingMarkerSpiderfier(
... keep_spiderfied=True, nearby_distance=30, leg_weight=2.0
... )
>>> oms.add_to(map)
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
(function () {
try {
var oms = new OverlappingMarkerSpiderfier(
{{ this._parent.get_name() }},
{{ this.options|tojson }}
);
oms.addListener('spiderfy', function() {
{{ this._parent.get_name() }}.closePopup();
});
{%- for marker in this.markers %}
oms.addMarker({{ marker.get_name() }});
{%- endfor %}
} catch (error) {
console.error('Error initializing OverlappingMarkerSpiderfier:', error);
}
})();
{% endmacro %}
"""
)
default_js = [
(
"overlappingmarkerjs",
"https://cdnjs.cloudflare.com/ajax/libs/OverlappingMarkerSpiderfier-Leaflet/0.2.6/oms.min.js",
)
]
def __init__(
self,
keep_spiderfied: bool = True,
nearby_distance: int = 20,
leg_weight: float = 1.5,
circle_spiral_switchover: int = 9,
**kwargs
):
super().__init__()
self._name = "OverlappingMarkerSpiderfier"
self.options = parse_options(
keep_spiderfied=keep_spiderfied,
nearby_distance=nearby_distance,
leg_weight=leg_weight,
circle_spiral_switchover=circle_spiral_switchover,
**kwargs
)
def add_to(
self, parent: Element, name: Optional[str] = None, index: Optional[int] = None
) -> Element:
self._parent = parent
self.markers = self._get_all_markers(parent)
return super().add_to(parent, name=name, index=index)
def _get_all_markers(self, element: Element) -> list:
markers = []
for child in element._children.values():
if isinstance(child, Marker):
markers.append(child)
elif hasattr(child, "_children"):
markers.extend(self._get_all_markers(child))
return markers
| OverlappingMarkerSpiderfier |
python | getsentry__sentry | src/sentry/ingest/billing_metrics_consumer.py | {
"start": 732,
"end": 1053
} | class ____(ProcessingStrategyFactory[KafkaPayload]):
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
return BillingTxCountMetricConsumerStrategy(CommitOffsets(commit))
| BillingMetricsConsumerStrategyFactory |
python | getsentry__sentry | src/sentry/organizations/services/organization/impl.py | {
"start": 32976,
"end": 33519
} | class ____(OrganizationSignalService):
def schedule_signal(
self, signal: Signal, organization_id: int, args: Mapping[str, int | str | None]
) -> None:
_signal = RpcOrganizationSignal.from_signal(signal)
transaction.on_commit(
lambda: DatabaseBackedOrganizationService().send_signal(
organization_id=organization_id,
signal=_signal,
args=args,
),
router.db_for_write(Organization),
)
| OnCommitBackedOrganizationSignalService |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/executors/batch/test_utils.py | {
"start": 2244,
"end": 2706
} | class ____:
"""Tests for the BatchJobInfo dataclass."""
def test_batch_job_info_creation(self):
"""Test BatchJobInfo object creation."""
cmd = ["airflow", "tasks", "run"]
queue = "default_queue"
config = {"key": "value"}
job_info = BatchJobInfo(cmd=cmd, queue=queue, config=config)
assert job_info.cmd == cmd
assert job_info.queue == queue
assert job_info.config == config
| TestBatchJobInfo |
python | scipy__scipy | benchmarks/benchmarks/cluster.py | {
"start": 3529,
"end": 4269
} | class ____(XPBenchmark):
if is_xslow():
shape = [(10, 10), (32, 32), (100, 100), (320, 320),
(1000, 1000), (3200, 3200), (10_000, 10_000)]
else:
shape = [(10, 10), (100, 100)]
param_names = (*XPBenchmark.param_names, "shape")
params = (*XPBenchmark.params, shape)
def setup(self, backend, shape):
super().setup(backend, whiten, static_argnames="check_finite")
rng = np.random.default_rng(0)
obs = self.xp.asarray(rng.uniform(0, 100.0, size=shape))
self.obs = self.synchronize(obs)
if self.warmup:
self.func(self.obs, check_finite=False)
def time_whiten(self, backend, shape):
self.func(self.obs, check_finite=False)
| Whiten |
python | numpy__numpy | numpy/distutils/tests/test_exec_command.py | {
"start": 370,
"end": 843
} | class ____:
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
def __enter__(self):
self.old_stdout = sys.stdout
sys.stdout = self._stdout
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush()
sys.stdout = self.old_stdout
# note: closing sys.stdout won't close it.
self._stdout.close()
| redirect_stdout |
python | Netflix__metaflow | metaflow/cmd/develop/stub_generator.py | {
"start": 1818,
"end": 7908
} | class ____:
def __init__(self, start: int, end: int):
self._start = start
self._end = end
def start(self):
return self._start
def end(self):
return self._end
def type_var_to_str(t: TypeVar) -> str:
bound_name = None
if t.__bound__ is not None:
if isinstance(t.__bound__, typing.ForwardRef):
bound_name = t.__bound__.__forward_arg__
else:
bound_name = t.__bound__.__name__
return 'typing.TypeVar("%s", %scontravariant=%s, covariant=%s%s)' % (
t.__name__,
'bound="%s", ' % bound_name if t.__bound__ else "",
t.__contravariant__,
t.__covariant__,
", ".join([""] + [c.__name__ for c in t.__constraints__]),
)
def new_type_to_str(t: typing.NewType) -> str:
return 'typing.NewType("%s", %s)' % (t.__name__, t.__supertype__.__name__)
def descend_object(object: str, options: Iterable[str]):
# Returns true if:
# - options contains a prefix of object
# - the component after the prefix does not start with _
for opt in options:
new_object = object.removeprefix(opt)
if len(new_object) == len(object):
# There was no prefix, so we continue
continue
# Using [1] to skip the inevitable "."
if len(new_object) == 0 or new_object[1] != "_":
return True
return False
def parse_params_from_doc(doc: str) -> Tuple[List[inspect.Parameter], bool]:
parameters = []
no_arg_version = True
for line in doc.splitlines():
if non_indented_line.match(line):
match = param_name_type.match(line)
arg_name = type_name = is_optional = default = None
default_set = False
if match is not None:
arg_name = match.group("name")
type_name = match.group("type")
if type_name is not None:
type_detail = type_annotations.match(type_name)
if type_detail is not None:
type_name = type_detail.group("type")
is_optional = type_detail.group("optional") is not None
default = type_detail.group("default")
if default:
default_set = True
try:
default = eval(default)
except:
pass
try:
type_name = eval(type_name)
except:
pass
parameters.append(
inspect.Parameter(
name=arg_name,
kind=inspect.Parameter.KEYWORD_ONLY,
default=(
default
if default_set
else None if is_optional else inspect.Parameter.empty
),
annotation=(Optional[type_name] if is_optional else type_name),
)
)
if not default_set:
# If we don't have a default set for any parameter, we can't
# have a no-arg version since the function would be incomplete
no_arg_version = False
return parameters, no_arg_version
def split_docs(
raw_doc: str, boundaries: List[Tuple[str, Union[StartEnd, re.Match]]]
) -> Dict[str, str]:
docs = dict()
boundaries.sort(key=lambda x: x[1].start())
section_start = 0
for idx in range(1, len(boundaries)):
docs[boundaries[idx - 1][0]] = raw_doc[
section_start : boundaries[idx][1].start()
]
section_start = boundaries[idx][1].end()
docs[boundaries[-1][0]] = raw_doc[section_start:]
return docs
def parse_add_to_docs(
raw_doc: str,
) -> Dict[str, Union[Tuple[inspect.Signature, str], str]]:
prop = None
return_type = None
property_indent = None
doc = []
add_to_docs = dict() # type: Dict[str, Union[str, Tuple[inspect.Signature, str]]]
def _add():
if prop:
add_to_docs[prop] = (
inspect.Signature(
[
inspect.Parameter(
"self", inspect.Parameter.POSITIONAL_OR_KEYWORD
)
],
return_annotation=return_type,
),
"\n".join(doc),
)
for line in raw_doc.splitlines():
# Parse stanzas that look like the following:
# <property-name> -> type
# indented doc string
if property_indent is not None and (
line.startswith(property_indent + " ") or line.strip() == ""
):
offset = len(property_indent)
if line.lstrip().startswith("@@ "):
line = line.replace("@@ ", "")
doc.append(line[offset:].rstrip())
else:
if line.strip() == 0:
continue
if prop:
# Ends a property stanza
_add()
# Now start a new one
line = line.rstrip()
property_indent = line[: len(line) - len(line.lstrip())]
# Either this has a -> to denote a property or it is a pure name
# to denote a reference to a function (starting with #)
line = line.lstrip()
if line.startswith("#"):
# The name of the function is the last part like metaflow.deployer.run
add_to_docs[line.split(".")[-1]] = line[1:]
continue
# This is a line so we split it using "->"
prop, return_type = line.split("->")
prop = prop.strip()
return_type = return_type.strip()
doc = []
_add()
return add_to_docs
def add_indent(indentation: str, text: str) -> str:
return "\n".join([indentation + line for line in text.splitlines()])
| StartEnd |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 116980,
"end": 118970
} | class ____(WebTestCase):
def get_handlers(self):
class EtagHandler(RequestHandler):
def get(self, computed_etag):
self.write(computed_etag)
def compute_etag(self):
return self._write_buffer[0]
return [("/etag/(.*)", EtagHandler)]
def test_wildcard_etag(self):
computed_etag = '"xyzzy"'
etags = "*"
self._test_etag(computed_etag, etags, 304)
def test_strong_etag_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy"'
self._test_etag(computed_etag, etags, 304)
def test_multiple_strong_etag_match(self):
computed_etag = '"xyzzy1"'
etags = '"xyzzy1", "xyzzy2"'
self._test_etag(computed_etag, etags, 304)
def test_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1"'
self._test_etag(computed_etag, etags, 200)
def test_multiple_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1", "xyzzy2"'
self._test_etag(computed_etag, etags, 200)
def test_weak_etag_match(self):
computed_etag = '"xyzzy1"'
etags = 'W/"xyzzy1"'
self._test_etag(computed_etag, etags, 304)
def test_multiple_weak_etag_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self._test_etag(computed_etag, etags, 304)
def test_weak_etag_not_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1"'
self._test_etag(computed_etag, etags, 200)
def test_multiple_weak_etag_not_match(self):
computed_etag = '"xyzzy3"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self._test_etag(computed_etag, etags, 200)
def _test_etag(self, computed_etag, etags, status_code):
response = self.fetch(
"/etag/" + computed_etag, headers={"If-None-Match": etags}
)
self.assertEqual(response.code, status_code)
| CacheTest |
python | doocs__leetcode | solution/2300-2399/2321.Maximum Score Of Spliced Array/Solution.py | {
"start": 0,
"end": 491
} | class ____:
def maximumsSplicedArray(self, nums1: List[int], nums2: List[int]) -> int:
def f(nums1, nums2):
d = [a - b for a, b in zip(nums1, nums2)]
t = mx = d[0]
for v in d[1:]:
if t > 0:
t += v
else:
t = v
mx = max(mx, t)
return mx
s1, s2 = sum(nums1), sum(nums2)
return max(s2 + f(nums1, nums2), s1 + f(nums2, nums1))
| Solution |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-groups-entering-a-competition.py | {
"start": 36,
"end": 308
} | class ____(object):
def maximumGroups(self, grades):
"""
:type grades: List[int]
:rtype: int
"""
# (1+x)*x/2 <= len(grades)
# => x <= ((1+8*len(grades))**0.5-1)/2.0
return int(((1+8*len(grades))**0.5-1)/2.0)
| Solution |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/trace_type_test.py | {
"start": 14820,
"end": 15940
} | class ____(test.TestCase):
def testTensorSpecs(self):
self.assertEqual(
trace_type.from_value(
tensor_spec.TensorSpec(shape=None),
trace_type.InternalTracingContext(is_legacy_signature=True)),
tensor_spec.TensorSpec(shape=None))
def testListofTensorSpecs(self):
self.assertEqual(
trace_type.from_value([
tensor_spec.TensorSpec(shape=None),
tensor_spec.TensorSpec(shape=None)
], trace_type.InternalTracingContext(is_legacy_signature=True)),
default_types.List(
tensor_spec.TensorSpec(shape=None),
tensor_spec.TensorSpec(shape=None)))
def testDictofTensorSpecs(self):
self.assertEqual(
trace_type.from_value(
{
'a': tensor_spec.TensorSpec(shape=None),
'b': tensor_spec.TensorSpec(shape=None)
}, trace_type.InternalTracingContext(is_legacy_signature=True)),
default_types.Dict({
'a': tensor_spec.TensorSpec(shape=None),
'b': tensor_spec.TensorSpec(shape=None)
}))
| SignatureToTraceTypeTest |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 23190,
"end": 23703
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, jwt: str):
"""Airbyte Source for Zoom Singer.
Documentation can be found at https://docs.airbyte.com/integrations/sources/zoom
Args:
name (str): The name of the destination.
jwt (str): Zoom JWT Token. See the docs for more information on how to obtain this key.
"""
self.jwt = check.str_param(jwt, "jwt")
super().__init__("Zoom Singer", name)
| ZoomSingerSource |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 62956,
"end": 63263
} | class ____(object):
def __init__(self):
self.guessing = 0
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### TreeParser ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| TreeParserSharedInputState |
python | pytorch__pytorch | torch/distributed/checkpoint/filesystem.py | {
"start": 6704,
"end": 15562
} | class ____:
"""
This is experimental, and will likely move elsewhere in the
future. It lives here to minimize changes while we are still
learning and gathering feedback.
"""
def __init__(
self, extensions: Optional[Sequence[StreamTransformExtension]] = None
) -> None:
"""
If the extensions arg is None, this means the implementation
should provide whatever defaults it chooses. An empty
sequence indicates no extensions should be used. At this
time, the default extensions sequence is empty.
"""
self.extensions = () if extensions is None else extensions
def transform_save_stream(
self, write_item: WriteItem, raw_stream: io.IOBase
) -> tuple[IO[bytes], list[str]]:
# In order to avoid leaking fds, transformers' close must
# cascade to wrapped streams, but since this function can
# append to the raw stream, we can't close the actual stream.
# So, we use this to put a wrapper around the raw stream's
# close() to make it a noop, and it gets closed once all files
# are appended.
class NoCloseWriter(io.IOBase):
def __init__(self, raw: io.IOBase):
self.raw = raw
def writeable(self) -> bool:
return True
def write(self, b: Buffer) -> int:
return self.raw.write(b)
def close(self):
self.flush()
self.raw.flush()
# but not close.
transform_to = cast(IO[bytes], NoCloseWriter(raw_stream))
for ex in self.extensions:
transform_to = ex.transform_to(transform_to)
return (transform_to, [ex.get_descriptor() for ex in reversed(self.extensions)])
def _item_size(item: WriteItem) -> int:
size = 1
if item.tensor_data is None:
raise AssertionError("WriteItem tensor_data must not be None")
# can't use math.prod as PT needs to support older python
for s in item.tensor_data.size:
size *= s
dtype = item.tensor_data.properties.dtype
return size * torch._utils._element_size(dtype)
def _split_by_size_and_type(bins: int, items: list[WriteItem]) -> list[list[WriteItem]]:
if bins == 1:
return [items]
bytes_w = [wi for wi in items if wi.type == WriteItemType.BYTE_IO]
tensor_w = [wi for wi in items if wi.type != WriteItemType.BYTE_IO]
buckets: list[list[WriteItem]] = [[] for _ in range(bins)]
bucket_sizes = [0 for _ in range(bins)]
tensor_w.sort(key=_item_size, reverse=True)
for i, wi in enumerate(bytes_w):
buckets[i % bins].append(wi)
for wi in tensor_w:
# TODO replace with headq
idx = min(enumerate(bucket_sizes), key=operator.itemgetter(1))[0]
buckets[idx].append(wi)
bucket_sizes[idx] += _item_size(wi)
return buckets
def _write_item(
transforms: _StorageWriterTransforms,
stream: io.IOBase,
data: Union[io.BytesIO, torch.Tensor],
write_item: WriteItem,
storage_key: str,
serialization_format: SerializationFormat,
) -> WriteResult:
offset = stream.tell()
(transform_to, transform_descriptors) = transforms.transform_save_stream(
write_item, stream
)
if write_item.type == WriteItemType.BYTE_IO:
if not isinstance(data, io.BytesIO):
raise AssertionError("Data must be io.BytesIO for BYTE_IO write items")
transform_to.write(data.getbuffer())
else:
if not isinstance(data, torch.Tensor):
raise AssertionError(
"Data must be torch.Tensor for non-BYTE_IO write items"
)
if data.device != torch.device("cpu"):
raise AssertionError("Tensor must be on CPU device")
if serialization_format == SerializationFormat.TORCH_SAVE:
torch.save(data, transform_to)
transform_to.close()
if serialization_format == SerializationFormat.TORCH_SAVE or isinstance(
data, io.BytesIO
):
length = stream.tell() - offset
else:
length = data.numel() * data.element_size()
# For consistency with earlier versions, leave this field out of the
# metadata if there are no extensions.
info_transform_descriptors = (
None if len(transform_descriptors) == 0 else transform_descriptors
)
return WriteResult(
index=write_item.index,
size_in_bytes=length,
storage_data=_StorageInfo(
storage_key,
offset,
length,
transform_descriptors=info_transform_descriptors,
),
)
def _write_files_from_queue(
create_stream: Callable,
file_queue: queue.Queue,
result_queue: queue.Queue,
planner: SavePlanner,
transforms: _StorageWriterTransforms,
inflight_threshhold: int,
use_fsync: bool,
thread_count: int,
serialization_format: SerializationFormat,
) -> None:
try:
while True:
file_name, storage_key, write_items = file_queue.get_nowait()
loader: _TensorLoader
custom_backend_name = torch._C._get_privateuse1_backend_name()
custom_device_mod = getattr(torch, custom_backend_name, None)
# TODO: Using the OverlappingCpuLoader with multiple threads creates significant
# performance degradation, observed as being related to cuda stream syncs. We
# should try to fix this and use _OverlappingCpuLoader for all threaded cases
if (
thread_count == 1
and (
torch.cuda.is_available()
or (custom_device_mod and custom_device_mod.is_available())
)
and inflight_threshhold > 0
):
loader = _OverlappingCpuLoader(
planner.resolve_data,
inflight_threshhold=inflight_threshhold,
)
else:
loader = _SerialCpuLoader(
planner.resolve_data,
)
tensor_w = [wi for wi in write_items if wi.type != WriteItemType.BYTE_IO]
for write_item in tensor_w:
loader.add(_item_size(write_item), write_item)
loader.start_loading()
bytes_w = [wi for wi in write_items if wi.type == WriteItemType.BYTE_IO]
write_results = []
with create_stream(file_name, "wb") as stream:
for write_item in bytes_w:
data = planner.resolve_data(write_item)
write_results.append(
_write_item(
transforms,
stream,
data,
write_item,
storage_key,
serialization_format,
)
)
tensor_dict = {}
metadata_dict = {}
for tensor, write_item in loader.values():
if not tensor.is_cpu:
raise AssertionError("Tensor must be on CPU")
write_results.append(
_write_item(
transforms,
stream,
tensor,
write_item, # type: ignore[arg-type]
storage_key,
serialization_format,
)
)
tensor_dict[write_item.index.fqn] = tensor # type: ignore[attr-defined]
metadata_dict[write_item.index.fqn] = { # type: ignore[attr-defined]
"saved_offsets": write_item.tensor_data.chunk.offsets # type: ignore[attr-defined]
}
if serialization_format == SerializationFormat.SAFETENSORS:
from safetensors.torch import save # type: ignore[import-not-found]
stream.write(
save(
tensor_dict,
metadata={
CUSTOM_METADATA_KEY: json.dumps(metadata_dict),
DCP_VERSION_KEY: str(HF_DCP_VERSION),
FORMAT_KEY: FORMAT_VALUE,
},
)
)
if use_fsync:
try:
os.fsync(stream.fileno())
except (AttributeError, UnsupportedOperation):
os.sync()
stream.close()
result_queue.put(write_results)
except queue.Empty:
pass
| _StorageWriterTransforms |
python | falconry__falcon | tests/test_uri_templates.py | {
"start": 923,
"end": 1152
} | class ____:
def __init__(self):
self.id = None
self.name = None
self.called = False
def on_get(self, req, resp, id):
self.id = id
self.called = True
self.req = req
| IDResource |
python | django__django | tests/forms_tests/tests/tests.py | {
"start": 906,
"end": 1031
} | class ____(ModelForm):
class Meta:
model = ChoiceModel
fields = ["name", "choice"]
| EmptyCharLabelChoiceForm |
python | django__django | tests/auth_tests/test_views.py | {
"start": 68590,
"end": 70335
} | class ____(TestCase):
def test_admin_password_change(self):
u = UUIDUser.objects.create_superuser(
username="uuid", email="foo@bar.com", password="test"
)
self.assertTrue(self.client.login(username="uuid", password="test"))
user_change_url = reverse(
"custom_user_admin:auth_tests_uuiduser_change", args=(u.pk,)
)
response = self.client.get(user_change_url)
self.assertEqual(response.status_code, 200)
password_change_url = reverse(
"custom_user_admin:auth_user_password_change", args=(u.pk,)
)
response = self.client.get(password_change_url)
# The action attribute is omitted.
self.assertContains(response, '<form method="post" id="uuiduser_form">')
# A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
with connection.constraint_checks_disabled():
response = self.client.post(
password_change_url,
{
"password1": "password1",
"password2": "password1",
},
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest("id")
self.assertEqual(row.user_id, 1) # hardcoded in CustomUserAdmin.log_change()
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.get_change_message(), "Changed password.")
# The LogEntry.user column isn't altered to a UUID type so it's set to
# an integer manually in CustomUserAdmin to avoid an error. To avoid a
# constraint error, delete the entry before constraints are checked
# after the test.
row.delete()
| UUIDUserTests |
python | kamyu104__LeetCode-Solutions | Python/valid-boomerang.py | {
"start": 29,
"end": 334
} | class ____(object):
def isBoomerang(self, points):
"""
:type points: List[List[int]]
:rtype: bool
"""
return (points[0][0] - points[1][0]) * (points[0][1] - points[2][1]) - \
(points[0][0] - points[2][0]) * (points[0][1] - points[1][1]) != 0
| Solution |
python | plotly__plotly.py | plotly/graph_objs/layout/yaxis/_rangebreak.py | {
"start": 235,
"end": 11958
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.yaxis"
_path_str = "layout.yaxis.rangebreak"
_valid_props = {
"bounds",
"dvalue",
"enabled",
"name",
"pattern",
"templateitemname",
"values",
}
@property
def bounds(self):
"""
Sets the lower and upper bounds of this axis rangebreak. Can be
used with `pattern`.
The 'bounds' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'bounds[0]' property accepts values of any type
(1) The 'bounds[1]' property accepts values of any type
Returns
-------
list
"""
return self["bounds"]
@bounds.setter
def bounds(self, val):
self["bounds"] = val
@property
def dvalue(self):
"""
Sets the size of each `values` item. The default is one day in
milliseconds.
The 'dvalue' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["dvalue"]
@dvalue.setter
def dvalue(self, val):
self["dvalue"] = val
@property
def enabled(self):
"""
Determines whether this axis rangebreak is enabled or disabled.
Please note that `rangebreaks` only work for "date" axis type.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def pattern(self):
"""
Determines a pattern on the time line that generates breaks. If
*day of week* - days of the week in English e.g. 'Sunday' or
`sun` (matching is case-insensitive and considers only the
first three characters), as well as Sunday-based integers
between 0 and 6. If "hour" - hour (24-hour clock) as decimal
numbers between 0 and 24. for more info. Examples: - { pattern:
'day of week', bounds: [6, 1] } or simply { bounds: ['sat',
'mon'] } breaks from Saturday to Monday (i.e. skips the
weekends). - { pattern: 'hour', bounds: [17, 8] } breaks from
5pm to 8am (i.e. skips non-work hours).
The 'pattern' property is an enumeration that may be specified as:
- One of the following enumeration values:
['day of week', 'hour', '']
Returns
-------
Any
"""
return self["pattern"]
@pattern.setter
def pattern(self, val):
self["pattern"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def values(self):
"""
Sets the coordinate values corresponding to the rangebreaks. An
alternative to `bounds`. Use `dvalue` to set the size of the
values along the axis.
The 'values' property is an info array that may be specified as:
* a list of elements where:
The 'values[i]' property accepts values of any type
Returns
-------
list
"""
return self["values"]
@values.setter
def values(self, val):
self["values"] = val
@property
def _prop_descriptions(self):
return """\
bounds
Sets the lower and upper bounds of this axis
rangebreak. Can be used with `pattern`.
dvalue
Sets the size of each `values` item. The default is one
day in milliseconds.
enabled
Determines whether this axis rangebreak is enabled or
disabled. Please note that `rangebreaks` only work for
"date" axis type.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
pattern
Determines a pattern on the time line that generates
breaks. If *day of week* - days of the week in English
e.g. 'Sunday' or `sun` (matching is case-insensitive
and considers only the first three characters), as well
as Sunday-based integers between 0 and 6. If "hour" -
hour (24-hour clock) as decimal numbers between 0 and
24. for more info. Examples: - { pattern: 'day of
week', bounds: [6, 1] } or simply { bounds: ['sat',
'mon'] } breaks from Saturday to Monday (i.e. skips
the weekends). - { pattern: 'hour', bounds: [17, 8] }
breaks from 5pm to 8am (i.e. skips non-work hours).
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
values
Sets the coordinate values corresponding to the
rangebreaks. An alternative to `bounds`. Use `dvalue`
to set the size of the values along the axis.
"""
def __init__(
self,
arg=None,
bounds=None,
dvalue=None,
enabled=None,
name=None,
pattern=None,
templateitemname=None,
values=None,
**kwargs,
):
"""
Construct a new Rangebreak object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.yaxis.Rangebreak`
bounds
Sets the lower and upper bounds of this axis
rangebreak. Can be used with `pattern`.
dvalue
Sets the size of each `values` item. The default is one
day in milliseconds.
enabled
Determines whether this axis rangebreak is enabled or
disabled. Please note that `rangebreaks` only work for
"date" axis type.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
pattern
Determines a pattern on the time line that generates
breaks. If *day of week* - days of the week in English
e.g. 'Sunday' or `sun` (matching is case-insensitive
and considers only the first three characters), as well
as Sunday-based integers between 0 and 6. If "hour" -
hour (24-hour clock) as decimal numbers between 0 and
24. for more info. Examples: - { pattern: 'day of
week', bounds: [6, 1] } or simply { bounds: ['sat',
'mon'] } breaks from Saturday to Monday (i.e. skips
the weekends). - { pattern: 'hour', bounds: [17, 8] }
breaks from 5pm to 8am (i.e. skips non-work hours).
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
values
Sets the coordinate values corresponding to the
rangebreaks. An alternative to `bounds`. Use `dvalue`
to set the size of the values along the axis.
Returns
-------
Rangebreak
"""
super().__init__("rangebreaks")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.yaxis.Rangebreak
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.yaxis.Rangebreak`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bounds", arg, bounds)
self._set_property("dvalue", arg, dvalue)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("pattern", arg, pattern)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("values", arg, values)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Rangebreak |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 24236,
"end": 25455
} | class ____(Node):
__slots__ = ('loc', 'name', 'arguments', 'type', 'directives',)
_fields = ('name', 'arguments', 'type',)
def __init__(self, name, arguments, type, loc=None, directives=None):
self.loc = loc
self.name = name
self.arguments = arguments
self.type = type
self.directives = directives
def __eq__(self, other):
return (
self is other or (
isinstance(other, FieldDefinition) and
# self.loc == other.loc and
self.name == other.name and
self.arguments == other.arguments and
self.type == other.type and
self.directives == other.directives
)
)
def __repr__(self):
return ('FieldDefinition('
'name={self.name!r}'
', arguments={self.arguments!r}'
', type={self.type!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.name,
self.arguments,
self.type,
self.loc,
self.directives,
)
def __hash__(self):
return id(self)
| FieldDefinition |
python | tiangolo__fastapi | scripts/deploy_docs_status.py | {
"start": 172,
"end": 391
} | class ____(BaseSettings):
github_repository: str
github_token: SecretStr
deploy_url: str | None = None
commit_sha: str
run_id: int
state: Literal["pending", "success", "error"] = "pending"
| Settings |
python | huggingface__transformers | src/transformers/models/glpn/modeling_glpn.py | {
"start": 10393,
"end": 12179
} | class ____(nn.Module):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
super().__init__()
self.layer_norm_1 = nn.LayerNorm(hidden_size)
self.attention = GLPNAttention(
config,
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
sequence_reduction_ratio=sequence_reduction_ratio,
)
self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.layer_norm_2 = nn.LayerNorm(hidden_size)
mlp_hidden_size = int(hidden_size * mlp_ratio)
self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
def forward(self, hidden_states, height, width, output_attentions=False):
self_attention_outputs = self.attention(
self.layer_norm_1(hidden_states), # in GLPN, layernorm is applied before self-attention
height,
width,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# first residual connection (with stochastic depth)
attention_output = self.drop_path(attention_output)
hidden_states = attention_output + hidden_states
mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
# second residual connection (with stochastic depth)
mlp_output = self.drop_path(mlp_output)
layer_output = mlp_output + hidden_states
outputs = (layer_output,) + outputs
return outputs
| GLPNLayer |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 78823,
"end": 79654
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
is_library_for_all_clusters: Optional[bool] = Field(
None,
description=(
"Whether the library was set to be installed on all clusters via the"
" libraries UI."
),
)
library: Optional[Library] = Field(
None, description="Unique identifier for the library."
)
messages: Optional[List[str]] = Field(
None,
description=(
"All the info and warning messages that have occurred so far for this"
" library."
),
)
status: Optional[LibraryInstallStatus] = Field(
None, description="Status of installing the library on the cluster."
)
| LibraryFullStatus |
python | tensorflow__tensorflow | tensorflow/python/util/nest_test.py | {
"start": 81496,
"end": 82336
} | class ____(test.Benchmark):
def run_and_report(self, s1, s2, name):
burn_iter, test_iter = 100, 30000
for _ in range(burn_iter):
nest.assert_same_structure(s1, s2)
t0 = time.time()
for _ in range(test_iter):
nest.assert_same_structure(s1, s2)
t1 = time.time()
self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter,
name=name)
def benchmark_assert_structure(self):
s1 = (((1, 2), 3), 4, (5, 6))
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
self.run_and_report(s1, s2, "assert_same_structure_6_elem")
s1 = (((1, 2), 3), 4, (5, 6)) * 10
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10
self.run_and_report(s1, s2, "assert_same_structure_60_elem")
if __name__ == "__main__":
test.main()
| NestBenchmark |
python | conda__conda | conda/models/match_spec.py | {
"start": 33116,
"end": 34009
} | class ____(metaclass=ABCMeta):
def __init__(self, value):
self._raw_value = value
@abstractmethod
def match(self, other):
raise NotImplementedError()
def matches(self, value):
return self.match(value)
@property
def raw_value(self):
return self._raw_value
@abstractproperty
def exact_value(self):
"""If the match value is an exact specification, returns the value.
Otherwise returns None.
"""
raise NotImplementedError()
def merge(self, other):
if self.raw_value != other.raw_value:
raise ValueError(
f"Incompatible component merge:\n - {self.raw_value!r}\n - {other.raw_value!r}"
)
return self.raw_value
def union(self, other):
options = {self.raw_value, other.raw_value}
return "|".join(options)
| MatchInterface |
python | rushter__MLAlgorithms | mla/neuralnet/layers/convnet.py | {
"start": 4412,
"end": 7845
} | class ____(Layer):
"""Flattens multidimensional input into 2D matrix."""
def forward_pass(self, X):
self.last_input_shape = X.shape
return X.reshape((X.shape[0], -1))
def backward_pass(self, delta):
return delta.reshape(self.last_input_shape)
def shape(self, x_shape):
return x_shape[0], np.prod(x_shape[1:])
def image_to_column(images, filter_shape, stride, padding):
"""Rearrange image blocks into columns.
Parameters
----------
filter_shape : tuple(height, width)
images : np.array, shape (n_images, n_channels, height, width)
padding: tuple(height, width)
stride : tuple (height, width)
"""
n_images, n_channels, height, width = images.shape
f_height, f_width = filter_shape
out_height, out_width = convoltuion_shape(
height, width, (f_height, f_width), stride, padding
)
images = np.pad(images, ((0, 0), (0, 0), padding, padding), mode="constant")
col = np.zeros((n_images, n_channels, f_height, f_width, out_height, out_width))
for y in range(f_height):
y_bound = y + stride[0] * out_height
for x in range(f_width):
x_bound = x + stride[1] * out_width
col[:, :, y, x, :, :] = images[
:, :, y : y_bound : stride[0], x : x_bound : stride[1]
]
col = col.transpose(0, 4, 5, 1, 2, 3).reshape(n_images * out_height * out_width, -1)
return col
def column_to_image(columns, images_shape, filter_shape, stride, padding):
"""Rearrange columns into image blocks.
Parameters
----------
columns
images_shape : tuple(n_images, n_channels, height, width)
filter_shape : tuple(height, _width)
stride : tuple(height, width)
padding : tuple(height, width)
"""
n_images, n_channels, height, width = images_shape
f_height, f_width = filter_shape
out_height, out_width = convoltuion_shape(
height, width, (f_height, f_width), stride, padding
)
columns = columns.reshape(
n_images, out_height, out_width, n_channels, f_height, f_width
).transpose(0, 3, 4, 5, 1, 2)
img_h = height + 2 * padding[0] + stride[0] - 1
img_w = width + 2 * padding[1] + stride[1] - 1
img = np.zeros((n_images, n_channels, img_h, img_w))
for y in range(f_height):
y_bound = y + stride[0] * out_height
for x in range(f_width):
x_bound = x + stride[1] * out_width
img[:, :, y : y_bound : stride[0], x : x_bound : stride[1]] += columns[
:, :, y, x, :, :
]
return img[:, :, padding[0] : height + padding[0], padding[1] : width + padding[1]]
def convoltuion_shape(img_height, img_width, filter_shape, stride, padding):
"""Calculate output shape for convolution layer."""
height = (img_height + 2 * padding[0] - filter_shape[0]) / float(stride[0]) + 1
width = (img_width + 2 * padding[1] - filter_shape[1]) / float(stride[1]) + 1
assert height % 1 == 0
assert width % 1 == 0
return int(height), int(width)
def pooling_shape(pool_shape, image_shape, stride):
"""Calculate output shape for pooling layer."""
n_images, n_channels, height, width = image_shape
height = (height - pool_shape[0]) / float(stride[0]) + 1
width = (width - pool_shape[1]) / float(stride[1]) + 1
assert height % 1 == 0
assert width % 1 == 0
return int(height), int(width)
| Flatten |
python | tiangolo__fastapi | tests/test_webhooks_security.py | {
"start": 271,
"end": 4667
} | class ____(BaseModel):
username: str
monthly_fee: float
start_date: datetime
@app.webhooks.post("new-subscription")
def new_subscription(
body: Subscription, token: Annotated[str, Security(bearer_scheme)]
):
"""
When a new user subscribes to your service we'll send you a POST request with this
data to the URL that you register for the event `new-subscription` in the dashboard.
"""
client = TestClient(app)
def test_dummy_webhook():
# Just for coverage
new_subscription(body={}, token="Bearer 123")
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
# insert_assert(response.json())
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {},
"webhooks": {
"new-subscription": {
"post": {
"summary": "New Subscription",
"description": "When a new user subscribes to your service we'll send you a POST request with this\ndata to the URL that you register for the event `new-subscription` in the dashboard.",
"operationId": "new_subscriptionnew_subscription_post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Subscription"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"security": [{"HTTPBearer": []}],
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {"$ref": "#/components/schemas/ValidationError"},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Subscription": {
"properties": {
"username": {"type": "string", "title": "Username"},
"monthly_fee": {"type": "number", "title": "Monthly Fee"},
"start_date": {
"type": "string",
"format": "date-time",
"title": "Start Date",
},
},
"type": "object",
"required": ["username", "monthly_fee", "start_date"],
"title": "Subscription",
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
},
"securitySchemes": {"HTTPBearer": {"type": "http", "scheme": "bearer"}},
},
}
| Subscription |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/win32stat.py | {
"start": 1305,
"end": 3828
} | class ____(ctypes.Structure):
_fields_ = [('dwFileAttributes', ctypes.wintypes.DWORD),
('ftCreationTime', FILETIME),
('ftLastAccessTime', FILETIME),
('ftLastWriteTime', FILETIME),
('dwVolumeSerialNumber', ctypes.wintypes.DWORD),
('nFileSizeHigh', ctypes.wintypes.DWORD),
('nFileSizeLow', ctypes.wintypes.DWORD),
('nNumberOfLinks', ctypes.wintypes.DWORD),
('nFileIndexHigh', ctypes.wintypes.DWORD),
('nFileIndexLow', ctypes.wintypes.DWORD)]
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.restype = ctypes.wintypes.HANDLE
CreateFile.argtypes = (
ctypes.c_wchar_p,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.c_void_p,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.HANDLE,
)
GetFileInformationByHandle = ctypes.windll.kernel32.GetFileInformationByHandle
GetFileInformationByHandle.restype = ctypes.wintypes.BOOL
GetFileInformationByHandle.argtypes = (
ctypes.wintypes.HANDLE,
ctypes.wintypes.POINTER(BY_HANDLE_FILE_INFORMATION),
)
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.restype = ctypes.wintypes.BOOL
CloseHandle.argtypes = (ctypes.wintypes.HANDLE,)
StatResult = namedtuple('StatResult', 'st_dev st_ino st_mode st_mtime')
def _to_mode(attr):
m = 0
if (attr & FILE_ATTRIBUTE_DIRECTORY):
m |= stdstat.S_IFDIR | 0o111
else:
m |= stdstat.S_IFREG
if (attr & FILE_ATTRIBUTE_READONLY):
m |= 0o444
else:
m |= 0o666
return m
def _to_unix_time(ft):
t = (ft.dwHighDateTime) << 32 | ft.dwLowDateTime
return (t / 10000000) - 11644473600
def stat(path):
hfile = CreateFile(path,
FILE_READ_ATTRIBUTES,
0,
None,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
None)
if hfile == INVALID_HANDLE_VALUE:
raise ctypes.WinError
info = BY_HANDLE_FILE_INFORMATION()
r = GetFileInformationByHandle(hfile, info)
CloseHandle(hfile)
if not r:
raise ctypes.WinError
return StatResult(st_dev=info.dwVolumeSerialNumber,
st_ino=(info.nFileIndexHigh << 32) + info.nFileIndexLow,
st_mode=_to_mode(info.dwFileAttributes),
st_mtime=_to_unix_time(info.ftLastWriteTime)
)
| BY_HANDLE_FILE_INFORMATION |
python | django__django | tests/sitemaps_tests/urls/http.py | {
"start": 2000,
"end": 2094
} | class ____(SimpleSitemap):
lastmod = datetime(2013, 4, 20, 5, 0, 0)
| FixedNewerLastmodSitemap |
python | dask__dask | dask/dataframe/dask_expr/_cumulative.py | {
"start": 1185,
"end": 1644
} | class ____(Blockwise):
_parameters = ["frame", "axis", "skipna", "operation"]
_defaults = {"skipna": True, "axis": None}
_projection_passthrough = True
@functools.cached_property
def _meta(self):
return self.frame._meta
@functools.cached_property
def operation(self):
return self.operand("operation")
@functools.cached_property
def _args(self) -> list:
return self.operands[:-1]
| CumulativeBlockwise |
python | django-import-export__django-import-export | import_export/management/commands/export.py | {
"start": 187,
"end": 2136
} | class ____(BaseCommand):
help = "Export data from a specified resource or model in a chosen format."
def add_arguments(self, parser):
default_format_names = get_default_format_names()
parser.add_argument(
"format",
help=f"""Specify the export format. Can be one of the default formats
({default_format_names}), or a custom format class provided as a dotted path
(e.g., 'XLSX' or 'mymodule.CustomCSV').""",
)
parser.add_argument(
"resource",
help="""Specify the resource or model to export. Accepts a resource class or
a model class in dotted path format "(e.g., 'mymodule.resources.MyResource'
or 'auth.User').""",
)
parser.add_argument(
"--encoding",
help="Specify the encoding to use for the exported data (e.g., 'utf-8'). "
"This applies to text-based formats.",
)
def handle(self, *args, **options):
model_or_resource_class = options.get("resource")
format_name = options.get("format")
encoding = options.get("encoding")
resource = get_resource_class(model_or_resource_class)()
format_class = get_format_class(format_name, None, encoding)
data = resource.export()
export_data = format_class.export_data(data)
if not format_class.is_binary():
if encoding:
export_data = export_data.encode(encoding)
else:
export_data = export_data.encode()
if format_class.is_binary() and self.stdout.isatty():
self.stderr.write(
self.style.ERROR(
"This is a binary format and your terminal does not support "
"binary data. Redirect the output to a file."
)
)
sys.exit(1)
self.stdout.buffer.write(export_data)
| Command |
python | Netflix__metaflow | test/cmd/develop/test_stub_generator.py | {
"start": 370,
"end": 468
} | class ____(typing.Generic[T, U]):
"""Complex generic test class"""
pass
| ComplexGenericClass |
python | facelessuser__soupsieve | tests/test_level4/test_past.py | {
"start": 49,
"end": 785
} | class ____(util.TestCase):
"""Test past selectors."""
MARKUP = """
<body>
<div id="div">
<p id="0">Some text <span id="1" class="foo:bar:foobar"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
<a id="3">Placeholder text.</a>
</p>
</div>
</body>
"""
def test_past(self):
"""Test past (should match nothing)."""
self.assert_selector(
self.MARKUP,
"p:past",
[],
flags=util.HTML
)
def test_not_past(self):
"""Test not past."""
self.assert_selector(
self.MARKUP,
"p:not(:past)",
["0"],
flags=util.HTML
)
| TestPast |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datafusion.py | {
"start": 3357,
"end": 4152
} | class ____:
@mock.patch(RESOURCE_PATH_TO_DICT_STR)
@mock.patch(HOOK_STR)
def test_execute_check_hook_call_should_execute_successfully(self, mock_hook, mock_resource_path_to_dict):
mock_resource_path_to_dict.return_value = {"projects": PROJECT_ID}
op = CloudDataFusionRestartInstanceOperator(
task_id="test_tasks",
instance_name=INSTANCE_NAME,
location=LOCATION,
project_id=PROJECT_ID,
)
op.execute(context=mock.MagicMock())
mock_hook.return_value.restart_instance.assert_called_once_with(
instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
)
assert mock_hook.return_value.wait_for_operation.call_count == 1
| TestCloudDataFusionRestartInstanceOperator |
python | PyCQA__pylint | pylint/extensions/for_any_all.py | {
"start": 645,
"end": 5849
} | class ____(BaseChecker):
name = "consider-using-any-or-all"
msgs = {
"C0501": (
"`for` loop could be `%s`",
"consider-using-any-or-all",
"A for loop that checks for a condition and return a bool can be replaced with any or all.",
)
}
@only_required_for_messages("consider-using-any-or-all")
def visit_for(self, node: nodes.For) -> None:
if len(node.body) != 1: # Only If node with no Else
return
if not isinstance(node.body[0], nodes.If):
return
if_children = list(node.body[0].get_children())
if any(isinstance(child, nodes.If) for child in if_children):
# an if node within the if-children indicates an elif clause,
# suggesting complex logic.
return
node_after_loop = node.next_sibling()
if self._assigned_reassigned_returned(node, if_children, node_after_loop):
final_return_bool = node_after_loop.value.name
suggested_string = self._build_suggested_string(node, final_return_bool)
self.add_message(
"consider-using-any-or-all",
node=node,
args=suggested_string,
confidence=HIGH,
)
return
if self._if_statement_returns_bool(if_children, node_after_loop):
final_return_bool = node_after_loop.value.value
suggested_string = self._build_suggested_string(node, final_return_bool)
self.add_message(
"consider-using-any-or-all",
node=node,
args=suggested_string,
confidence=HIGH,
)
return
@staticmethod
def _if_statement_returns_bool(
if_children: list[nodes.NodeNG], node_after_loop: nodes.NodeNG
) -> bool:
"""Detect for-loop, if-statement, return pattern:
Ex:
def any_uneven(items):
for item in items:
if not item % 2 == 0:
return True
return False
"""
if not len(if_children) == 2:
# The If node has only a comparison and return
return False
if not returns_bool(if_children[1]):
return False
# Check for terminating boolean return right after the loop
return returns_bool(node_after_loop)
@staticmethod
def _assigned_reassigned_returned(
node: nodes.For, if_children: list[nodes.NodeNG], node_after_loop: nodes.NodeNG
) -> bool:
"""Detect boolean-assign, for-loop, re-assign, return pattern:
Ex:
def check_lines(lines, max_chars):
long_line = False
for line in lines:
if len(line) > max_chars:
long_line = True
# no elif / else statement
return long_line
"""
node_before_loop = node.previous_sibling()
if not assigned_bool(node_before_loop):
# node before loop isn't assigning to boolean
return False
assign_children = [x for x in if_children if isinstance(x, nodes.Assign)]
if not assign_children:
# if-nodes inside loop aren't assignments
return False
# We only care for the first assign node of the if-children. Otherwise it breaks the pattern.
first_target = assign_children[0].targets[0]
target_before_loop = node_before_loop.targets[0]
if not (
isinstance(first_target, nodes.AssignName)
and isinstance(target_before_loop, nodes.AssignName)
):
return False
node_before_loop_name = node_before_loop.targets[0].name
return (
first_target.name == node_before_loop_name
and isinstance(node_after_loop, nodes.Return)
and isinstance(node_after_loop.value, nodes.Name)
and node_after_loop.value.name == node_before_loop_name
)
@staticmethod
def _build_suggested_string(node: nodes.For, final_return_bool: bool) -> str:
"""When a nodes.For node can be rewritten as an any/all statement, return a
suggestion for that statement.
'final_return_bool' is the boolean literal returned after the for loop if all
conditions fail.
"""
loop_var = node.target.as_string()
loop_iter = node.iter.as_string()
test_node = next(node.body[0].get_children())
match test_node:
case nodes.UnaryOp(op="not"):
# The condition is negated. Advance the node to the operand and modify the suggestion
test_node = test_node.operand
suggested_function = "all" if final_return_bool else "not all"
case _:
suggested_function = "not any" if final_return_bool else "any"
test = test_node.as_string()
return f"{suggested_function}({test} for {loop_var} in {loop_iter})"
def register(linter: PyLinter) -> None:
linter.register_checker(ConsiderUsingAnyOrAllChecker(linter))
| ConsiderUsingAnyOrAllChecker |
python | getsentry__sentry | src/sentry/sentry_apps/api/serializers/sentry_app_webhook_request.py | {
"start": 663,
"end": 753
} | class ____(TypedDict):
organization: RpcOrganizationMapping | None
| _BufferedRequestAttrs |
python | protocolbuffers__protobuf | python/google/protobuf/internal/well_known_types.py | {
"start": 21306,
"end": 23338
} | class ____(object):
"""Class for ListValue message type."""
__slots__ = ()
def __len__(self):
return len(self.values)
def append(self, value):
_SetStructValue(self.values.add(), value)
def extend(self, elem_seq):
for value in elem_seq:
self.append(value)
def __getitem__(self, index):
"""Retrieves item by the specified index."""
return _GetStructValue(self.values.__getitem__(index))
def __setitem__(self, index, value):
_SetStructValue(self.values.__getitem__(index), value)
def __delitem__(self, key):
del self.values[key]
def _internal_assign(self, elem_seq):
self.Clear()
self.extend(elem_seq)
def _internal_compare(self, other):
size = len(self)
if size != len(other):
return False
for i in range(size):
if isinstance(other[i], (dict, list)):
if not self[i]._internal_compare(other[i]):
return False
elif self[i] != other[i]:
return False
return True
def items(self):
for i in range(len(self)):
yield self[i]
def add_struct(self):
"""Appends and returns a struct value as the next value in the list."""
struct_value = self.values.add().struct_value
# Clear will mark struct_value modified which will indeed create a struct.
struct_value.Clear()
return struct_value
def add_list(self):
"""Appends and returns a list value as the next value in the list."""
list_value = self.values.add().list_value
# Clear will mark list_value modified which will indeed create a list.
list_value.Clear()
return list_value
collections.abc.MutableSequence.register(ListValue)
# LINT.IfChange(wktbases)
WKTBASES = {
'google.protobuf.Any': Any,
'google.protobuf.Duration': Duration,
'google.protobuf.FieldMask': FieldMask,
'google.protobuf.ListValue': ListValue,
'google.protobuf.Struct': Struct,
'google.protobuf.Timestamp': Timestamp,
}
# LINT.ThenChange(//depot/google.protobuf/compiler/python/pyi_generator.cc:wktbases)
| ListValue |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_superfences.py | {
"start": 35630,
"end": 36445
} | class ____(util.MdCase):
"""Test custom validator and format."""
extension = ['pymdownx.superfences']
extension_configs = {
'pymdownx.superfences': {
'custom_fences': [
{
'name': 'test',
'class': 'test',
'format': custom_format,
'validator': custom_validator_except
}
]
}
}
def test_custom_fail_exception(self):
"""Test custom fences forced exception."""
with self.assertRaises(SuperFencesException):
self.check_markdown(
r'''
```test
test
```
''',
'',
True
)
| TestSuperFencesCustomException |
python | mlflow__mlflow | mlflow/server/fastapi_security.py | {
"start": 3438,
"end": 6433
} | class ____:
"""Middleware to actively block cross-origin state-changing requests."""
def __init__(self, app: ASGIApp, allowed_origins: list[str]):
self.app = app
self.allowed_origins = allowed_origins
async def __call__(self, scope, receive, send):
if scope["type"] != "http":
return await self.app(scope, receive, send)
if not is_api_endpoint(scope["path"]):
return await self.app(scope, receive, send)
method = scope["method"]
headers = dict(scope["headers"])
origin = headers.get(b"origin", b"").decode("utf-8")
if should_block_cors_request(origin, method, self.allowed_origins):
_logger.warning(f"Blocked cross-origin request from {origin}")
await send(
{
"type": "http.response.start",
"status": HTTPStatus.FORBIDDEN,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send(
{
"type": "http.response.body",
"body": CORS_BLOCKED_MSG.encode(),
}
)
return
await self.app(scope, receive, send)
def get_allowed_hosts() -> list[str]:
"""Get list of allowed hosts from environment or defaults."""
return get_allowed_hosts_from_env() or get_default_allowed_hosts()
def get_allowed_origins() -> list[str]:
"""Get list of allowed CORS origins from environment or defaults."""
return get_allowed_origins_from_env() or []
def init_fastapi_security(app: FastAPI) -> None:
"""
Initialize security middleware for FastAPI application.
This configures:
- Host header validation (DNS rebinding protection) via TrustedHostMiddleware
- CORS protection via CORSMiddleware
- Security headers via custom middleware
Args:
app: FastAPI application instance.
"""
if MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE.get() == "true":
return
app.add_middleware(SecurityHeadersMiddleware)
allowed_origins = get_allowed_origins()
if allowed_origins and "*" in allowed_origins:
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
expose_headers=["*"],
)
else:
app.add_middleware(CORSBlockingMiddleware, allowed_origins=allowed_origins)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"],
allow_headers=["*"],
expose_headers=["*"],
)
allowed_hosts = get_allowed_hosts()
if allowed_hosts and "*" not in allowed_hosts:
app.add_middleware(HostValidationMiddleware, allowed_hosts=allowed_hosts)
| CORSBlockingMiddleware |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.