language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | has2k1__plotnine | plotnine/exceptions.py | {
"start": 888,
"end": 1616
} | class ____(UserWarning):
"""
Warnings for ggplot inconsistencies
"""
def deprecated_themeable_name(cls):
"""
Decorator to deprecate the name of a themeable
"""
old_init = cls.__init__
@functools.wraps(cls.__init__)
def new_init(self, *args, **kwargs):
old_name = cls.__name__
new_name = cls.mro()[1].__name__
msg = (
f"\nThemeable '{old_name}' has been renamed to '{new_name}'.\n"
f"'{old_name}' is now deprecated and will be removed in "
"a future release."
)
warnings.warn(msg, category=FutureWarning, stacklevel=4)
old_init(self, *args, **kwargs)
cls.__init__ = new_init
return cls
| PlotnineWarning |
python | mlflow__mlflow | mlflow/genai/optimize/types.py | {
"start": 1353,
"end": 3468
} | class ____:
"""
Configuration for prompt optimization.
Args:
num_instruction_candidates: Number of candidate instructions to generate
during each optimization iteration. Higher values may lead to better
results but increase optimization time. Default: 6
max_few_shot_examples: Maximum number of examples to show in few-shot
demonstrations. Default: 6
num_threads: Number of threads to use for parallel optimization.
Default: (number of CPU cores * 2 + 1)
optimizer_llm: Optional LLM parameters for the teacher model. If not provided,
the target LLM will be used as the teacher.
algorithm: The optimization algorithm to use. When a string is provided,
it must be one of the supported algorithms: "DSPy/MIPROv2".
When a BasePromptOptimizer is provided, it will be used as the optimizer.
Default: "DSPy/MIPROv2"
verbose: Whether to show optimizer logs during optimization. Default: False
autolog: Whether to enable automatic logging and prompt registration.
If set to True, a MLflow run is automatically created to store optimization
parameters, datasets and metrics, and the optimized prompt is registered.
If set to False, the raw optimized template is returned without registration.
Default: True
convert_to_single_text: Whether to convert the optimized prompt to a single prompt.
Default: True
extract_instructions: Whether to extract instructions from the initial prompt.
Default: True
"""
num_instruction_candidates: int = 6
max_few_shot_examples: int = 6
num_threads: int = field(default_factory=lambda: (multiprocessing.cpu_count() or 1) * 2 + 1)
optimizer_llm: LLMParams | None = None
algorithm: str | type["BasePromptOptimizer"] = "DSPy/MIPROv2"
verbose: bool = False
autolog: bool = True
convert_to_single_text: bool = True
extract_instructions: bool = True
@experimental(version="3.5.0")
@dataclass
| OptimizerConfig |
python | openai__openai-python | src/openai/types/webhooks/batch_cancelled_webhook_event.py | {
"start": 239,
"end": 326
} | class ____(BaseModel):
id: str
"""The unique ID of the batch API request."""
| Data |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/app.py | {
"start": 5644,
"end": 11199
} | class ____(Cadwyn):
# Workaround lack of customzation https://github.com/zmievsa/cadwyn/issues/255
async def openapi_jsons(self, req: Request) -> JSONResponse:
resp = await super().openapi_jsons(req)
open_apischema = json.loads(resp.body)
open_apischema = self.customize_openapi(open_apischema)
resp.body = resp.render(open_apischema)
return resp
def customize_openapi(self, openapi_schema: dict[str, Any]) -> dict[str, Any]:
"""
Customize the OpenAPI schema to include additional schemas not tied to specific endpoints.
This is particularly useful for client SDKs that require models for types
not directly exposed in any endpoint's request or response schema.
We also replace ``anyOf`` with ``oneOf`` in the API spec as this produces better results for the code
generators. This is because anyOf can technically be more than of the given schemas, but 99.9% of the
time (perhaps 100% in this API) the types are mutually exclusive, so oneOf is more correct
References:
- https://fastapi.tiangolo.com/how-to/extending-openapi/#modify-the-openapi-schema
"""
extra_schemas = get_extra_schemas()
for schema_name, schema in extra_schemas.items():
if schema_name not in openapi_schema["components"]["schemas"]:
openapi_schema["components"]["schemas"][schema_name] = schema
# The `JsonValue` component is missing any info. causes issues when generating models
openapi_schema["components"]["schemas"]["JsonValue"] = {
"title": "Any valid JSON value",
"oneOf": [
{"type": t} for t in ("string", "number", "integer", "object", "array", "boolean", "null")
],
}
def replace_any_of_with_one_of(spec):
if isinstance(spec, dict):
return {
("oneOf" if key == "anyOf" else key): replace_any_of_with_one_of(value)
for key, value in spec.items()
}
if isinstance(spec, list):
return [replace_any_of_with_one_of(item) for item in spec]
return spec
openapi_schema = replace_any_of_with_one_of(openapi_schema)
for comp in openapi_schema["components"]["schemas"].values():
for prop in comp.get("properties", {}).values():
# {"type": "string", "const": "deferred"}
# to
# {"type": "string", "enum": ["deferred"]}
#
# this produces better results in the code generator
if prop.get("type") == "string" and (const := prop.pop("const", None)):
prop["enum"] = [const]
return openapi_schema
def create_task_execution_api_app() -> FastAPI:
"""Create FastAPI app for task execution API."""
from airflow.api_fastapi.execution_api.routes import execution_api_router
from airflow.api_fastapi.execution_api.versions import bundle
def custom_generate_unique_id(route: APIRoute):
# This is called only if the route doesn't provide an explicit operation ID
return route.name
# See https://docs.cadwyn.dev/concepts/version_changes/ for info about API versions
app = CadwynWithOpenAPICustomization(
title="Airflow Task Execution API",
description="The private Airflow Task Execution API.",
lifespan=lifespan,
generate_unique_id_function=custom_generate_unique_id,
api_version_parameter_name="Airflow-API-Version",
api_version_default_value=bundle.versions[0].value,
versions=bundle,
)
# Add correlation-id middleware for request tracing
app.add_middleware(CorrelationIdMiddleware)
app.add_middleware(JWTReissueMiddleware)
app.generate_and_include_versioned_routers(execution_api_router)
# As we are mounted as a sub app, we don't get any logs for unhandled exceptions without this!
@app.exception_handler(Exception)
def handle_exceptions(request: Request, exc: Exception):
logger.exception("Handle died with an error", exc_info=(type(exc), exc, exc.__traceback__))
content = {"message": "Internal server error"}
if correlation_id := request.headers.get("correlation-id"):
content["correlation-id"] = correlation_id
return JSONResponse(status_code=500, content=content)
return app
def get_extra_schemas() -> dict[str, dict]:
"""Get all the extra schemas that are not part of the main FastAPI app."""
from airflow.api_fastapi.execution_api.datamodels.taskinstance import TaskInstance
from airflow.executors.workloads import BundleInfo
from airflow.task.trigger_rule import TriggerRule
from airflow.task.weight_rule import WeightRule
from airflow.utils.state import TaskInstanceState, TerminalTIState
return {
"TaskInstance": TaskInstance.model_json_schema(),
"BundleInfo": BundleInfo.model_json_schema(),
# Include the combined state enum too. In the datamodels we separate out SUCCESS from the other states
# as that has different payload requirements
"TerminalTIState": {"type": "string", "enum": list(TerminalTIState)},
"TaskInstanceState": {"type": "string", "enum": list(TaskInstanceState)},
"WeightRule": {"type": "string", "enum": list(WeightRule)},
"TriggerRule": {"type": "string", "enum": list(TriggerRule)},
}
@attrs.define()
| CadwynWithOpenAPICustomization |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/decorators/op_decorator.py | {
"start": 1289,
"end": 10770
} | class ____:
def __init__(
self,
name: Optional[str] = None,
description: Optional[str] = None,
required_resource_keys: Optional[AbstractSet[str]] = None,
config_schema: Optional[Union[Any, Mapping[str, Any]]] = None,
tags: Optional[Mapping[str, Any]] = None,
code_version: Optional[str] = None,
decorator_takes_context: Optional[bool] = True,
retry_policy: Optional[RetryPolicy] = None,
ins: Optional[Mapping[str, In]] = None,
out: Optional[Union[Out, Mapping[str, Out]]] = None,
pool: Optional[str] = None,
):
self.name = check.opt_str_param(name, "name")
self.decorator_takes_context = check.bool_param(
decorator_takes_context, "decorator_takes_context"
)
self.description = check.opt_str_param(description, "description")
# these will be checked within OpDefinition
self.required_resource_keys = required_resource_keys
self.tags = tags
self.code_version = code_version
self.retry_policy = retry_policy
self.pool = pool
# config will be checked within OpDefinition
self.config_schema = config_schema
self.ins = check.opt_nullable_mapping_param(ins, "ins", key_type=str, value_type=In)
self.out = out
def __call__(self, fn: Callable[..., Any]) -> "OpDefinition":
from dagster._config.pythonic_config import validate_resource_annotated_function
from dagster._core.definitions.op_definition import OpDefinition
validate_resource_annotated_function(fn)
if not self.name:
self.name = fn.__name__
compute_fn = (
DecoratedOpFunction(decorated_fn=fn)
if self.decorator_takes_context
else NoContextDecoratedOpFunction(decorated_fn=fn)
)
compute_fn.validate_malformed_config()
if compute_fn.has_config_arg():
check.param_invariant(
self.config_schema is None or self.config_schema == {},
"If the @op has a config arg, you cannot specify a config schema",
)
from dagster._config.pythonic_config import infer_schema_from_config_annotation
# Parse schema from the type annotation of the config arg
config_arg = compute_fn.get_config_arg()
config_arg_type = config_arg.annotation
config_arg_default = config_arg.default
self.config_schema = infer_schema_from_config_annotation(
config_arg_type, config_arg_default
)
outs: Optional[Mapping[str, Out]] = None
if self.out is not None and isinstance(self.out, Out):
outs = {DEFAULT_OUTPUT: self.out}
elif self.out is not None:
outs = check.mapping_param(self.out, "out", key_type=str, value_type=Out)
arg_resource_keys = {arg.name for arg in compute_fn.get_resource_args()}
decorator_resource_keys = set(self.required_resource_keys or [])
check.param_invariant(
len(decorator_resource_keys) == 0 or len(arg_resource_keys) == 0,
"Cannot specify resource requirements in both @op decorator and as arguments to the"
" decorated function",
)
resolved_resource_keys = decorator_resource_keys.union(arg_resource_keys)
op_def = OpDefinition.dagster_internal_init(
name=self.name,
ins=self.ins,
outs=outs,
compute_fn=compute_fn,
config_schema=self.config_schema,
description=self.description or format_docstring_for_description(fn),
required_resource_keys=resolved_resource_keys,
tags=self.tags,
code_version=self.code_version,
retry_policy=self.retry_policy,
version=None, # code_version has replaced version
pool=self.pool,
)
update_wrapper(op_def, compute_fn.decorated_fn)
return op_def
@overload
def op(compute_fn: Callable[..., Any]) -> "OpDefinition": ...
@overload
def op(
*,
name: Optional[str] = ...,
description: Optional[str] = ...,
ins: Optional[Mapping[str, In]] = ...,
out: Optional[Union[Out, Mapping[str, Out]]] = ...,
config_schema: Optional[UserConfigSchema] = ...,
required_resource_keys: Optional[AbstractSet[str]] = ...,
tags: Optional[Mapping[str, Any]] = ...,
version: Optional[str] = ...,
retry_policy: Optional[RetryPolicy] = ...,
code_version: Optional[str] = ...,
pool: Optional[str] = None,
) -> _Op: ...
@public
@deprecated_param(
param="version", breaking_version="2.0", additional_warn_text="Use `code_version` instead"
)
def op(
compute_fn: Optional[Callable] = None,
*,
name: Optional[str] = None,
description: Optional[str] = None,
ins: Optional[Mapping[str, In]] = None,
out: Optional[Union[Out, Mapping[str, Out]]] = None,
config_schema: Optional[UserConfigSchema] = None,
required_resource_keys: Optional[AbstractSet[str]] = None,
tags: Optional[Mapping[str, Any]] = None,
version: Optional[str] = None,
retry_policy: Optional[RetryPolicy] = None,
code_version: Optional[str] = None,
pool: Optional[str] = None,
) -> Union["OpDefinition", _Op]:
"""Create an op with the specified parameters from the decorated function.
Ins and outs will be inferred from the type signature of the decorated function
if not explicitly provided.
The decorated function will be used as the op's compute function. The signature of the
decorated function is more flexible than that of the ``compute_fn`` in the core API; it may:
1. Return a value. This value will be wrapped in an :py:class:`Output` and yielded by the compute function.
2. Return an :py:class:`Output`. This output will be yielded by the compute function.
3. Yield :py:class:`Output` or other :ref:`event objects <events>`. Same as default compute behavior.
Note that options 1) and 2) are incompatible with yielding other events -- if you would like
to decorate a function that yields events, it must also wrap its eventual output in an
:py:class:`Output` and yield it.
@op supports ``async def`` functions as well, including async generators when yielding multiple
events or outputs. Note that async ops will generally be run on their own unless using a custom
:py:class:`Executor` implementation that supports running them together.
Args:
name (Optional[str]): Name of op. Must be unique within any :py:class:`GraphDefinition`
using the op.
description (Optional[str]): Human-readable description of this op. If not provided, and
the decorated function has docstring, that docstring will be used as the description.
ins (Optional[Dict[str, In]]):
Information about the inputs to the op. Information provided here will be combined
with what can be inferred from the function signature.
out (Optional[Union[Out, Dict[str, Out]]]):
Information about the op outputs. Information provided here will be combined with
what can be inferred from the return type signature if the function does not use yield.
config_schema (Optional[ConfigSchema): The schema for the config. If set, Dagster will check
that config provided for the op matches this schema and fail if it does not. If not
set, Dagster will accept any config provided for the op.
required_resource_keys (Optional[Set[str]]): Set of resource handles required by this op.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may
expect and require certain metadata to be attached to a op. Values that are not strings
will be json encoded and must meet the criteria that `json.loads(json.dumps(value)) == value`.
code_version (Optional[str]): Version of the logic encapsulated by the op. If set,
this is used as a default version for all outputs.
retry_policy (Optional[RetryPolicy]): The retry policy for this op.
Examples:
.. code-block:: python
@op
def hello_world():
print('hello')
@op
def echo(msg: str) -> str:
return msg
@op(
ins={'msg': In(str)},
out=Out(str)
)
def echo_2(msg): # same as above
return msg
@op(
out={'word': Out(), 'num': Out()}
)
def multi_out() -> Tuple[str, int]:
return 'cool', 4
"""
code_version = normalize_renamed_param(
code_version,
"code_version",
version,
"version",
)
if compute_fn is not None:
check.invariant(description is None)
check.invariant(config_schema is None)
check.invariant(required_resource_keys is None)
check.invariant(tags is None)
check.invariant(version is None)
return _Op()(compute_fn)
return _Op(
name=name,
description=description,
config_schema=config_schema,
required_resource_keys=required_resource_keys,
tags=tags,
code_version=code_version,
retry_policy=retry_policy,
ins=ins,
out=out,
pool=pool,
)
| _Op |
python | ray-project__ray | python/ray/data/tests/preprocessors/test_encoder.py | {
"start": 24762,
"end": 35769
} | class ____:
"""Test basic serialization/deserialization functionality for all encoder preprocessors."""
def setup_method(self):
"""Set up test data for encoders."""
# Data for categorical encoders
self.categorical_df = pd.DataFrame(
{
"category": ["A", "B", "C", "A", "B", "C", "A"],
"grade": ["high", "medium", "low", "high", "medium", "low", "high"],
"region": ["north", "south", "east", "west", "north", "south", "east"],
}
)
# Data for multi-hot encoder (with lists)
self.multihot_df = pd.DataFrame(
{
"tags": [
["red", "car"],
["blue", "bike"],
["red", "truck"],
["green", "car"],
],
"features": [
["fast", "loud"],
["quiet"],
["fast", "heavy"],
["quiet", "light"],
],
}
)
# Data for label encoder
self.label_df = pd.DataFrame(
{
"target": ["cat", "dog", "bird", "cat", "dog", "bird"],
"other": [1, 2, 3, 4, 5, 6],
}
)
def test_ordinal_encoder_serialization(self):
"""Test OrdinalEncoder save/load functionality."""
# Create and fit encoder
encoder = OrdinalEncoder(columns=["category", "grade"])
dataset = ray.data.from_pandas(self.categorical_df)
fitted_encoder = encoder.fit(dataset)
# Test CloudPickle serialization (primary format)
serialized = fitted_encoder.serialize()
assert isinstance(serialized, bytes)
assert serialized.startswith(SerializablePreprocessor.MAGIC_CLOUDPICKLE)
# Test deserialization
deserialized = SerializablePreprocessor.deserialize(serialized)
assert isinstance(deserialized, OrdinalEncoder)
assert deserialized._fitted
assert deserialized.columns == ["category", "grade"]
assert deserialized.encode_lists is True # default value
# Test functional equivalence
test_df = pd.DataFrame({"category": ["A", "B"], "grade": ["high", "low"]})
original_result = fitted_encoder.transform_batch(test_df.copy())
deserialized_result = deserialized.transform_batch(test_df.copy())
pd.testing.assert_frame_equal(original_result, deserialized_result)
def test_onehot_encoder_serialization(self):
"""Test OneHotEncoder save/load functionality."""
# Create and fit encoder
encoder = OneHotEncoder(columns=["category"], max_categories={"category": 3})
dataset = ray.data.from_pandas(self.categorical_df)
fitted_encoder = encoder.fit(dataset)
# Test CloudPickle serialization (primary format)
serialized = fitted_encoder.serialize()
assert isinstance(serialized, bytes)
assert serialized.startswith(SerializablePreprocessor.MAGIC_CLOUDPICKLE)
# Test deserialization
deserialized = SerializablePreprocessor.deserialize(serialized)
assert isinstance(deserialized, OneHotEncoder)
assert deserialized._fitted
assert deserialized.columns == ["category"]
assert deserialized.max_categories == {"category": 3}
# Test functional equivalence
test_df = pd.DataFrame({"category": ["A", "B", "C"]})
original_result = fitted_encoder.transform_batch(test_df.copy())
deserialized_result = deserialized.transform_batch(test_df.copy())
pd.testing.assert_frame_equal(original_result, deserialized_result)
def test_multihot_encoder_serialization(self):
"""Test MultiHotEncoder save/load functionality."""
# Create and fit encoder
encoder = MultiHotEncoder(columns=["tags"], max_categories={"tags": 5})
dataset = ray.data.from_pandas(self.multihot_df)
fitted_encoder = encoder.fit(dataset)
# Test CloudPickle serialization (primary format)
serialized = fitted_encoder.serialize()
assert isinstance(serialized, bytes)
assert serialized.startswith(SerializablePreprocessor.MAGIC_CLOUDPICKLE)
# Test deserialization
deserialized = SerializablePreprocessor.deserialize(serialized)
assert isinstance(deserialized, MultiHotEncoder)
assert deserialized._fitted
assert deserialized.columns == ["tags"]
assert deserialized.max_categories == {"tags": 5}
# Test functional equivalence
test_df = pd.DataFrame({"tags": [["red", "car"], ["blue", "bike"]]})
original_result = fitted_encoder.transform_batch(test_df.copy())
deserialized_result = deserialized.transform_batch(test_df.copy())
pd.testing.assert_frame_equal(original_result, deserialized_result)
def test_label_encoder_serialization(self):
"""Test LabelEncoder save/load functionality."""
# Create and fit encoder
encoder = LabelEncoder(label_column="target")
dataset = ray.data.from_pandas(self.label_df)
fitted_encoder = encoder.fit(dataset)
# Test CloudPickle serialization (primary format)
serialized = fitted_encoder.serialize()
assert isinstance(serialized, bytes)
assert serialized.startswith(SerializablePreprocessor.MAGIC_CLOUDPICKLE)
# Test deserialization
deserialized = SerializablePreprocessor.deserialize(serialized)
assert isinstance(deserialized, LabelEncoder)
assert deserialized._fitted
assert deserialized.label_column == "target"
assert deserialized.output_column == "target" # default
# Test functional equivalence
test_df = pd.DataFrame({"target": ["cat", "dog", "bird"]})
original_result = fitted_encoder.transform_batch(test_df.copy())
deserialized_result = deserialized.transform_batch(test_df.copy())
pd.testing.assert_frame_equal(original_result, deserialized_result)
def test_categorizer_serialization(self):
"""Test Categorizer save/load functionality."""
# Create categorizer with predefined dtypes
sex_dtype = pd.CategoricalDtype(categories=["male", "female"], ordered=False)
grade_dtype = pd.CategoricalDtype(
categories=["high", "medium", "low"], ordered=True
)
categorizer = Categorizer(
columns=["category", "grade"],
dtypes={"category": sex_dtype, "grade": grade_dtype},
)
# Test CloudPickle serialization (primary format, even without fitting)
serialized = categorizer.serialize()
assert isinstance(serialized, bytes)
assert serialized.startswith(SerializablePreprocessor.MAGIC_CLOUDPICKLE)
# Test deserialization
deserialized = SerializablePreprocessor.deserialize(serialized)
assert isinstance(deserialized, Categorizer)
assert deserialized.columns == ["category", "grade"]
# Test dtypes preservation
assert len(deserialized.dtypes) == 2
assert isinstance(deserialized.dtypes["category"], pd.CategoricalDtype)
assert isinstance(deserialized.dtypes["grade"], pd.CategoricalDtype)
# Check category preservation
assert list(deserialized.dtypes["category"].categories) == ["male", "female"]
assert deserialized.dtypes["category"].ordered is False
assert list(deserialized.dtypes["grade"].categories) == [
"high",
"medium",
"low",
]
assert deserialized.dtypes["grade"].ordered is True
def test_categorizer_fitted_serialization(self):
"""Test Categorizer save/load functionality after fitting."""
# Create and fit categorizer (without predefined dtypes)
categorizer = Categorizer(columns=["category", "grade"])
dataset = ray.data.from_pandas(self.categorical_df)
fitted_categorizer = categorizer.fit(dataset)
# Test CloudPickle serialization (primary format)
serialized = fitted_categorizer.serialize()
assert isinstance(serialized, bytes)
assert serialized.startswith(SerializablePreprocessor.MAGIC_CLOUDPICKLE)
# Test deserialization
deserialized = SerializablePreprocessor.deserialize(serialized)
assert isinstance(deserialized, Categorizer)
assert deserialized._fitted
assert deserialized.columns == ["category", "grade"]
# Test functional equivalence
test_df = pd.DataFrame({"category": ["A", "B"], "grade": ["high", "low"]})
original_result = fitted_categorizer.transform_batch(test_df.copy())
deserialized_result = deserialized.transform_batch(test_df.copy())
pd.testing.assert_frame_equal(original_result, deserialized_result)
def test_encoder_serialization_formats(self):
"""Test that encoders work with different serialization formats."""
encoder = OrdinalEncoder(columns=["category"])
dataset = ray.data.from_pandas(self.categorical_df)
fitted_encoder = encoder.fit(dataset)
# Test CloudPickle format (default)
cloudpickle_serialized = fitted_encoder.serialize()
assert isinstance(cloudpickle_serialized, bytes)
# Test Pickle format (legacy)
pickle_serialized = fitted_encoder.serialize()
assert isinstance(pickle_serialized, bytes)
# Both should deserialize to equivalent objects
cloudpickle_deserialized = SerializablePreprocessor.deserialize(
cloudpickle_serialized
)
pickle_deserialized = SerializablePreprocessor.deserialize(pickle_serialized)
# Test functional equivalence
test_df = pd.DataFrame({"category": ["A", "B"]})
cloudpickle_result = cloudpickle_deserialized.transform_batch(test_df.copy())
pickle_result = pickle_deserialized.transform_batch(test_df.copy())
pd.testing.assert_frame_equal(cloudpickle_result, pickle_result)
def test_encoder_error_handling(self):
"""Test error handling for encoder serialization."""
# Test unknown preprocessor type
import cloudpickle
unknown_data = {
"type": "NonExistentEncoder",
"version": 1,
"fields": {"columns": ["test"]},
"stats": {},
"stats_type": "default",
}
fake_serialized = (
SerializablePreprocessor.MAGIC_CLOUDPICKLE + cloudpickle.dumps(unknown_data)
)
from ray.data.preprocessors.version_support import UnknownPreprocessorError
with pytest.raises(UnknownPreprocessorError) as exc_info:
SerializablePreprocessor.deserialize(fake_serialized)
assert exc_info.value.preprocessor_type == "NonExistentEncoder"
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-sv", __file__]))
| TestEncoderSerialization |
python | ray-project__ray | release/ray_release/cluster_manager/minimal.py | {
"start": 481,
"end": 9598
} | class ____(ClusterManager):
"""Minimal manager.
Builds app config and compute template but does not start or stop session.
"""
@retry(
init_delay_sec=10,
jitter_sec=5,
max_retry_count=2,
exceptions=(ClusterEnvCreateError,),
)
def create_cluster_env(self):
assert self.cluster_env_id is None
assert self.cluster_env_name
logger.info(
f"Test uses a cluster env with name "
f"{self.cluster_env_name}. Looking up existing "
f"cluster envs with this name."
)
self.cluster_env_id = create_cluster_env_from_image(
image=self.test.get_anyscale_byod_image(),
test_name=self.cluster_env_name,
runtime_env=self.test.get_byod_runtime_env(),
sdk=self.sdk,
cluster_env_id=self.cluster_env_id,
cluster_env_name=self.cluster_env_name,
)
def build_cluster_env(self, timeout: float = 600.0):
assert self.cluster_env_id
assert self.cluster_env_build_id is None
# Fetch build
build_id = None
last_status = None
error_message = None
config_json = None
result = self.sdk.list_cluster_environment_builds(self.cluster_env_id)
if not result or not result.results:
raise ClusterEnvBuildError(f"No build found for cluster env: {result}")
build = sorted(result.results, key=lambda b: b.created_at)[-1]
build_id = build.id
last_status = build.status
error_message = build.error_message
config_json = build.config_json
if last_status == "succeeded":
logger.info(
f"Link to succeeded cluster env build: "
f"{format_link(anyscale_cluster_env_build_url(build_id))}"
)
self.cluster_env_build_id = build_id
return
if last_status == "failed":
logger.info(f"Previous cluster env build failed: {error_message}")
logger.info("Starting new cluster env build...")
# Retry build
result = self.sdk.create_cluster_environment_build(
dict(
cluster_environment_id=self.cluster_env_id, config_json=config_json
)
)
build_id = result.result.id
logger.info(
f"Link to created cluster env build: "
f"{format_link(anyscale_cluster_env_build_url(build_id))}"
)
# Build found but not failed/finished yet
completed = False
start_wait = time.time()
next_report = start_wait + REPORT_S
timeout_at = time.monotonic() + timeout
logger.info(f"Waiting for build {build_id} to finish...")
logger.info(
f"Track progress here: "
f"{format_link(anyscale_cluster_env_build_url(build_id))}"
)
while not completed:
now = time.time()
if now > next_report:
logger.info(
f"... still waiting for build {build_id} to finish "
f"({int(now - start_wait)} seconds) ..."
)
next_report = next_report + REPORT_S
result = self.sdk.get_build(build_id)
build = result.result
if build.status == "failed":
raise ClusterEnvBuildError(
f"Cluster env build failed. Please see "
f"{anyscale_cluster_env_build_url(build_id)} for details. "
f"Error message: {build.error_message}"
)
if build.status == "succeeded":
logger.info("Build succeeded.")
self.cluster_env_build_id = build_id
return
completed = build.status not in ["in_progress", "pending"]
if completed:
raise ClusterEnvBuildError(
f"Unknown build status: {build.status}. Please see "
f"{anyscale_cluster_env_build_url(build_id)} for details"
)
if time.monotonic() > timeout_at:
raise ClusterEnvBuildTimeout(
f"Time out when building cluster env {self.cluster_env_name}"
)
time.sleep(1)
self.cluster_env_build_id = build_id
def create_cluster_compute(self, _repeat: bool = True):
assert self.cluster_compute_id is None
if self.cluster_compute:
assert self.cluster_compute
logger.info(
f"Tests uses compute template "
f"with name {self.cluster_compute_name}. "
f"Looking up existing cluster computes."
)
paging_token = None
while not self.cluster_compute_id:
result = self.sdk.search_cluster_computes(
dict(
project_id=self.project_id,
name=dict(equals=self.cluster_compute_name),
include_anonymous=True,
paging=dict(paging_token=paging_token),
)
)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == self.cluster_compute_name:
self.cluster_compute_id = res.id
logger.info(
f"Cluster compute already exists "
f"with ID {self.cluster_compute_id}"
)
break
if not paging_token:
break
if not self.cluster_compute_id:
logger.info(
f"Cluster compute not found. "
f"Creating with name {self.cluster_compute_name}."
)
try:
result = self.sdk.create_cluster_compute(
dict(
name=self.cluster_compute_name,
project_id=self.project_id,
config=self.cluster_compute,
)
)
self.cluster_compute_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create cluster "
f"compute: {e}. Sleeping for 10 seconds and then "
f"try again once..."
)
time.sleep(10)
return self.create_cluster_compute(_repeat=False)
raise ClusterComputeCreateError(
"Could not create cluster compute"
) from e
logger.info(
f"Cluster compute template created with "
f"name {self.cluster_compute_name} and "
f"ID {self.cluster_compute_id}"
)
def build_configs(self, timeout: float = 30.0):
try:
self.create_cluster_compute()
except AssertionError as e:
# If already exists, ignore
logger.warning(str(e))
except ClusterComputeCreateError as e:
raise e
except Exception as e:
raise ClusterComputeCreateError(
f"Unexpected cluster compute build error: {e}"
) from e
try:
self.create_cluster_env()
except AssertionError as e:
# If already exists, ignore
logger.warning(str(e))
except ClusterEnvCreateError as e:
raise e
except Exception as e:
raise ClusterEnvCreateError(
f"Unexpected cluster env create error: {e}"
) from e
try:
self.build_cluster_env(timeout=timeout)
except AssertionError as e:
# If already exists, ignore
logger.warning(str(e))
except (ClusterEnvBuildError, ClusterEnvBuildTimeout) as e:
raise e
except Exception as e:
raise ClusterEnvBuildError(
f"Unexpected cluster env build error: {e}"
) from e
def delete_configs(self):
if self.cluster_id:
self.sdk.delete_cluster(self.cluster_id)
if self.cluster_env_build_id:
self.sdk.delete_cluster_environment_build(self.cluster_env_build_id)
if self.cluster_env_id:
self.sdk.delete_cluster_environment(self.cluster_env_id)
if self.cluster_compute_id:
self.sdk.delete_cluster_compute(self.cluster_compute_id)
def start_cluster(self, timeout: float = 600.0):
pass
def terminate_cluster_ex(self, wait: bool = False):
pass
def get_cluster_address(self) -> str:
return f"anyscale://{self.project_name}/{self.cluster_name}"
| MinimalClusterManager |
python | huggingface__transformers | src/transformers/models/glm4v/modeling_glm4v.py | {
"start": 23546,
"end": 26871
} | class ____(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
and "Generating Long Sequences with Sparse Transformers".
"""
def __init__(self, config: Glm4vTextConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.is_causal = True
self.attention_dropout = config.attention_dropout
self.rope_parameters = config.rope_parameters
self.scaling = self.head_dim**-0.5
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_multimodal_rotary_pos_emb( # diff with Llama
query_states, key_states, cos, sin, self.rope_parameters["mrope_section"]
)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Glm4vTextAttention |
python | pytorch__pytorch | test/distributed/test_store.py | {
"start": 21722,
"end": 22103
} | class ____(TCPStoreTest):
_use_libuv = True
def _create_store(self):
store = create_tcp_store(use_libuv=True)
store.set_timeout(timedelta(seconds=300))
return store
def _create_store_with_ws(self, addr, world_size):
return create_tcp_store(
addr, world_size, wait_for_workers=False, use_libuv=True
)
| LibUvTCPStoreTest |
python | dagster-io__dagster | python_modules/automation/automation/scaffold_logs_viewer/server.py | {
"start": 164,
"end": 5350
} | class ____(BaseHTTPRequestHandler):
def __init__(self, *args, logs_directory: Path, **kwargs):
self.logs_directory = logs_directory
super().__init__(*args, **kwargs)
def do_GET(self):
parsed_path = urlparse(self.path)
path = parsed_path.path
if path == "/" or path == "/index.html":
self._serve_index()
elif path == "/api/sessions":
self._serve_sessions_list()
elif path.startswith("/api/sessions/"):
filename = unquote(path.split("/")[-1])
self._serve_session_data(filename)
else:
self._send_error(404, "Not found")
def _serve_index(self):
index_path = Path(__file__).parent / "index.html"
try:
with open(index_path) as f:
content = f.read()
self._send_response(200, content, "text/html")
except Exception as e:
self._send_error(500, f"Error serving index: {e}")
def _serve_sessions_list(self):
try:
sessions = []
for file_path in self.logs_directory.glob("scaffold_diagnostics_*.jsonl"):
# Parse first line to get session info
with open(file_path) as f:
first_line = f.readline().strip()
if first_line:
session_start = json.loads(first_line)
if session_start.get("type") == "session_start":
sessions.append(
{
"filename": file_path.name,
"correlation_id": session_start.get(
"correlation_id", "unknown"
),
"timestamp": session_start.get("timestamp", "unknown"),
}
)
# Sort by timestamp descending (most recent first)
sessions.sort(key=lambda x: x["timestamp"], reverse=True)
self._send_json_response(sessions)
except Exception as e:
self._send_error(500, f"Error loading sessions: {e}")
def _serve_session_data(self, filename: str):
try:
file_path = self.logs_directory / filename
if not file_path.exists():
self._send_error(404, "Session not found")
return
entries = []
session_start = None
with open(file_path) as f:
for line_content in f:
stripped_line = line_content.strip()
if stripped_line:
entry = json.loads(stripped_line)
if entry.get("type") == "session_start":
session_start = entry
entries.append(entry)
session_data = {
"filename": filename,
"correlation_id": session_start.get("correlation_id", "unknown")
if session_start
else "unknown",
"start_time": session_start.get("timestamp", "unknown")
if session_start
else "unknown",
"entries": entries,
}
self._send_json_response(session_data)
except Exception as e:
self._send_error(500, f"Error loading session data: {e}")
def _send_response(self, status_code: int, content: str, content_type: str):
self.send_response(status_code)
self.send_header("Content-type", content_type)
self.send_header("Content-Length", str(len(content.encode("utf-8"))))
self.end_headers()
self.wfile.write(content.encode("utf-8"))
def _send_json_response(self, data: Any):
content = json.dumps(data, indent=2)
self._send_response(200, content, "application/json")
def _send_error(self, status_code: int, message: str):
content = json.dumps({"error": message})
self._send_response(status_code, content, "application/json")
def log_message(self, format: str, *args): # noqa: A002
# Suppress default logging
pass
def create_handler_class(logs_directory: Path):
"""Factory function to create handler class with logs directory bound."""
class BoundHandler(ScaffoldBranchLogsHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, logs_directory=logs_directory, **kwargs)
return BoundHandler
def serve_logs(logs_directory: Path, port: int = 8000, host: str = "localhost"):
"""Start the web server to browse scaffold branch logs."""
handler_class = create_handler_class(logs_directory)
with HTTPServer((host, port), handler_class) as httpd:
import click
click.echo(f"Serving scaffold logs viewer at http://{host}:{port}")
click.echo(f"Logs directory: {logs_directory}")
click.echo("Press Ctrl+C to stop")
try:
httpd.serve_forever()
except KeyboardInterrupt:
click.echo("\nShutting down server...")
| ScaffoldBranchLogsHandler |
python | ray-project__ray | python/ray/dag/py_obj_scanner.py | {
"start": 711,
"end": 3676
} | class ____(ray.cloudpickle.CloudPickler, Generic[SourceType, TransformedType]):
"""Utility to find and replace the `source_type` in Python objects.
`source_type` can either be a single type or a tuple of multiple types.
The caller must first call `find_nodes()`, then compute a replacement table and
pass it to `replace_nodes`.
This uses cloudpickle under the hood, so all sub-objects that are not `source_type`
must be serializable.
Args:
source_type: the type(s) of object to find and replace. Default to DAGNodeBase.
"""
def __init__(self, source_type: Union[Type, Tuple] = DAGNodeBase):
self.source_type = source_type
# Buffer to keep intermediate serialized state.
self._buf = io.BytesIO()
# List of top-level SourceType found during the serialization pass.
self._found = None
# List of other objects found during the serialization pass.
# This is used to store references to objects so they won't be
# serialized by cloudpickle.
self._objects = []
# Replacement table to consult during deserialization.
self._replace_table: Dict[SourceType, TransformedType] = None
_instances[id(self)] = self
super().__init__(self._buf)
def reducer_override(self, obj):
"""Hook for reducing objects.
Objects of `self.source_type` are saved to `self._found` and a global map so
they can later be replaced.
All other objects fall back to the default `CloudPickler` serialization.
"""
if isinstance(obj, self.source_type):
index = len(self._found)
self._found.append(obj)
return _get_node, (id(self), index)
return super().reducer_override(obj)
def find_nodes(self, obj: Any) -> List[SourceType]:
"""
Serialize `obj` and store all instances of `source_type` found in `_found`.
Args:
obj: The object to scan for `source_type`.
Returns:
A list of all instances of `source_type` found in `obj`.
"""
assert (
self._found is None
), "find_nodes cannot be called twice on the same PyObjScanner instance."
self._found = []
self._objects = []
self.dump(obj)
return self._found
def replace_nodes(self, table: Dict[SourceType, TransformedType]) -> Any:
"""Replace previously found DAGNodes per the given table."""
assert self._found is not None, "find_nodes must be called first"
self._replace_table = table
self._buf.seek(0)
return pickle.load(self._buf)
def _replace_index(self, i: int) -> SourceType:
return self._replace_table[self._found[i]]
def clear(self):
"""Clear the scanner from the _instances"""
if id(self) in _instances:
del _instances[id(self)]
def __del__(self):
self.clear()
| _PyObjScanner |
python | huggingface__transformers | src/transformers/models/metaclip_2/modeling_metaclip_2.py | {
"start": 2975,
"end": 7573
} | class ____(nn.Module):
def __init__(self, config: MetaClip2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
)
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| MetaClip2VisionEmbeddings |
python | mahmoud__boltons | boltons/tableutils.py | {
"start": 5702,
"end": 6000
} | class ____(InputType):
def check_type(self, obj):
return isinstance(obj, MutableSequence)
def guess_headers(self, obj):
return None
def get_entry(self, obj, headers):
return obj
def get_entry_seq(self, obj_seq, headers):
return obj_seq
| ListInputType |
python | optuna__optuna | optuna/storages/_rdb/alembic/versions/v3.0.0.b.py | {
"start": 1115,
"end": 2956
} | class ____(BaseModel):
__tablename__ = "trial_values"
trial_value_id = Column(Integer, primary_key=True)
trial_id = Column(Integer, ForeignKey("trials.trial_id"), nullable=False)
value = Column(Float, nullable=False)
def upgrade():
bind = op.get_bind()
session = Session(bind=bind)
if (
session.query(TrialValueModel)
.join(TrialModel, TrialValueModel.trial_id == TrialModel.trial_id)
.filter(and_(TrialModel.state == TrialState.COMPLETE, TrialValueModel.value.is_(None)))
.count()
) != 0:
raise ValueError("Found invalid trial_values records (value=None and state='COMPLETE')")
session.query(TrialValueModel).filter(TrialValueModel.value.is_(None)).delete()
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.alter_column(
"intermediate_value",
type_=Float(precision=FLOAT_PRECISION),
nullable=True,
)
with op.batch_alter_table("trial_params") as batch_op:
batch_op.alter_column(
"param_value",
type_=Float(precision=FLOAT_PRECISION),
existing_nullable=True,
)
with op.batch_alter_table("trial_values") as batch_op:
batch_op.alter_column(
"value",
type_=Float(precision=FLOAT_PRECISION),
nullable=False,
)
def downgrade():
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.alter_column("intermediate_value", type_=Float, nullable=False)
with op.batch_alter_table("trial_params") as batch_op:
batch_op.alter_column("param_value", type_=Float, existing_nullable=True)
with op.batch_alter_table("trial_values") as batch_op:
batch_op.alter_column("value", type_=Float, existing_nullable=False)
| TrialValueModel |
python | mlflow__mlflow | mlflow/llama_index/pyfunc_wrapper.py | {
"start": 5642,
"end": 6098
} | class ____(_LlamaIndexModelWrapperBase):
@property
def engine_type(self):
return RETRIEVER_ENGINE_NAME
def _predict_single(self, *args, **kwargs) -> list[dict[str, Any]]:
response = self._llama_model.retrieve(*args, **kwargs)
return [node.dict() for node in response]
def _format_predict_input(self, data) -> "QueryBundle":
return _format_predict_input_query_engine_and_retriever(data)
| RetrieverEngineWrapper |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/pretty.py | {
"start": 16720,
"end": 35848
} | class ____:
"""A line in repr output."""
parent: Optional["_Line"] = None
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
last: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
new_line = yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for last, child in loop_last(node.children):
separator = "," if tuple_of_one else node.separator
line = _Line(
parent=new_line,
node=child,
whitespace=child_whitespace,
suffix=separator,
last=last and not tuple_of_one,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix=self.suffix,
last=self.last,
)
def __str__(self) -> str:
if self.last:
return f"{self.whitespace}{self.text}{self.node or ''}"
else:
return (
f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
)
def _is_namedtuple(obj: Any) -> bool:
"""Checks if an object is most likely a namedtuple. It is possible
to craft an object that passes this check and isn't a namedtuple, but
there is only a minuscule chance of this happening unintentionally.
Args:
obj (Any): The object to test
Returns:
bool: True if the object is a namedtuple. False otherwise.
"""
try:
fields = getattr(obj, "_fields", None)
except Exception:
# Being very defensive - if we cannot get the attr then its not a namedtuple
return False
return isinstance(obj, tuple) and isinstance(fields, tuple)
def traverse(
_object: Any,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
max_depth (int, optional): Maximum depth of data structures, or None for no maximum.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and _safe_isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error {str(error)!r}>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:
"""Walk the object depth first."""
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
obj_type = type(obj)
children: List[Node]
reached_max_depth = max_depth is not None and depth >= max_depth
def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if _safe_isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
try:
fake_attributes = hasattr(
obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
)
except Exception:
fake_attributes = False
rich_repr_result: Optional[RichReprResult] = None
if not fake_attributes:
try:
if hasattr(obj, "__rich_repr__") and not isclass(obj):
rich_repr_result = obj.__rich_repr__()
except Exception:
pass
if rich_repr_result is not None:
push_visited(obj_id)
angular = getattr(obj.__rich_repr__, "angular", False)
args = list(iter_rich_args(rich_repr_result))
class_name = obj.__class__.__name__
if args:
children = []
append = children.append
if reached_max_depth:
if angular:
node = Node(value_repr=f"<{class_name}...>")
else:
node = Node(value_repr=f"{class_name}(...)")
else:
if angular:
node = Node(
open_brace=f"<{class_name} ",
close_brace=">",
children=children,
last=root,
separator=" ",
)
else:
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if _safe_isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child, depth=depth + 1)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg, depth=depth + 1)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"<{class_name}>" if angular else f"{class_name}()",
children=[],
last=root,
)
pop_visited(obj_id)
elif _is_attr_object(obj) and not fake_attributes:
push_visited(obj_id)
children = []
append = children.append
attr_fields = _get_attr_fields(obj)
if attr_fields:
if reached_max_depth:
node = Node(value_repr=f"{obj.__class__.__name__}(...)")
else:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
def iter_attrs() -> (
Iterable[Tuple[str, Any, Optional[Callable[[Any], str]]]]
):
"""Iterate over attr fields and values."""
for attr in attr_fields:
if attr.repr:
try:
value = getattr(obj, attr.name)
except Exception as error:
# Can happen, albeit rarely
yield (attr.name, error, None)
else:
yield (
attr.name,
value,
attr.repr if callable(attr.repr) else None,
)
for last, (name, value, repr_callable) in loop_last(iter_attrs()):
if repr_callable:
child_node = Node(value_repr=str(repr_callable(value)))
else:
child_node = _traverse(value, depth=depth + 1)
child_node.last = last
child_node.key_repr = name
child_node.key_separator = "="
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
pop_visited(obj_id)
elif (
is_dataclass(obj)
and not _safe_isinstance(obj, type)
and not fake_attributes
and _is_dataclass_repr(obj)
):
push_visited(obj_id)
children = []
append = children.append
if reached_max_depth:
node = Node(value_repr=f"{obj.__class__.__name__}(...)")
else:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
empty=f"{obj.__class__.__name__}()",
)
for last, field in loop_last(
field for field in fields(obj) if field.repr
):
child_node = _traverse(getattr(obj, field.name), depth=depth + 1)
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj):
push_visited(obj_id)
class_name = obj.__class__.__name__
if reached_max_depth:
# If we've reached the max depth, we still show the class name, but not its contents
node = Node(
value_repr=f"{class_name}(...)",
)
else:
children = []
append = children.append
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
empty=f"{class_name}()",
)
for last, (key, value) in loop_last(obj._asdict().items()):
child_node = _traverse(value, depth=depth + 1)
child_node.key_repr = key
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif _safe_isinstance(obj, _CONTAINERS):
for container_type in _CONTAINERS:
if _safe_isinstance(obj, container_type):
obj_type = container_type
break
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if reached_max_depth:
node = Node(value_repr=f"{open_brace}...{close_brace}")
elif obj_type.__repr__ != type(obj).__repr__:
node = Node(value_repr=to_repr(obj), last=root)
elif obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if _safe_isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child, depth=depth + 1)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child, depth=depth + 1)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items - max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = _safe_isinstance(obj, tuple)
node.is_namedtuple = _is_namedtuple(obj)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
max_depth (int, optional): Maximum depth of nested data structure, or None for no depth.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if _safe_isinstance(_object, Node):
node = _object
else:
node = traverse(
_object, max_length=max_length, max_string=max_string, max_depth=max_depth
)
repr_str: str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: Optional["Console"] = None,
indent_guides: bool = True,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
max_depth=max_depth,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self) -> str:
1 / 0
return "this will fail"
from typing import NamedTuple
class StockKeepingUnit(NamedTuple):
name: str
description: str
price: float
category: str
reviews: List[str]
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"namedtuple": StockKeepingUnit(
"Sparkling British Spring Water",
"Carbonated spring water",
0.9,
"water",
["its amazing!", "its terrible!"],
),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore[attr-defined]
from pip._vendor.rich import print
print(Pretty(data, indent_guides=True, max_string=20))
class Thing:
def __repr__(self) -> str:
return "Hello\x1b[38;5;239m World!"
print(Pretty(Thing()))
| _Line |
python | RaRe-Technologies__gensim | gensim/test/test_ensemblelda.py | {
"start": 583,
"end": 20009
} | class ____(unittest.TestCase):
def get_elda(self):
return EnsembleLda(
corpus=common_corpus, id2word=common_dictionary, num_topics=NUM_TOPICS,
passes=PASSES, num_models=NUM_MODELS, random_state=RANDOM_STATE,
topic_model_class=LdaModel,
)
def get_elda_mem_unfriendly(self):
return EnsembleLda(
corpus=common_corpus, id2word=common_dictionary, num_topics=NUM_TOPICS,
passes=PASSES, num_models=NUM_MODELS, random_state=RANDOM_STATE,
memory_friendly_ttda=False, topic_model_class=LdaModel,
)
def assert_ttda_is_valid(self, elda):
"""Check that ttda has one or more topic and that term probabilities add to one."""
assert len(elda.ttda) > 0
sum_over_terms = elda.ttda.sum(axis=1)
expected_sum_over_terms = np.ones(len(elda.ttda)).astype(np.float32)
np.testing.assert_allclose(sum_over_terms, expected_sum_over_terms, rtol=1e-04)
def test_elda(self):
elda = self.get_elda()
# given that the random_state doesn't change, it should
# always be 2 detected topics in this setup.
assert elda.stable_topics.shape[1] == len(common_dictionary)
assert len(elda.ttda) == NUM_MODELS * NUM_TOPICS
self.assert_ttda_is_valid(elda)
def test_backwards_compatibility_with_persisted_model(self):
elda = self.get_elda()
# compare with a pre-trained reference model
loaded_elda = EnsembleLda.load(datapath('ensemblelda'))
np.testing.assert_allclose(elda.ttda, loaded_elda.ttda, rtol=RTOL)
atol = loaded_elda.asymmetric_distance_matrix.max() * 1e-05
np.testing.assert_allclose(
elda.asymmetric_distance_matrix,
loaded_elda.asymmetric_distance_matrix, atol=atol,
)
def test_recluster(self):
# the following test is quite specific to the current implementation and not part of any api,
# but it makes improving those sections of the code easier as long as sorted_clusters and the
# cluster_model results are supposed to stay the same. Potentially this test will deprecate.
elda = EnsembleLda.load(datapath('ensemblelda'))
loaded_cluster_model_results = deepcopy(elda.cluster_model.results)
loaded_valid_clusters = deepcopy(elda.valid_clusters)
loaded_stable_topics = deepcopy(elda.get_topics())
# continue training with the distance matrix of the pretrained reference and see if
# the generated clusters match.
elda.asymmetric_distance_matrix_outdated = True
elda.recluster()
self.assert_clustering_results_equal(elda.cluster_model.results, loaded_cluster_model_results)
assert elda.valid_clusters == loaded_valid_clusters
np.testing.assert_allclose(elda.get_topics(), loaded_stable_topics, rtol=RTOL)
def test_recluster_does_nothing_when_stable_topics_already_found(self):
elda = self.get_elda()
# reclustering shouldn't change anything without
# added models or different parameters
elda.recluster()
assert elda.stable_topics.shape[1] == len(common_dictionary)
assert len(elda.ttda) == NUM_MODELS * NUM_TOPICS
self.assert_ttda_is_valid(elda)
def test_not_trained_given_zero_passes(self):
elda = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary, num_topics=NUM_TOPICS,
passes=0, num_models=NUM_MODELS, random_state=RANDOM_STATE,
)
assert len(elda.ttda) == 0
def test_not_trained_given_no_corpus(self):
elda = EnsembleLda(
id2word=common_dictionary, num_topics=NUM_TOPICS,
passes=PASSES, num_models=NUM_MODELS, random_state=RANDOM_STATE,
)
assert len(elda.ttda) == 0
def test_not_trained_given_zero_iterations(self):
elda = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary, num_topics=NUM_TOPICS,
iterations=0, num_models=NUM_MODELS, random_state=RANDOM_STATE,
)
assert len(elda.ttda) == 0
def test_not_trained_given_zero_models(self):
elda = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary, num_topics=NUM_TOPICS,
passes=PASSES, num_models=0, random_state=RANDOM_STATE
)
assert len(elda.ttda) == 0
def test_mem_unfriendly(self):
# elda_mem_unfriendly and self.elda should have topics that are
# the same up to floating point variations caused by the two different
# implementations
elda = self.get_elda()
elda_mem_unfriendly = self.get_elda_mem_unfriendly()
assert len(elda_mem_unfriendly.tms) == NUM_MODELS
np.testing.assert_allclose(elda.ttda, elda_mem_unfriendly.ttda, rtol=RTOL)
np.testing.assert_allclose(elda.get_topics(), elda_mem_unfriendly.get_topics(), rtol=RTOL)
self.assert_ttda_is_valid(elda_mem_unfriendly)
def test_generate_gensim_representation(self):
elda = self.get_elda()
gensim_model = elda.generate_gensim_representation()
topics = gensim_model.get_topics()
np.testing.assert_allclose(elda.get_topics(), topics, rtol=RTOL)
def assert_clustering_results_equal(self, clustering_results_1, clustering_results_2):
"""Assert important attributes of the cluster results"""
np.testing.assert_array_equal(
[element.label for element in clustering_results_1],
[element.label for element in clustering_results_2],
)
np.testing.assert_array_equal(
[element.is_core for element in clustering_results_1],
[element.is_core for element in clustering_results_2],
)
def test_persisting(self):
elda = self.get_elda()
elda_mem_unfriendly = self.get_elda_mem_unfriendly()
fname = get_tmpfile('gensim_models_ensemblelda')
elda.save(fname)
loaded_elda = EnsembleLda.load(fname)
# storing the ensemble without memory_friendy_ttda
elda_mem_unfriendly.save(fname)
loaded_elda_mem_unfriendly = EnsembleLda.load(fname)
# topic_model_class will be lazy loaded and should be None first
assert loaded_elda.topic_model_class is None
# was it stored and loaded correctly?
# memory friendly.
loaded_elda_representation = loaded_elda.generate_gensim_representation()
# generating the representation also lazily loads the topic_model_class
assert loaded_elda.topic_model_class == LdaModel
topics = loaded_elda_representation.get_topics()
ttda = loaded_elda.ttda
amatrix = loaded_elda.asymmetric_distance_matrix
np.testing.assert_allclose(elda.get_topics(), topics, rtol=RTOL)
np.testing.assert_allclose(elda.ttda, ttda, rtol=RTOL)
np.testing.assert_allclose(elda.asymmetric_distance_matrix, amatrix, rtol=RTOL)
expected_clustering_results = elda.cluster_model.results
loaded_clustering_results = loaded_elda.cluster_model.results
self.assert_clustering_results_equal(expected_clustering_results, loaded_clustering_results)
# memory unfriendly
loaded_elda_mem_unfriendly_representation = loaded_elda_mem_unfriendly.generate_gensim_representation()
topics = loaded_elda_mem_unfriendly_representation.get_topics()
np.testing.assert_allclose(elda.get_topics(), topics, rtol=RTOL)
def test_multiprocessing(self):
# same configuration
random_state = RANDOM_STATE
# use 3 processes for the ensemble and the distance,
# so that the 4 models and 8 topics cannot be distributed
# to each worker evenly
workers = 3
# memory friendly. contains List of topic word distributions
elda = self.get_elda()
elda_multiprocessing = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary, topic_model_class=LdaModel,
num_topics=NUM_TOPICS, passes=PASSES, num_models=NUM_MODELS,
random_state=random_state, ensemble_workers=workers, distance_workers=workers,
)
# memory unfriendly. contains List of models
elda_mem_unfriendly = self.get_elda_mem_unfriendly()
elda_multiprocessing_mem_unfriendly = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary, topic_model_class=LdaModel,
num_topics=NUM_TOPICS, passes=PASSES, num_models=NUM_MODELS,
random_state=random_state, ensemble_workers=workers, distance_workers=workers,
memory_friendly_ttda=False,
)
np.testing.assert_allclose(
elda.get_topics(),
elda_multiprocessing.get_topics(),
rtol=RTOL
)
np.testing.assert_allclose(
elda_mem_unfriendly.get_topics(),
elda_multiprocessing_mem_unfriendly.get_topics(),
rtol=RTOL
)
def test_add_models_to_empty(self):
elda = self.get_elda()
ensemble = EnsembleLda(id2word=common_dictionary, num_models=0)
ensemble.add_model(elda.ttda[0:1])
ensemble.add_model(elda.ttda[1:])
ensemble.recluster()
np.testing.assert_allclose(ensemble.get_topics(), elda.get_topics(), rtol=RTOL)
# persisting an ensemble that is entirely built from existing ttdas
fname = get_tmpfile('gensim_models_ensemblelda')
ensemble.save(fname)
loaded_ensemble = EnsembleLda.load(fname)
np.testing.assert_allclose(loaded_ensemble.get_topics(), elda.get_topics(), rtol=RTOL)
self.test_inference(loaded_ensemble)
def test_add_models(self):
# make sure countings and sizes after adding are correct
# create new models and add other models to them.
# there are a ton of configurations for the first parameter possible,
# try them all
# quickly train something that can be used for counting results
num_new_models = 3
num_new_topics = 3
# 1. memory friendly
base_elda = self.get_elda()
cumulative_elda = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary,
num_topics=num_new_topics, passes=1, num_models=num_new_models,
iterations=1, random_state=RANDOM_STATE, topic_model_class=LdaMulticore,
workers=3, ensemble_workers=2,
)
# 1.1 ttda
num_topics_before_add_model = len(cumulative_elda.ttda)
num_models_before_add_model = cumulative_elda.num_models
cumulative_elda.add_model(base_elda.ttda)
assert len(cumulative_elda.ttda) == num_topics_before_add_model + len(base_elda.ttda)
assert cumulative_elda.num_models == num_models_before_add_model + 1 # defaults to 1 for one ttda matrix
# 1.2 an ensemble
num_topics_before_add_model = len(cumulative_elda.ttda)
num_models_before_add_model = cumulative_elda.num_models
cumulative_elda.add_model(base_elda, 5)
assert len(cumulative_elda.ttda) == num_topics_before_add_model + len(base_elda.ttda)
assert cumulative_elda.num_models == num_models_before_add_model + 5
# 1.3 a list of ensembles
num_topics_before_add_model = len(cumulative_elda.ttda)
num_models_before_add_model = cumulative_elda.num_models
# it should be totally legit to add a memory unfriendly object to a memory friendly one
base_elda_mem_unfriendly = self.get_elda_mem_unfriendly()
cumulative_elda.add_model([base_elda, base_elda_mem_unfriendly])
assert len(cumulative_elda.ttda) == num_topics_before_add_model + 2 * len(base_elda.ttda)
assert cumulative_elda.num_models == num_models_before_add_model + 2 * NUM_MODELS
# 1.4 a single gensim model
model = base_elda.classic_model_representation
num_topics_before_add_model = len(cumulative_elda.ttda)
num_models_before_add_model = cumulative_elda.num_models
cumulative_elda.add_model(model)
assert len(cumulative_elda.ttda) == num_topics_before_add_model + len(model.get_topics())
assert cumulative_elda.num_models == num_models_before_add_model + 1
# 1.5 a list gensim models
num_topics_before_add_model = len(cumulative_elda.ttda)
num_models_before_add_model = cumulative_elda.num_models
cumulative_elda.add_model([model, model])
assert len(cumulative_elda.ttda) == num_topics_before_add_model + 2 * len(model.get_topics())
assert cumulative_elda.num_models == num_models_before_add_model + 2
self.assert_ttda_is_valid(cumulative_elda)
# 2. memory unfriendly
elda_mem_unfriendly = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary,
num_topics=num_new_topics, passes=1, num_models=num_new_models,
iterations=1, random_state=RANDOM_STATE, topic_model_class=LdaMulticore,
workers=3, ensemble_workers=2, memory_friendly_ttda=False,
)
# 2.1 a single ensemble
num_topics_before_add_model = len(elda_mem_unfriendly.tms)
num_models_before_add_model = elda_mem_unfriendly.num_models
elda_mem_unfriendly.add_model(base_elda_mem_unfriendly)
assert len(elda_mem_unfriendly.tms) == num_topics_before_add_model + NUM_MODELS
assert elda_mem_unfriendly.num_models == num_models_before_add_model + NUM_MODELS
# 2.2 a list of ensembles
num_topics_before_add_model = len(elda_mem_unfriendly.tms)
num_models_before_add_model = elda_mem_unfriendly.num_models
elda_mem_unfriendly.add_model([base_elda_mem_unfriendly, base_elda_mem_unfriendly])
assert len(elda_mem_unfriendly.tms) == num_topics_before_add_model + 2 * NUM_MODELS
assert elda_mem_unfriendly.num_models == num_models_before_add_model + 2 * NUM_MODELS
# 2.3 a single gensim model
num_topics_before_add_model = len(elda_mem_unfriendly.tms)
num_models_before_add_model = elda_mem_unfriendly.num_models
elda_mem_unfriendly.add_model(base_elda_mem_unfriendly.tms[0])
assert len(elda_mem_unfriendly.tms) == num_topics_before_add_model + 1
assert elda_mem_unfriendly.num_models == num_models_before_add_model + 1
# 2.4 a list of gensim models
num_topics_before_add_model = len(elda_mem_unfriendly.tms)
num_models_before_add_model = elda_mem_unfriendly.num_models
elda_mem_unfriendly.add_model(base_elda_mem_unfriendly.tms)
assert len(elda_mem_unfriendly.tms) == num_topics_before_add_model + NUM_MODELS
assert elda_mem_unfriendly.num_models == num_models_before_add_model + NUM_MODELS
# 2.5 topic term distributions should throw errors, because the
# actual models are needed for the memory unfriendly ensemble
num_topics_before_add_model = len(elda_mem_unfriendly.tms)
num_models_before_add_model = elda_mem_unfriendly.num_models
with pytest.raises(ValueError):
elda_mem_unfriendly.add_model(base_elda_mem_unfriendly.tms[0].get_topics())
# remains unchanged
assert len(elda_mem_unfriendly.tms) == num_topics_before_add_model
assert elda_mem_unfriendly.num_models == num_models_before_add_model
assert elda_mem_unfriendly.num_models == len(elda_mem_unfriendly.tms)
self.assert_ttda_is_valid(elda_mem_unfriendly)
def test_add_and_recluster(self):
# See if after adding a model, the model still makes sense
num_new_models = 3
num_new_topics = 3
random_state = 1
# train models two sets of models (mem friendly and unfriendly)
elda_1 = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary,
num_topics=num_new_topics, passes=10, num_models=num_new_models,
iterations=30, random_state=random_state, topic_model_class='lda',
distance_workers=4,
)
elda_mem_unfriendly_1 = EnsembleLda(
corpus=common_corpus, id2word=common_dictionary,
num_topics=num_new_topics, passes=10, num_models=num_new_models,
iterations=30, random_state=random_state, topic_model_class=LdaModel,
distance_workers=4, memory_friendly_ttda=False,
)
elda_2 = self.get_elda()
elda_mem_unfriendly_2 = self.get_elda_mem_unfriendly()
assert elda_1.random_state != elda_2.random_state
assert elda_mem_unfriendly_1.random_state != elda_mem_unfriendly_2.random_state
# both should be similar
np.testing.assert_allclose(elda_1.ttda, elda_mem_unfriendly_1.ttda, rtol=RTOL)
np.testing.assert_allclose(elda_1.get_topics(), elda_mem_unfriendly_1.get_topics(), rtol=RTOL)
# and every next step applied to both should result in similar results
# 1. adding to ttda and tms
elda_1.add_model(elda_2)
elda_mem_unfriendly_1.add_model(elda_mem_unfriendly_2)
np.testing.assert_allclose(elda_1.ttda, elda_mem_unfriendly_1.ttda, rtol=RTOL)
assert len(elda_1.ttda) == len(elda_2.ttda) + num_new_models * num_new_topics
assert len(elda_mem_unfriendly_1.ttda) == len(elda_mem_unfriendly_2.ttda) + num_new_models * num_new_topics
assert len(elda_mem_unfriendly_1.tms) == NUM_MODELS + num_new_models
self.assert_ttda_is_valid(elda_1)
self.assert_ttda_is_valid(elda_mem_unfriendly_1)
# 2. distance matrix
elda_1._generate_asymmetric_distance_matrix()
elda_mem_unfriendly_1._generate_asymmetric_distance_matrix()
np.testing.assert_allclose(
elda_1.asymmetric_distance_matrix,
elda_mem_unfriendly_1.asymmetric_distance_matrix,
)
# 3. CBDBSCAN results
elda_1._generate_topic_clusters()
elda_mem_unfriendly_1._generate_topic_clusters()
clustering_results = elda_1.cluster_model.results
mem_unfriendly_clustering_results = elda_mem_unfriendly_1.cluster_model.results
self.assert_clustering_results_equal(clustering_results, mem_unfriendly_clustering_results)
# 4. finally, the stable topics
elda_1._generate_stable_topics()
elda_mem_unfriendly_1._generate_stable_topics()
np.testing.assert_allclose(
elda_1.get_topics(),
elda_mem_unfriendly_1.get_topics(),
)
elda_1.generate_gensim_representation()
elda_mem_unfriendly_1.generate_gensim_representation()
# same random state, hence topics should be still similar
np.testing.assert_allclose(elda_1.get_topics(), elda_mem_unfriendly_1.get_topics(), rtol=RTOL)
def test_inference(self, elda=None):
if elda is None:
elda = self.get_elda()
# get the most likely token id from topic 0
max_id = np.argmax(elda.get_topics()[0, :])
assert elda.classic_model_representation.iterations > 0
# topic 0 should be dominant in the inference.
# the difference between the probabilities should be significant and larger than 0.3
inferred = elda[[(max_id, 1)]]
assert inferred[0][1] - 0.3 > inferred[1][1]
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.WARN)
unittest.main()
| TestEnsembleLda |
python | pytorch__pytorch | torch/ao/nn/qat/dynamic/modules/linear.py | {
"start": 156,
"end": 1215
} | class ____(torch.ao.nn.qat.Linear):
r"""
A linear module attached with FakeQuantize modules for weight,
used for dynamic quantization aware training.
We adopt the same interface as `torch.nn.Linear`, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
for documentation.
Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
default.
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
qconfig: Optional["QConfig"] = None,
device: int | str | torch.device | None = None,
dtype: str | None = None,
) -> None:
super().__init__(in_features, out_features, bias, qconfig, device, dtype)
if not torch.ao.quantization.qconfig._activation_is_memoryless(qconfig): # type: ignore[arg-type]
raise ValueError(
"Dynamic QAT requires a memoryless observer."
+ "This means a MovingAverage observer with averaging constant equal to 1"
)
| Linear |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/test/steps/manifest_only_connectors.py | {
"start": 2804,
"end": 8358
} | class ____(PytestStep):
"""A step to run unit tests for a manifest-only connector"""
title = "Manifest-only unit tests"
test_directory_name = "unit_tests"
common_test_dependencies = ["freezegun", "pytest", "pytest-mock", "requests-mock"]
async def install_testing_environment(
self,
built_connector_container: Container,
test_config_file_name: str,
test_config_file: File,
extra_dependencies_names: Sequence[str],
) -> Container:
"""Install the testing environment for manifest-only connectors."""
connector_name = self.context.connector.technical_name
# Use a simpler path structure to match what the CDK expects
test_dir = "/tmp/test_environment"
connector_base_path = f"{test_dir}/airbyte-integrations/connectors"
connector_path = f"{connector_base_path}/{connector_name}"
# Get the proper user from the container
user = await built_connector_container.user()
if not user:
user = "root"
# Set up base test environment with reset entrypoint
test_environment = built_connector_container.with_entrypoint([])
# Create test directories with proper permissions
test_environment = (
test_environment.with_user("root") # Temporarily switch to root to create directories
.with_exec(["mkdir", "-p", test_dir, connector_base_path, connector_path, f"{connector_path}/{self.test_directory_name}"])
.with_workdir(test_dir)
)
# Mount the connector directory and files
connector_dir = await self.context.get_connector_dir()
# Check what files are in the connector directory to identify components.py
connector_entries = await connector_dir.entries()
self.logger.info(f"Files in connector directory: {connector_entries}")
# Mount the entire connector directory to ensure all files (especially components.py) are available
test_environment = test_environment.with_mounted_directory(connector_path, connector_dir)
# Get and mount the unit_tests directory specifically
unit_tests_dir = connector_dir.directory(self.test_directory_name)
unit_tests_path = f"{connector_path}/{self.test_directory_name}"
# Mount secrets
secret_mounting_function = await secrets.mounted_connector_secrets(self.context, f"{test_dir}/secrets", self.secrets, owner=user)
# Apply secrets and set up Python path
test_environment = test_environment.with_(secret_mounting_function).with_env_variable(
"PYTHONPATH", f"{connector_base_path}:{connector_path}:{unit_tests_path}:{test_dir}"
)
# Create symlink to source-declarative-manifest
test_environment = test_environment.with_exec(["ln", "-s", "/source-declarative-manifest", connector_path])
# Set working directory to unit tests path
test_environment = test_environment.with_workdir(unit_tests_path)
# Install Poetry
test_environment = test_environment.with_exec(["echo", "=== INSTALLING POETRY ==="]).with_exec(["pip", "install", "poetry"])
# Install dependencies directly with Poetry
test_environment = test_environment.with_exec(
["poetry", "config", "virtualenvs.create", "false"] # Disable virtualenv creation
).with_exec(
["poetry", "install", "--no-root"] # Install dependencies without the root package
)
# Install common test dependencies. This shouldn't be needed as we're now
# using the connector's pyproject.toml, but it's here to support MO connectors
# that might have dependencies not listed in the pyproject.toml.
if self.common_test_dependencies:
test_environment = test_environment.with_exec(["echo", "=== INSTALLING COMMON TEST DEPENDENCIES ==="]).with_exec(
["pip", "install"] + self.common_test_dependencies
)
# Set ownership of all files to the proper user and switch to that user
test_environment = test_environment.with_exec(["chown", "-R", f"{user}:{user}", test_dir]).with_user(user)
return test_environment
async def get_config_file_name_and_file(self) -> Tuple[str, File]:
"""
Get the config file name and file to use for pytest.
For manifest-only connectors, we expect the poetry config to be found
in the unit_tests directory.
"""
connector_name = self.context.connector.technical_name
connector_dir = await self.context.get_connector_dir()
unit_tests_dir = connector_dir.directory(self.test_directory_name)
unit_tests_entries = await unit_tests_dir.entries()
if self.PYPROJECT_FILE_NAME in unit_tests_entries:
config_file_name = self.PYPROJECT_FILE_NAME
test_config = unit_tests_dir.file(self.PYPROJECT_FILE_NAME)
self.logger.info(f"Found {self.PYPROJECT_FILE_NAME} in the unit_tests directory for {connector_name}, using it for testing.")
return config_file_name, test_config
else:
raise FileNotFoundError(f"Could not find {self.PYPROJECT_FILE_NAME} in the unit_tests directory for {connector_name}.")
def get_pytest_command(self, test_config_file_name: str) -> List[str]:
"""Get the pytest command to run."""
cmd = ["pytest", "-v", ".", "-c", test_config_file_name] + self.params_as_cli_options
return ["poetry", "run"] + cmd
| ManifestOnlyConnectorUnitTests |
python | sympy__sympy | sympy/physics/continuum_mechanics/truss.py | {
"start": 765,
"end": 44962
} | class ____:
"""
A Truss is an assembly of members such as beams,
connected by nodes, that create a rigid structure.
In engineering, a truss is a structure that
consists of two-force members only.
Trusses are extremely important in engineering applications
and can be seen in numerous real-world applications like bridges.
Examples
========
There is a Truss consisting of four nodes and five
members connecting the nodes. A force P acts
downward on the node D and there also exist pinned
and roller joints on the nodes A and B respectively.
.. image:: truss_example.png
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(("node_1", 0, 0), ("node_2", 6, 0), ("node_3", 2, 2), ("node_4", 2, 0))
>>> t.add_member(("member_1", "node_1", "node_4"), ("member_2", "node_2", "node_4"), ("member_3", "node_1", "node_3"))
>>> t.add_member(("member_4", "node_2", "node_3"), ("member_5", "node_3", "node_4"))
>>> t.apply_load(("node_4", 10, 270))
>>> t.apply_support(("node_1", "pinned"), ("node_2", "roller"))
"""
def __init__(self):
"""
Initializes the class
"""
self._nodes = []
self._members = {}
self._loads = {}
self._supports = {}
self._node_labels = []
self._node_positions = []
self._node_position_x = []
self._node_position_y = []
self._nodes_occupied = {}
self._member_lengths = {}
self._reaction_loads = {}
self._internal_forces = {}
self._node_coordinates = {}
@property
def nodes(self):
"""
Returns the nodes of the truss along with their positions.
"""
return self._nodes
@property
def node_labels(self):
"""
Returns the node labels of the truss.
"""
return self._node_labels
@property
def node_positions(self):
"""
Returns the positions of the nodes of the truss.
"""
return self._node_positions
@property
def members(self):
"""
Returns the members of the truss along with the start and end points.
"""
return self._members
@property
def member_lengths(self):
"""
Returns the length of each member of the truss.
"""
return self._member_lengths
@property
def supports(self):
"""
Returns the nodes with provided supports along with the kind of support provided i.e.
pinned or roller.
"""
return self._supports
@property
def loads(self):
"""
Returns the loads acting on the truss.
"""
return self._loads
@property
def reaction_loads(self):
"""
Returns the reaction forces for all supports which are all initialized to 0.
"""
return self._reaction_loads
@property
def internal_forces(self):
"""
Returns the internal forces for all members which are all initialized to 0.
"""
return self._internal_forces
def add_node(self, *args):
"""
This method adds a node to the truss along with its name/label and its location.
Multiple nodes can be added at the same time.
Parameters
==========
The input(s) for this method are tuples of the form (label, x, y).
label: String or a Symbol
The label for a node. It is the only way to identify a particular node.
x: Sympifyable
The x-coordinate of the position of the node.
y: Sympifyable
The y-coordinate of the position of the node.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(('A', 0, 0))
>>> t.nodes
[('A', 0, 0)]
>>> t.add_node(('B', 3, 0), ('C', 4, 1))
>>> t.nodes
[('A', 0, 0), ('B', 3, 0), ('C', 4, 1)]
"""
for i in args:
label = i[0]
x = i[1]
x = sympify(x)
y=i[2]
y = sympify(y)
if label in self._node_coordinates:
raise ValueError("Node needs to have a unique label")
elif [x, y] in self._node_coordinates.values():
raise ValueError("A node already exists at the given position")
else :
self._nodes.append((label, x, y))
self._node_labels.append(label)
self._node_positions.append((x, y))
self._node_position_x.append(x)
self._node_position_y.append(y)
self._node_coordinates[label] = [x, y]
def remove_node(self, *args):
"""
This method removes a node from the truss.
Multiple nodes can be removed at the same time.
Parameters
==========
The input(s) for this method are the labels of the nodes to be removed.
label: String or Symbol
The label of the node to be removed.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0), ('C', 5, 0))
>>> t.nodes
[('A', 0, 0), ('B', 3, 0), ('C', 5, 0)]
>>> t.remove_node('A', 'C')
>>> t.nodes
[('B', 3, 0)]
"""
for label in args:
for i in range(len(self.nodes)):
if self._node_labels[i] == label:
x = self._node_position_x[i]
y = self._node_position_y[i]
if label not in self._node_coordinates:
raise ValueError("No such node exists in the truss")
else:
members_duplicate = self._members.copy()
for member in members_duplicate:
if label == self._members[member][0] or label == self._members[member][1]:
raise ValueError("The given node already has member attached to it")
self._nodes.remove((label, x, y))
self._node_labels.remove(label)
self._node_positions.remove((x, y))
self._node_position_x.remove(x)
self._node_position_y.remove(y)
if label in self._loads:
self._loads.pop(label)
if label in self._supports:
self._supports.pop(label)
self._node_coordinates.pop(label)
def add_member(self, *args):
"""
This method adds a member between any two nodes in the given truss.
Parameters
==========
The input(s) of the method are tuple(s) of the form (label, start, end).
label: String or Symbol
The label for a member. It is the only way to identify a particular member.
start: String or Symbol
The label of the starting point/node of the member.
end: String or Symbol
The label of the ending point/node of the member.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0), ('C', 2, 2))
>>> t.add_member(('AB', 'A', 'B'), ('BC', 'B', 'C'))
>>> t.members
{'AB': ['A', 'B'], 'BC': ['B', 'C']}
"""
for i in args:
label = i[0]
start = i[1]
end = i[2]
if start not in self._node_coordinates or end not in self._node_coordinates or start==end:
raise ValueError("The start and end points of the member must be unique nodes")
elif label in self._members:
raise ValueError("A member with the same label already exists for the truss")
elif self._nodes_occupied.get((start, end)):
raise ValueError("A member already exists between the two nodes")
else:
self._members[label] = [start, end]
self._member_lengths[label] = sqrt((self._node_coordinates[end][0]-self._node_coordinates[start][0])**2 + (self._node_coordinates[end][1]-self._node_coordinates[start][1])**2)
self._nodes_occupied[start, end] = True
self._nodes_occupied[end, start] = True
self._internal_forces[label] = 0
def remove_member(self, *args):
"""
This method removes members from the given truss.
Parameters
==========
labels: String or Symbol
The label for the member to be removed.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0), ('C', 2, 2))
>>> t.add_member(('AB', 'A', 'B'), ('AC', 'A', 'C'), ('BC', 'B', 'C'))
>>> t.members
{'AB': ['A', 'B'], 'AC': ['A', 'C'], 'BC': ['B', 'C']}
>>> t.remove_member('AC', 'BC')
>>> t.members
{'AB': ['A', 'B']}
"""
for label in args:
if label not in self._members:
raise ValueError("No such member exists in the Truss")
else:
self._nodes_occupied.pop((self._members[label][0], self._members[label][1]))
self._nodes_occupied.pop((self._members[label][1], self._members[label][0]))
self._members.pop(label)
self._member_lengths.pop(label)
self._internal_forces.pop(label)
def change_node_label(self, *args):
"""
This method changes the label(s) of the specified node(s).
Parameters
==========
The input(s) of this method are tuple(s) of the form (label, new_label).
label: String or Symbol
The label of the node for which the label has
to be changed.
new_label: String or Symbol
The new label of the node.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0))
>>> t.nodes
[('A', 0, 0), ('B', 3, 0)]
>>> t.change_node_label(('A', 'C'), ('B', 'D'))
>>> t.nodes
[('C', 0, 0), ('D', 3, 0)]
"""
for i in args:
label = i[0]
new_label = i[1]
if label not in self._node_coordinates:
raise ValueError("No such node exists for the Truss")
elif new_label in self._node_coordinates:
raise ValueError("A node with the given label already exists")
else:
for node in self._nodes:
if node[0] == label:
self._nodes[self._nodes.index((label, node[1], node[2]))] = (new_label, node[1], node[2])
self._node_labels[self._node_labels.index(node[0])] = new_label
self._node_coordinates[new_label] = self._node_coordinates[label]
self._node_coordinates.pop(label)
if node[0] in self._supports:
self._supports[new_label] = self._supports[node[0]]
self._supports.pop(node[0])
if new_label in self._supports:
if self._supports[new_label] == 'pinned':
if 'R_'+str(label)+'_x' in self._reaction_loads and 'R_'+str(label)+'_y' in self._reaction_loads:
self._reaction_loads['R_'+str(new_label)+'_x'] = self._reaction_loads['R_'+str(label)+'_x']
self._reaction_loads['R_'+str(new_label)+'_y'] = self._reaction_loads['R_'+str(label)+'_y']
self._reaction_loads.pop('R_'+str(label)+'_x')
self._reaction_loads.pop('R_'+str(label)+'_y')
self._loads[new_label] = self._loads[label]
for load in self._loads[new_label]:
if load[1] == 90:
load[0] -= Symbol('R_'+str(label)+'_y')
if load[0] == 0:
self._loads[label].remove(load)
break
for load in self._loads[new_label]:
if load[1] == 0:
load[0] -= Symbol('R_'+str(label)+'_x')
if load[0] == 0:
self._loads[label].remove(load)
break
self.apply_load(new_label, Symbol('R_'+str(new_label)+'_x'), 0)
self.apply_load(new_label, Symbol('R_'+str(new_label)+'_y'), 90)
self._loads.pop(label)
elif self._supports[new_label] == 'roller':
self._loads[new_label] = self._loads[label]
for load in self._loads[label]:
if load[1] == 90:
load[0] -= Symbol('R_'+str(label)+'_y')
if load[0] == 0:
self._loads[label].remove(load)
break
self.apply_load(new_label, Symbol('R_'+str(new_label)+'_y'), 90)
self._loads.pop(label)
else:
if label in self._loads:
self._loads[new_label] = self._loads[label]
self._loads.pop(label)
for member in self._members:
if self._members[member][0] == node[0]:
self._members[member][0] = new_label
self._nodes_occupied[(new_label, self._members[member][1])] = True
self._nodes_occupied[(self._members[member][1], new_label)] = True
self._nodes_occupied.pop((label, self._members[member][1]))
self._nodes_occupied.pop((self._members[member][1], label))
elif self._members[member][1] == node[0]:
self._members[member][1] = new_label
self._nodes_occupied[(self._members[member][0], new_label)] = True
self._nodes_occupied[(new_label, self._members[member][0])] = True
self._nodes_occupied.pop((self._members[member][0], label))
self._nodes_occupied.pop((label, self._members[member][0]))
def change_member_label(self, *args):
"""
This method changes the label(s) of the specified member(s).
Parameters
==========
The input(s) of this method are tuple(s) of the form (label, new_label)
label: String or Symbol
The label of the member for which the label has
to be changed.
new_label: String or Symbol
The new label of the member.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0), ('D', 5, 0))
>>> t.nodes
[('A', 0, 0), ('B', 3, 0), ('D', 5, 0)]
>>> t.change_node_label(('A', 'C'))
>>> t.nodes
[('C', 0, 0), ('B', 3, 0), ('D', 5, 0)]
>>> t.add_member(('BC', 'B', 'C'), ('BD', 'B', 'D'))
>>> t.members
{'BC': ['B', 'C'], 'BD': ['B', 'D']}
>>> t.change_member_label(('BC', 'BC_new'), ('BD', 'BD_new'))
>>> t.members
{'BC_new': ['B', 'C'], 'BD_new': ['B', 'D']}
"""
for i in args:
label = i[0]
new_label = i[1]
if label not in self._members:
raise ValueError("No such member exists for the Truss")
else:
members_duplicate = list(self._members).copy()
for member in members_duplicate:
if member == label:
self._members[new_label] = [self._members[member][0], self._members[member][1]]
self._members.pop(label)
self._member_lengths[new_label] = self._member_lengths[label]
self._member_lengths.pop(label)
self._internal_forces[new_label] = self._internal_forces[label]
self._internal_forces.pop(label)
def apply_load(self, *args):
"""
This method applies external load(s) at the specified node(s).
Parameters
==========
The input(s) of the method are tuple(s) of the form (location, magnitude, direction).
location: String or Symbol
Label of the Node at which load is applied.
magnitude: Sympifyable
Magnitude of the load applied. It must always be positive and any changes in
the direction of the load are not reflected here.
direction: Sympifyable
The angle, in degrees, that the load vector makes with the horizontal
in the counter-clockwise direction. It takes the values 0 to 360,
inclusive.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> from sympy import symbols
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0))
>>> P = symbols('P')
>>> t.apply_load(('A', P, 90), ('A', P/2, 45), ('A', P/4, 90))
>>> t.loads
{'A': [[P, 90], [P/2, 45], [P/4, 90]]}
"""
for i in args:
location = i[0]
magnitude = i[1]
direction = i[2]
magnitude = sympify(magnitude)
direction = sympify(direction)
if location not in self._node_coordinates:
raise ValueError("Load must be applied at a known node")
else:
if location in self._loads:
self._loads[location].append([magnitude, direction])
else:
self._loads[location] = [[magnitude, direction]]
def remove_load(self, *args):
"""
This method removes already
present external load(s) at specified node(s).
Parameters
==========
The input(s) of this method are tuple(s) of the form (location, magnitude, direction).
location: String or Symbol
Label of the Node at which load is applied and is to be removed.
magnitude: Sympifyable
Magnitude of the load applied.
direction: Sympifyable
The angle, in degrees, that the load vector makes with the horizontal
in the counter-clockwise direction. It takes the values 0 to 360,
inclusive.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> from sympy import symbols
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0))
>>> P = symbols('P')
>>> t.apply_load(('A', P, 90), ('A', P/2, 45), ('A', P/4, 90))
>>> t.loads
{'A': [[P, 90], [P/2, 45], [P/4, 90]]}
>>> t.remove_load(('A', P/4, 90), ('A', P/2, 45))
>>> t.loads
{'A': [[P, 90]]}
"""
for i in args:
location = i[0]
magnitude = i[1]
direction = i[2]
magnitude = sympify(magnitude)
direction = sympify(direction)
if location not in self._node_coordinates:
raise ValueError("Load must be removed from a known node")
else:
if [magnitude, direction] not in self._loads[location]:
raise ValueError("No load of this magnitude and direction has been applied at this node")
else:
self._loads[location].remove([magnitude, direction])
if self._loads[location] == []:
self._loads.pop(location)
def apply_support(self, *args):
"""
This method adds a pinned or roller support at specified node(s).
Parameters
==========
The input(s) of this method are of the form (location, type).
location: String or Symbol
Label of the Node at which support is added.
type: String
Type of the support being provided at the node.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0))
>>> t.apply_support(('A', 'pinned'), ('B', 'roller'))
>>> t.supports
{'A': 'pinned', 'B': 'roller'}
"""
for i in args:
location = i[0]
type = i[1]
if location not in self._node_coordinates:
raise ValueError("Support must be added on a known node")
else:
if location not in self._supports:
if type == 'pinned':
self.apply_load((location, Symbol('R_'+str(location)+'_x'), 0))
self.apply_load((location, Symbol('R_'+str(location)+'_y'), 90))
elif type == 'roller':
self.apply_load((location, Symbol('R_'+str(location)+'_y'), 90))
elif self._supports[location] == 'pinned':
if type == 'roller':
self.remove_load((location, Symbol('R_'+str(location)+'_x'), 0))
elif self._supports[location] == 'roller':
if type == 'pinned':
self.apply_load((location, Symbol('R_'+str(location)+'_x'), 0))
self._supports[location] = type
def remove_support(self, *args):
"""
This method removes support from specified node(s.)
Parameters
==========
locations: String or Symbol
Label of the Node(s) at which support is to be removed.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(('A', 0, 0), ('B', 3, 0))
>>> t.apply_support(('A', 'pinned'), ('B', 'roller'))
>>> t.supports
{'A': 'pinned', 'B': 'roller'}
>>> t.remove_support('A','B')
>>> t.supports
{}
"""
for location in args:
if location not in self._node_coordinates:
raise ValueError("No such node exists in the Truss")
elif location not in self._supports:
raise ValueError("No support has been added to the given node")
else:
if self._supports[location] == 'pinned':
self.remove_load((location, Symbol('R_'+str(location)+'_x'), 0))
self.remove_load((location, Symbol('R_'+str(location)+'_y'), 90))
elif self._supports[location] == 'roller':
self.remove_load((location, Symbol('R_'+str(location)+'_y'), 90))
self._supports.pop(location)
def solve(self):
"""
This method solves for all reaction forces of all supports and all internal forces
of all the members in the truss, provided the Truss is solvable.
A Truss is solvable if the following condition is met,
2n >= r + m
Where n is the number of nodes, r is the number of reaction forces, where each pinned
support has 2 reaction forces and each roller has 1, and m is the number of members.
The given condition is derived from the fact that a system of equations is solvable
only when the number of variables is lesser than or equal to the number of equations.
Equilibrium Equations in x and y directions give two equations per node giving 2n number
equations. However, the truss needs to be stable as well and may be unstable if 2n > r + m.
The number of variables is simply the sum of the number of reaction forces and member
forces.
.. note::
The sign convention for the internal forces present in a member revolves around whether each
force is compressive or tensile. While forming equations for each node, internal force due
to a member on the node is assumed to be away from the node i.e. each force is assumed to
be compressive by default. Hence, a positive value for an internal force implies the
presence of compressive force in the member and a negative value implies a tensile force.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node(("node_1", 0, 0), ("node_2", 6, 0), ("node_3", 2, 2), ("node_4", 2, 0))
>>> t.add_member(("member_1", "node_1", "node_4"), ("member_2", "node_2", "node_4"), ("member_3", "node_1", "node_3"))
>>> t.add_member(("member_4", "node_2", "node_3"), ("member_5", "node_3", "node_4"))
>>> t.apply_load(("node_4", 10, 270))
>>> t.apply_support(("node_1", "pinned"), ("node_2", "roller"))
>>> t.solve()
>>> t.reaction_loads
{'R_node_1_x': 0, 'R_node_1_y': 20/3, 'R_node_2_y': 10/3}
>>> t.internal_forces
{'member_1': 20/3, 'member_2': 20/3, 'member_3': -20*sqrt(2)/3, 'member_4': -10*sqrt(5)/3, 'member_5': 10}
"""
count_reaction_loads = 0
for node in self._nodes:
if node[0] in self._supports:
if self._supports[node[0]]=='pinned':
count_reaction_loads += 2
elif self._supports[node[0]]=='roller':
count_reaction_loads += 1
if 2*len(self._nodes) != len(self._members) + count_reaction_loads:
raise ValueError("The given truss cannot be solved")
coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))]
load_matrix = zeros(2*len(self.nodes), 1)
load_matrix_row = 0
for node in self._nodes:
if node[0] in self._loads:
for load in self._loads[node[0]]:
if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'):
load_matrix[load_matrix_row] -= load[0]*cos(pi*load[1]/180)
load_matrix[load_matrix_row + 1] -= load[0]*sin(pi*load[1]/180)
load_matrix_row += 2
cols = 0
row = 0
for node in self._nodes:
if node[0] in self._supports:
if self._supports[node[0]]=='pinned':
coefficients_matrix[row][cols] += 1
coefficients_matrix[row+1][cols+1] += 1
cols += 2
elif self._supports[node[0]]=='roller':
coefficients_matrix[row+1][cols] += 1
cols += 1
row += 2
for member in self._members:
start = self._members[member][0]
end = self._members[member][1]
length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2)
start_index = self._node_labels.index(start)
end_index = self._node_labels.index(end)
horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length
vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length
horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length
vertical_component_end = (self._node_coordinates[start][1]-self._node_coordinates[end][1])/length
coefficients_matrix[start_index*2][cols] += horizontal_component_start
coefficients_matrix[start_index*2+1][cols] += vertical_component_start
coefficients_matrix[end_index*2][cols] += horizontal_component_end
coefficients_matrix[end_index*2+1][cols] += vertical_component_end
cols += 1
forces_matrix = (Matrix(coefficients_matrix)**-1)*load_matrix
self._reaction_loads = {}
i = 0
min_load = inf
for node in self._nodes:
if node[0] in self._loads:
for load in self._loads[node[0]]:
if type(load[0]) not in [Symbol, Mul, Add]:
min_load = min(min_load, load[0])
for j in range(len(forces_matrix)):
if type(forces_matrix[j]) not in [Symbol, Mul, Add]:
if abs(forces_matrix[j]/min_load) <1E-10:
forces_matrix[j] = 0
for node in self._nodes:
if node[0] in self._supports:
if self._supports[node[0]]=='pinned':
self._reaction_loads['R_'+str(node[0])+'_x'] = forces_matrix[i]
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i+1]
i += 2
elif self._supports[node[0]]=='roller':
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i]
i += 1
for member in self._members:
self._internal_forces[member] = forces_matrix[i]
i += 1
return
@doctest_depends_on(modules=('numpy',))
def draw(self, subs_dict=None):
"""
Returns a plot object of the Truss with all its nodes, members,
supports and loads.
.. note::
The user must be careful while entering load values in their
directions. The draw function assumes a sign convention that
is used for plotting loads.
Given a right-handed coordinate system with XYZ coordinates,
the supports are assumed to be such that the reaction forces of a
pinned support is in the +X and +Y direction while those of a
roller support is in the +Y direction. For the load, the range
of angles, one can input goes all the way to 360 degrees which, in the
the plot is the angle that the load vector makes with the positive x-axis in the anticlockwise direction.
For example, for a 90-degree angle, the load will be a vertically
directed along +Y while a 270-degree angle denotes a vertical
load as well but along -Y.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> import math
>>> t = Truss()
>>> t.add_node(("A", -4, 0), ("B", 0, 0), ("C", 4, 0), ("D", 8, 0))
>>> t.add_node(("E", 6, 2/math.sqrt(3)))
>>> t.add_node(("F", 2, 2*math.sqrt(3)))
>>> t.add_node(("G", -2, 2/math.sqrt(3)))
>>> t.add_member(("AB","A","B"), ("BC","B","C"), ("CD","C","D"))
>>> t.add_member(("AG","A","G"), ("GB","G","B"), ("GF","G","F"))
>>> t.add_member(("BF","B","F"), ("FC","F","C"), ("CE","C","E"))
>>> t.add_member(("FE","F","E"), ("DE","D","E"))
>>> t.apply_support(("A","pinned"), ("D","roller"))
>>> t.apply_load(("G", 3, 90), ("E", 3, 90), ("F", 2, 90))
>>> p = t.draw()
>>> p # doctest: +ELLIPSIS
Plot object containing:
[0]: cartesian line: 1 for x over (1.0, 1.0)
...
>>> p.show()
"""
if not numpy:
raise ImportError("To use this function numpy module is required")
x = Symbol('x')
markers = []
annotations = []
rectangles = []
node_markers = self._draw_nodes(subs_dict)
markers += node_markers
member_rectangles = self._draw_members()
rectangles += member_rectangles
support_markers = self._draw_supports()
markers += support_markers
load_annotations = self._draw_loads()
annotations += load_annotations
xmax = -INF
xmin = INF
ymax = -INF
ymin = INF
for node in self._node_coordinates:
xmax = max(xmax, self._node_coordinates[node][0])
xmin = min(xmin, self._node_coordinates[node][0])
ymax = max(ymax, self._node_coordinates[node][1])
ymin = min(ymin, self._node_coordinates[node][1])
lim = max(xmax*1.1-xmin*0.8+1, ymax*1.1-ymin*0.8+1)
if lim==xmax*1.1-xmin*0.8+1:
sing_plot = plot(1, (x, 1, 1), markers=markers, show=False, annotations=annotations, xlim=(xmin-0.05*lim, xmax*1.1), ylim=(xmin-0.05*lim, xmax*1.1), axis=False, rectangles=rectangles)
else:
sing_plot = plot(1, (x, 1, 1), markers=markers, show=False, annotations=annotations, xlim=(ymin-0.05*lim, ymax*1.1), ylim=(ymin-0.05*lim, ymax*1.1), axis=False, rectangles=rectangles)
return sing_plot
def _draw_nodes(self, subs_dict):
node_markers = []
for node in self._node_coordinates:
if (type(self._node_coordinates[node][0]) in (Symbol, Quantity)):
if self._node_coordinates[node][0] in subs_dict:
self._node_coordinates[node][0] = subs_dict[self._node_coordinates[node][0]]
else:
raise ValueError("provided substituted dictionary is not adequate")
elif (type(self._node_coordinates[node][0]) == Mul):
objects = self._node_coordinates[node][0].as_coeff_Mul()
for object in objects:
if type(object) in (Symbol, Quantity):
if subs_dict==None or object not in subs_dict:
raise ValueError("provided substituted dictionary is not adequate")
else:
self._node_coordinates[node][0] /= object
self._node_coordinates[node][0] *= subs_dict[object]
if (type(self._node_coordinates[node][1]) in (Symbol, Quantity)):
if self._node_coordinates[node][1] in subs_dict:
self._node_coordinates[node][1] = subs_dict[self._node_coordinates[node][1]]
else:
raise ValueError("provided substituted dictionary is not adequate")
elif (type(self._node_coordinates[node][1]) == Mul):
objects = self._node_coordinates[node][1].as_coeff_Mul()
for object in objects:
if type(object) in (Symbol, Quantity):
if subs_dict==None or object not in subs_dict:
raise ValueError("provided substituted dictionary is not adequate")
else:
self._node_coordinates[node][1] /= object
self._node_coordinates[node][1] *= subs_dict[object]
for node in self._node_coordinates:
node_markers.append(
{
'args':[[self._node_coordinates[node][0]], [self._node_coordinates[node][1]]],
'marker':'o',
'markersize':5,
'color':'black'
}
)
return node_markers
def _draw_members(self):
member_rectangles = []
xmax = -INF
xmin = INF
ymax = -INF
ymin = INF
for node in self._node_coordinates:
xmax = max(xmax, self._node_coordinates[node][0])
xmin = min(xmin, self._node_coordinates[node][0])
ymax = max(ymax, self._node_coordinates[node][1])
ymin = min(ymin, self._node_coordinates[node][1])
if abs(1.1*xmax-0.8*xmin)>abs(1.1*ymax-0.8*ymin):
max_diff = 1.1*xmax-0.8*xmin
else:
max_diff = 1.1*ymax-0.8*ymin
for member in self._members:
x1 = self._node_coordinates[self._members[member][0]][0]
y1 = self._node_coordinates[self._members[member][0]][1]
x2 = self._node_coordinates[self._members[member][1]][0]
y2 = self._node_coordinates[self._members[member][1]][1]
if x2!=x1 and y2!=y1:
if x2>x1:
member_rectangles.append(
{
'xy':(x1-0.005*max_diff*cos(pi/4+atan((y2-y1)/(x2-x1)))/2, y1-0.005*max_diff*sin(pi/4+atan((y2-y1)/(x2-x1)))/2),
'width':sqrt((x1-x2)**2+(y1-y2)**2)+0.005*max_diff/math.sqrt(2),
'height':0.005*max_diff,
'angle':180*atan((y2-y1)/(x2-x1))/pi,
'color':'brown'
}
)
else:
member_rectangles.append(
{
'xy':(x2-0.005*max_diff*cos(pi/4+atan((y2-y1)/(x2-x1)))/2, y2-0.005*max_diff*sin(pi/4+atan((y2-y1)/(x2-x1)))/2),
'width':sqrt((x1-x2)**2+(y1-y2)**2)+0.005*max_diff/math.sqrt(2),
'height':0.005*max_diff,
'angle':180*atan((y2-y1)/(x2-x1))/pi,
'color':'brown'
}
)
elif y2==y1:
if x2>x1:
member_rectangles.append(
{
'xy':(x1-0.005*max_diff/2, y1-0.005*max_diff/2),
'width':sqrt((x1-x2)**2+(y1-y2)**2),
'height':0.005*max_diff,
'angle':90*(1-math.copysign(1, x2-x1)),
'color':'brown'
}
)
else:
member_rectangles.append(
{
'xy':(x1-0.005*max_diff/2, y1-0.005*max_diff/2),
'width':sqrt((x1-x2)**2+(y1-y2)**2),
'height':-0.005*max_diff,
'angle':90*(1-math.copysign(1, x2-x1)),
'color':'brown'
}
)
else:
if y1<y2:
member_rectangles.append(
{
'xy':(x1-0.005*max_diff/2, y1-0.005*max_diff/2),
'width':sqrt((x1-x2)**2+(y1-y2)**2)+0.005*max_diff/2,
'height':0.005*max_diff,
'angle':90*math.copysign(1, y2-y1),
'color':'brown'
}
)
else:
member_rectangles.append(
{
'xy':(x2-0.005*max_diff/2, y2-0.005*max_diff/2),
'width':-(sqrt((x1-x2)**2+(y1-y2)**2)+0.005*max_diff/2),
'height':0.005*max_diff,
'angle':90*math.copysign(1, y2-y1),
'color':'brown'
}
)
return member_rectangles
def _draw_supports(self):
support_markers = []
xmax = -INF
xmin = INF
ymax = -INF
ymin = INF
for node in self._node_coordinates:
xmax = max(xmax, self._node_coordinates[node][0])
xmin = min(xmin, self._node_coordinates[node][0])
ymax = max(ymax, self._node_coordinates[node][1])
ymin = min(ymin, self._node_coordinates[node][1])
if abs(1.1*xmax-0.8*xmin)>abs(1.1*ymax-0.8*ymin):
max_diff = 1.1*xmax-0.8*xmin
else:
max_diff = 1.1*ymax-0.8*ymin
for node in self._supports:
if self._supports[node]=='pinned':
support_markers.append(
{
'args':[
[self._node_coordinates[node][0]],
[self._node_coordinates[node][1]]
],
'marker':6,
'markersize':15,
'color':'black',
'markerfacecolor':'none'
}
)
support_markers.append(
{
'args':[
[self._node_coordinates[node][0]],
[self._node_coordinates[node][1]-0.035*max_diff]
],
'marker':'_',
'markersize':14,
'color':'black'
}
)
elif self._supports[node]=='roller':
support_markers.append(
{
'args':[
[self._node_coordinates[node][0]],
[self._node_coordinates[node][1]-0.02*max_diff]
],
'marker':'o',
'markersize':11,
'color':'black',
'markerfacecolor':'none'
}
)
support_markers.append(
{
'args':[
[self._node_coordinates[node][0]],
[self._node_coordinates[node][1]-0.0375*max_diff]
],
'marker':'_',
'markersize':14,
'color':'black'
}
)
return support_markers
def _draw_loads(self):
load_annotations = []
xmax = -INF
xmin = INF
ymax = -INF
ymin = INF
for node in self._node_coordinates:
xmax = max(xmax, self._node_coordinates[node][0])
xmin = min(xmin, self._node_coordinates[node][0])
ymax = max(ymax, self._node_coordinates[node][1])
ymin = min(ymin, self._node_coordinates[node][1])
if abs(1.1*xmax-0.8*xmin)>abs(1.1*ymax-0.8*ymin):
max_diff = 1.1*xmax-0.8*xmin+5
else:
max_diff = 1.1*ymax-0.8*ymin+5
for node in self._loads:
for load in self._loads[node]:
if load[0] in [Symbol('R_'+str(node)+'_x'), Symbol('R_'+str(node)+'_y')]:
continue
x = self._node_coordinates[node][0]
y = self._node_coordinates[node][1]
load_annotations.append(
{
'text':'',
'xy':(
x-math.cos(pi*load[1]/180)*(max_diff/100),
y-math.sin(pi*load[1]/180)*(max_diff/100)
),
'xytext':(
x-(max_diff/100+abs(xmax-xmin)+abs(ymax-ymin))*math.cos(pi*load[1]/180)/20,
y-(max_diff/100+abs(xmax-xmin)+abs(ymax-ymin))*math.sin(pi*load[1]/180)/20
),
'arrowprops':{'width':1.5, 'headlength':5, 'headwidth':5, 'facecolor':'black'}
}
)
return load_annotations
| Truss |
python | davidhalter__jedi | jedi/inference/value/function.py | {
"start": 1493,
"end": 1968
} | class ____(TreeValue):
def get_qualified_names(self):
if self.parent_context.is_class():
n = self.parent_context.get_qualified_names()
if n is None:
# This means that the parent class lives within a function.
return None
return n + (self.py__name__(),)
elif self.parent_context.is_module():
return (self.py__name__(),)
else:
return None
| FunctionAndClassBase |
python | keras-team__keras | keras/src/layers/preprocessing/mel_spectrogram_test.py | {
"start": 173,
"end": 3388
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_mel_spectrogram_basics(self):
self.run_layer_test(
layers.MelSpectrogram,
init_kwargs={
"num_mel_bins": 80,
"sampling_rate": 8000,
"sequence_stride": 128,
"fft_length": 2048,
},
input_shape=(2, 16000),
expected_output_shape=(2, 80, 126),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.MelSpectrogram,
init_kwargs={
"num_mel_bins": 80,
"sampling_rate": 8000,
"sequence_stride": 128,
"fft_length": 2048,
},
input_shape=(16000,),
expected_output_shape=(80, 126),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
@parameterized.parameters(
[
((2, 16000), 80, 128, 2048, 8000, False),
((16000,), 80, 128, 2048, 8000, False),
((2, 16001), 80, 128, 2048, 16000, False),
((16001,), 80, 128, 2048, 8000, False),
((2, 8000), 128, 64, 512, 32000, False),
((8000,), 128, 64, 512, 32000, False),
((2, 8000), 128, 64, 512, 32000, True),
((8000,), 128, 64, 512, 32000, True),
]
)
def test_output_shape(
self,
input_shape,
num_mel_bins,
sequence_stride,
fft_length,
sampling_rate,
all_zero,
):
if all_zero:
audios = np.zeros(input_shape)
else:
audios = np.random.random(input_shape)
out = layers.MelSpectrogram(
num_mel_bins=num_mel_bins,
sequence_stride=sequence_stride,
fft_length=fft_length,
sampling_rate=sampling_rate,
)(audios)
if len(input_shape) == 1:
ref_shape = (
num_mel_bins,
(input_shape[0] + sequence_stride + 1) // sequence_stride,
)
else:
ref_shape = (
input_shape[0],
num_mel_bins,
(input_shape[1] + sequence_stride + 1) // sequence_stride,
)
self.assertEqual(tuple(out.shape), ref_shape)
def test_tf_data_compatibility(self):
input_shape = (2, 16000)
output_shape = (2, 80, 126)
layer = layers.MelSpectrogram(
num_mel_bins=80,
sampling_rate=8000,
sequence_stride=128,
fft_length=2048,
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
| MelSpectrogramTest |
python | mlflow__mlflow | tests/genai/judges/test_judge_tool_registry.py | {
"start": 908,
"end": 6602
} | class ____(JudgeTool):
@property
def name(self) -> str:
return "mock_tool"
def get_definition(self) -> ToolDefinition:
return ToolDefinition(
function={
"name": "mock_tool",
"description": "A mock tool for testing",
"parameters": {"type": "object", "properties": {}, "required": []},
},
type="function",
)
def invoke(self, trace: Trace, **kwargs) -> str:
return f"mock_result_with_{len(kwargs)}_args"
def test_registry_register_and_list_tools():
registry = JudgeToolRegistry()
mock_tool = MockTool()
assert len(registry.list_tools()) == 0
registry.register(mock_tool)
tools = registry.list_tools()
assert len(tools) == 1
assert tools[0].name == "mock_tool"
@pytest.mark.parametrize("tracing_enabled", [True, False])
def test_registry_invoke_tool_success(tracing_enabled, monkeypatch):
if tracing_enabled:
monkeypatch.setenv("MLFLOW_GENAI_EVAL_ENABLE_SCORER_TRACING", "true")
registry = JudgeToolRegistry()
mock_tool = MockTool()
registry.register(mock_tool)
trace_info = TraceInfo(
trace_id="test-trace-id",
trace_location=TraceLocation.from_experiment_id("0"),
request_time=1234567890,
state=TraceState.OK,
execution_duration=100,
)
trace = Trace(info=trace_info, data=None)
tool_call = ToolCall(
function=FunctionToolCallArguments(
name="mock_tool", arguments=json.dumps({"param": "value"})
)
)
result = registry.invoke(tool_call, trace)
assert result == "mock_result_with_1_args"
if tracing_enabled:
traces = mlflow.search_traces(return_type="list")
assert len(traces) == 1
# Tool itself only creates one span. In real case, it will be under the parent scorer trace.
assert len(traces[0].data.spans) == 1
assert traces[0].data.spans[0].name == "mock_tool"
assert traces[0].data.spans[0].span_type == SpanType.TOOL
def test_registry_invoke_tool_not_found():
registry = JudgeToolRegistry()
trace_info = TraceInfo(
trace_id="test-trace-id",
trace_location=TraceLocation.from_experiment_id("0"),
request_time=1234567890,
state=TraceState.OK,
execution_duration=100,
)
trace = Trace(info=trace_info, data=None)
tool_call = ToolCall(
function=FunctionToolCallArguments(name="nonexistent_tool", arguments=json.dumps({}))
)
with pytest.raises(MlflowException, match="Tool 'nonexistent_tool' not found in registry"):
registry.invoke(tool_call, trace)
def test_registry_invoke_tool_invalid_json():
registry = JudgeToolRegistry()
mock_tool = MockTool()
registry.register(mock_tool)
trace_info = TraceInfo(
trace_id="test-trace-id",
trace_location=TraceLocation.from_experiment_id("0"),
request_time=1234567890,
state=TraceState.OK,
execution_duration=100,
)
trace = Trace(info=trace_info, data=None)
tool_call = ToolCall(
function=FunctionToolCallArguments(name="mock_tool", arguments="invalid json {{")
)
with pytest.raises(MlflowException, match="Invalid JSON arguments for tool 'mock_tool'"):
registry.invoke(tool_call, trace)
def test_registry_invoke_tool_invalid_arguments():
registry = JudgeToolRegistry()
class StrictTool(JudgeTool):
@property
def name(self) -> str:
return "strict_tool"
def get_definition(self) -> ToolDefinition:
return ToolDefinition(function={}, type="function")
def invoke(self, trace: Trace, required_param: str) -> str:
return f"result_{required_param}"
strict_tool = StrictTool()
registry.register(strict_tool)
trace_info = TraceInfo(
trace_id="test-trace-id",
trace_location=TraceLocation.from_experiment_id("0"),
request_time=1234567890,
state=TraceState.OK,
execution_duration=100,
)
trace = Trace(info=trace_info, data=None)
tool_call = ToolCall(
function=FunctionToolCallArguments(name="strict_tool", arguments=json.dumps({}))
)
with pytest.raises(MlflowException, match="Invalid arguments for tool 'strict_tool'"):
registry.invoke(tool_call, trace)
def test_global_functions_work(restore_global_registry):
mock_tool = MockTool()
register_judge_tool(mock_tool)
tools = list_judge_tools()
tool_names = [t.name for t in tools]
assert "mock_tool" in tool_names
trace_info = TraceInfo(
trace_id="test-trace-id",
trace_location=TraceLocation.from_experiment_id("0"),
request_time=1234567890,
state=TraceState.OK,
execution_duration=100,
)
trace = Trace(info=trace_info, data=None)
tool_call = ToolCall(
function=FunctionToolCallArguments(name="mock_tool", arguments=json.dumps({}))
)
result = invoke_judge_tool(tool_call, trace)
assert result == "mock_result_with_0_args"
def test_builtin_tools_are_properly_registered():
tools = list_judge_tools()
registered_tool_names = {t.name for t in tools if not isinstance(t, MockTool)}
# Only include tool constants that don't start with underscore (public tools)
all_tool_constants = {
value
for name, value in inspect.getmembers(ToolNames)
if not name.startswith("_") and isinstance(value, str)
}
assert all_tool_constants == registered_tool_names
for tool in tools:
if tool.name in all_tool_constants:
assert isinstance(tool, JudgeTool)
| MockTool |
python | doocs__leetcode | solution/2300-2399/2347.Best Poker Hand/Solution.py | {
"start": 0,
"end": 408
} | class ____:
def bestHand(self, ranks: List[int], suits: List[str]) -> str:
# if len(set(suits)) == 1:
if all(a == b for a, b in pairwise(suits)):
return 'Flush'
cnt = Counter(ranks)
if any(v >= 3 for v in cnt.values()):
return 'Three of a Kind'
if any(v == 2 for v in cnt.values()):
return 'Pair'
return 'High Card'
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF008_attrs.py | {
"start": 462,
"end": 751
} | class ____:
mutable_default: list[int] = []
immutable_annotation: Sequence[int] = []
without_annotation = []
correct_code: list[int] = KNOWINGLY_MUTABLE_DEFAULT
perfectly_fine: list[int] = field(default_factory=list)
class_variable: ClassVar[list[int]] = []
@attr.s
| B |
python | pytorch__pytorch | torch/fx/experimental/optimization.py | {
"start": 7110,
"end": 9409
} | class ____:
def __init__(self, fx_graph: fx.Graph):
self.fx_graph = fx_graph
self.nodes: list[fx.Node] = []
self.start_nodes: list[fx.Node] = []
self.end_nodes: list[fx.Node] = []
def gen_mkl_autotuner(example_inputs, iters=10, warmup=1):
"""
This generates a heuristic that can be passed into `optimize_for_inference` that
determines whether a subgraph should be run in MKL by running it with the example_inputs.
Example usage:
heuristic = gen_mkl_autotuner(example_inputs, iters=10)
fast_model = optimization.optimize_for_inference(model, heuristic)
"""
fx_model = None
old_modules = None
def use_mkl_heuristic(graph: MklSubgraph) -> bool:
nonlocal fx_model, old_modules
input_nodes = graph.start_nodes
if fx_model is None:
fx_model = graph.fx_graph.owning_module
old_modules = graph.fx_graph.old_modules # type: ignore[attr-defined]
ShapeProp(fx_model).propagate(example_inputs)
sample_inputs = [torch.randn(node.shape) for node in input_nodes] # type: ignore[attr-defined]
output_args = cast(list[fx.Node], [node.args[0] for node in graph.end_nodes])
submodule = extract_subgraph(fx_model, graph.nodes, input_nodes, output_args)
def benchmark(f):
for _ in range(warmup):
f()
begin = time.time()
for _ in range(iters):
f()
return time.time() - begin
mkl_time = benchmark(
lambda: [
i.to_dense() for i in submodule(*[i.to_mkldnn() for i in sample_inputs])
]
)
reset_modules(
submodule.graph.nodes,
dict(submodule.named_modules()),
# pyrefly: ignore [bad-argument-type]
old_modules,
)
no_mkl_time = benchmark(lambda: submodule(*sample_inputs))
return mkl_time < no_mkl_time
return use_mkl_heuristic
def use_mkl_length(graph: MklSubgraph) -> bool:
"""
This is a heuristic that can be passed into `optimize_for_inference` that
determines whether a subgraph should be run in MKL by checking if there
are more than 2 nodes in it
"""
return len(graph.nodes) > 2
| MklSubgraph |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/variables/variables_test.py | {
"start": 28278,
"end": 29438
} | class ____(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variable_v1.VariableV1([1, 2])
w = variable_v1.VariableV1([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variable_v1.VariableV1([1, 2])
w = variable_v1.VariableV1([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(v.initializer)
inited.op.run()
| ObsoleteIsInitializedTest |
python | scikit-learn__scikit-learn | sklearn/model_selection/_plot.py | {
"start": 339,
"end": 4163
} | class ____:
def _plot_curve(
self,
x_data,
*,
ax=None,
negate_score=False,
score_name=None,
score_type="test",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
check_matplotlib_support(f"{self.__class__.__name__}.plot")
import matplotlib.pyplot as plt
if ax is None:
_, ax = plt.subplots()
if negate_score:
train_scores, test_scores = -self.train_scores, -self.test_scores
else:
train_scores, test_scores = self.train_scores, self.test_scores
if std_display_style not in ("errorbar", "fill_between", None):
raise ValueError(
f"Unknown std_display_style: {std_display_style}. Should be one of"
" 'errorbar', 'fill_between', or None."
)
if score_type not in ("test", "train", "both"):
raise ValueError(
f"Unknown score_type: {score_type}. Should be one of 'test', "
"'train', or 'both'."
)
if score_type == "train":
scores = {"Train": train_scores}
elif score_type == "test":
scores = {"Test": test_scores}
else: # score_type == "both"
scores = {"Train": train_scores, "Test": test_scores}
if std_display_style in ("fill_between", None):
# plot the mean score
if line_kw is None:
line_kw = {}
self.lines_ = []
for line_label, score in scores.items():
self.lines_.append(
*ax.plot(
x_data,
score.mean(axis=1),
label=line_label,
**line_kw,
)
)
self.errorbar_ = None
self.fill_between_ = None # overwritten below by fill_between
if std_display_style == "errorbar":
if errorbar_kw is None:
errorbar_kw = {}
self.errorbar_ = []
for line_label, score in scores.items():
self.errorbar_.append(
ax.errorbar(
x_data,
score.mean(axis=1),
score.std(axis=1),
label=line_label,
**errorbar_kw,
)
)
self.lines_, self.fill_between_ = None, None
elif std_display_style == "fill_between":
if fill_between_kw is None:
fill_between_kw = {}
default_fill_between_kw = {"alpha": 0.5}
fill_between_kw = {**default_fill_between_kw, **fill_between_kw}
self.fill_between_ = []
for line_label, score in scores.items():
self.fill_between_.append(
ax.fill_between(
x_data,
score.mean(axis=1) - score.std(axis=1),
score.mean(axis=1) + score.std(axis=1),
**fill_between_kw,
)
)
score_name = self.score_name if score_name is None else score_name
ax.legend()
# We found that a ratio, smaller or bigger than 5, between the largest and
# smallest gap of the x values is a good indicator to choose between linear
# and log scale.
if _interval_max_min_ratio(x_data) > 5:
xscale = "symlog" if x_data.min() <= 0 else "log"
else:
xscale = "linear"
ax.set_xscale(xscale)
ax.set_ylabel(f"{score_name}")
self.ax_ = ax
self.figure_ = ax.figure
| _BaseCurveDisplay |
python | pytorch__pytorch | torch/distributed/elastic/timer/local_timer.py | {
"start": 2141,
"end": 4282
} | class ____(TimerServer):
"""
Server that works with ``LocalTimerClient``. Clients are expected to be
subprocesses to the parent process that is running this server. Each host
in the job is expected to start its own timer server locally and each
server instance manages timers for local workers (running on processes
on the same host).
"""
def __init__(
self, mp_queue: mp.Queue, max_interval: float = 60, daemon: bool = True
):
super().__init__(MultiprocessingRequestQueue(mp_queue), max_interval, daemon)
self._timers: dict[tuple[Any, str], TimerRequest] = {}
def register_timers(self, timer_requests: list[TimerRequest]) -> None:
for request in timer_requests:
pid = request.worker_id
scope_id = request.scope_id
expiration_time = request.expiration_time
# negative expiration is a proxy for a release call
if expiration_time < 0:
self._timers.pop((pid, scope_id), None)
else:
self._timers[(pid, scope_id)] = request
def clear_timers(self, worker_ids: set[int]) -> None:
for pid, scope_id in list(self._timers.keys()):
if pid in worker_ids:
self._timers.pop((pid, scope_id))
def get_expired_timers(self, deadline: float) -> dict[Any, list[TimerRequest]]:
# pid -> [timer_requests...]
expired_timers: dict[Any, list[TimerRequest]] = {}
for request in self._timers.values():
if request.expiration_time <= deadline:
expired_scopes = expired_timers.setdefault(request.worker_id, [])
expired_scopes.append(request)
return expired_timers
def _reap_worker(self, worker_id: int) -> bool:
try:
os.kill(worker_id, signal.SIGKILL)
return True
except ProcessLookupError:
logger.info("Process with pid=%s does not exist. Skipping", worker_id)
return True
except Exception:
logger.exception("Error terminating pid=%s", worker_id)
return False
| LocalTimerServer |
python | pola-rs__polars | py-polars/src/polars/interchange/protocol.py | {
"start": 5024,
"end": 6679
} | class ____(Protocol):
"""Interchange dataframe object."""
version: ClassVar[int] # Version of the protocol
def __dataframe__(
self,
nan_as_null: bool = False, # noqa: FBT001
allow_copy: bool = True, # noqa: FBT001
) -> DataFrame:
"""Convert to a dataframe object implementing the dataframe interchange protocol.""" # noqa: W505
@property
def metadata(self) -> dict[str, Any]:
"""The metadata for the dataframe."""
def num_columns(self) -> int:
"""Return the number of columns in the dataframe."""
def num_rows(self) -> int | None:
"""Return the number of rows in the dataframe, if available."""
def num_chunks(self) -> int:
"""Return the number of chunks the dataframe consists of.."""
def column_names(self) -> Iterable[str]:
"""Return the column names."""
def get_column(self, i: int) -> Column:
"""Return the column at the indicated position."""
def get_column_by_name(self, name: str) -> Column:
"""Return the column with the given name."""
def get_columns(self) -> Iterable[Column]:
"""Return an iterator yielding the columns."""
def select_columns(self, indices: Sequence[int]) -> DataFrame:
"""Create a new dataframe by selecting a subset of columns by index."""
def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:
"""Create a new dataframe by selecting a subset of columns by name."""
def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
"""Return an iterator yielding the chunks of the dataframe."""
| DataFrame |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF033.py | {
"start": 345,
"end": 435
} | class ____:
def __post_init__(self, bar = 11, baz = 11) -> None: ...
# OK
@dataclass
| Foo |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 3975,
"end": 4364
} | class ____(graphene.InputObjectType):
groupName = graphene.NonNull(graphene.String)
repositoryName = graphene.NonNull(graphene.String)
repositoryLocationName = graphene.NonNull(graphene.String)
class Meta:
description = """This type represents the fields necessary to identify
an asset group."""
name = "AssetGroupSelector"
| GrapheneAssetGroupSelector |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py | {
"start": 48083,
"end": 48722
} | class ____(nn.Module):
def __init__(self, config: RTDetrV2Config):
super().__init__()
self.layers = nn.ModuleList([RTDetrV2EncoderLayer(config) for _ in range(config.encoder_layers)])
def forward(self, src, src_mask=None, pos_embed=None, output_attentions: bool = False) -> torch.Tensor:
hidden_states = src
for layer in self.layers:
hidden_states = layer(
hidden_states,
attention_mask=src_mask,
position_embeddings=pos_embed,
output_attentions=output_attentions,
)
return hidden_states
| RTDetrV2Encoder |
python | pypa__warehouse | warehouse/accounts/models.py | {
"start": 12329,
"end": 13598
} | class ____(db.ModelBase):
__tablename__ = "user_emails"
__table_args__ = (
UniqueConstraint("email", name="user_emails_email_key"),
Index("user_emails_user_id", "user_id"),
)
id: Mapped[int] = mapped_column(primary_key=True)
user_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("users.id", deferrable=True, initially="DEFERRED"),
)
user: Mapped[User] = orm.relationship(back_populates="emails")
email: Mapped[str] = mapped_column(String(length=254))
primary: Mapped[bool]
verified: Mapped[bool]
public: Mapped[bool_false]
# Deliverability information
unverify_reason: Mapped[UnverifyReasons | None]
transient_bounces: Mapped[int] = mapped_column(server_default=sql.text("0"))
# Domain validation information
domain_last_checked: Mapped[datetime.datetime | None] = mapped_column(
comment="Last time domain was checked with the domain validation service.",
index=True,
)
domain_last_status: Mapped[list[str] | None] = mapped_column(
ARRAY(String),
comment="Status strings returned by the domain validation service.",
)
@property
def domain(self):
return self.email.split("@")[-1].lower()
| Email |
python | matplotlib__matplotlib | lib/matplotlib/image.py | {
"start": 51701,
"end": 53836
} | class ____(_ImageBase):
"""An image attached to a figure."""
zorder = 0
_interpolation = 'nearest'
def __init__(self, fig,
*,
cmap=None,
norm=None,
colorizer=None,
offsetx=0,
offsety=0,
origin=None,
**kwargs
):
"""
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
kwargs are an optional list of Artist keyword args
"""
super().__init__(
None,
norm=norm,
cmap=cmap,
colorizer=colorizer,
origin=origin
)
self.set_figure(fig)
self.ox = offsetx
self.oy = offsety
self._internal_update(kwargs)
self.magnification = 1.0
def get_extent(self):
"""Return the image extent as tuple (left, right, bottom, top)."""
numrows, numcols = self.get_size()
return (-0.5 + self.ox, numcols-0.5 + self.ox,
-0.5 + self.oy, numrows-0.5 + self.oy)
def make_image(self, renderer, magnification=1.0, unsampled=False):
# docstring inherited
fig = self.get_figure(root=True)
fac = renderer.dpi/fig.dpi
# fac here is to account for pdf, eps, svg backends where
# figure.dpi is set to 72. This means we need to scale the
# image (using magnification) and offset it appropriately.
bbox = Bbox([[self.ox/fac, self.oy/fac],
[(self.ox/fac + self._A.shape[1]),
(self.oy/fac + self._A.shape[0])]])
width, height = fig.get_size_inches()
width *= renderer.dpi
height *= renderer.dpi
clip = Bbox([[0, 0], [width, height]])
return self._make_image(
self._A, bbox, bbox, clip, magnification=magnification / fac,
unsampled=unsampled, round_to_pixel_border=False)
def set_data(self, A):
"""Set the image array."""
super().set_data(A)
self.stale = True
| FigureImage |
python | protocolbuffers__protobuf | python/google/protobuf/internal/numpy/numpy_test.py | {
"start": 1821,
"end": 4312
} | class ____(unittest.TestCase):
# Assigning dim 1 ndarray of ints to repeated field should pass
def testNumpyDim1IntArrayToRepeated_IsValid(self):
message.repeated_int64[:] = np_1_int_array
message.repeated_int64[:] = np_2_int_array
message.repeated_uint64[:] = np_1_uint_array
message.repeated_uint64[:] = np_2_uint_array
# Assigning dim 2 ndarray of ints to repeated field should fail
def testNumpyDim2IntArrayToRepeated_RaisesTypeError(self):
with self.assertRaises(TypeError):
message.repeated_int64[:] = np_11_int_array
with self.assertRaises(TypeError):
message.repeated_int64[:] = np_22_int_array
with self.assertRaises(TypeError):
message.repeated_uint64[:] = np_11_uint_array
with self.assertRaises(TypeError):
message.repeated_uint64[:] = np_22_uint_array
# Assigning any ndarray of floats to repeated int field should fail
def testNumpyFloatArrayToRepeated_RaisesTypeError(self):
with self.assertRaises(TypeError):
message.repeated_int64[:] = np_1_float_array
with self.assertRaises(TypeError):
message.repeated_int64[:] = np_11_float_array
with self.assertRaises(TypeError):
message.repeated_int64[:] = np_22_float_array
# Assigning any np int to scalar field should pass
def testNumpyIntScalarToScalar_IsValid(self):
message.optional_int64 = np_int_scalar
message.optional_uint64 = np_uint_scalar
# Assigning any ndarray of ints to scalar field should fail
def testNumpyIntArrayToScalar_RaisesTypeError(self):
with self.assertRaises(TypeError):
message.optional_int64 = np_1_int_array
with self.assertRaises(TypeError):
message.optional_int64 = np_11_int_array
with self.assertRaises(TypeError):
message.optional_int64 = np_22_int_array
with self.assertRaises(TypeError):
message.optional_uint64 = np_1_uint_array
with self.assertRaises(TypeError):
message.optional_uint64 = np_11_uint_array
with self.assertRaises(TypeError):
message.optional_uint64 = np_22_uint_array
# Assigning any ndarray of floats to scalar field should fail
def testNumpyFloatArrayToScalar_RaisesTypeError(self):
with self.assertRaises(TypeError):
message.optional_int64 = np_1_float_array
with self.assertRaises(TypeError):
message.optional_int64 = np_11_float_array
with self.assertRaises(TypeError):
message.optional_int64 = np_22_float_array
@testing_refleaks.TestCase
| NumpyIntProtoTest |
python | fluentpython__example-code-2e | 15-more-types/cafeteria/invariant.py | {
"start": 61,
"end": 109
} | class ____: # <1>
"""Any beverage."""
| Beverage |
python | django__django | tests/composite_pk/test_values.py | {
"start": 142,
"end": 9754
} | class ____(TestCase):
USER_1_EMAIL = "user0001@example.com"
USER_2_EMAIL = "user0002@example.com"
USER_3_EMAIL = "user0003@example.com"
POST_1_ID = "77777777-7777-7777-7777-777777777777"
POST_2_ID = "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
POST_3_ID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.tenant_1 = Tenant.objects.create()
cls.tenant_2 = Tenant.objects.create()
cls.user_1 = User.objects.create(
tenant=cls.tenant_1, id=1, email=cls.USER_1_EMAIL
)
cls.user_2 = User.objects.create(
tenant=cls.tenant_1, id=2, email=cls.USER_2_EMAIL
)
cls.user_3 = User.objects.create(
tenant=cls.tenant_2, id=3, email=cls.USER_3_EMAIL
)
cls.post_1 = Post.objects.create(tenant=cls.tenant_1, id=cls.POST_1_ID)
cls.post_2 = Post.objects.create(tenant=cls.tenant_1, id=cls.POST_2_ID)
cls.post_3 = Post.objects.create(tenant=cls.tenant_2, id=cls.POST_3_ID)
def test_values_list(self):
with self.subTest('User.objects.values_list("pk")'):
self.assertSequenceEqual(
User.objects.values_list("pk").order_by("pk"),
(
(self.user_1.pk,),
(self.user_2.pk,),
(self.user_3.pk,),
),
)
with self.subTest('User.objects.values_list("pk", "email")'):
self.assertSequenceEqual(
User.objects.values_list("pk", "email").order_by("pk"),
(
(self.user_1.pk, self.USER_1_EMAIL),
(self.user_2.pk, self.USER_2_EMAIL),
(self.user_3.pk, self.USER_3_EMAIL),
),
)
with self.subTest('User.objects.values_list("pk", "id")'):
self.assertSequenceEqual(
User.objects.values_list("pk", "id").order_by("pk"),
(
(self.user_1.pk, self.user_1.id),
(self.user_2.pk, self.user_2.id),
(self.user_3.pk, self.user_3.id),
),
)
with self.subTest('User.objects.values_list("pk", "tenant_id", "id")'):
self.assertSequenceEqual(
User.objects.values_list("pk", "tenant_id", "id").order_by("pk"),
(
(self.user_1.pk, self.user_1.tenant_id, self.user_1.id),
(self.user_2.pk, self.user_2.tenant_id, self.user_2.id),
(self.user_3.pk, self.user_3.tenant_id, self.user_3.id),
),
)
with self.subTest('User.objects.values_list("pk", flat=True)'):
self.assertSequenceEqual(
User.objects.values_list("pk", flat=True).order_by("pk"),
(
self.user_1.pk,
self.user_2.pk,
self.user_3.pk,
),
)
with self.subTest('Post.objects.values_list("pk", flat=True)'):
self.assertSequenceEqual(
Post.objects.values_list("pk", flat=True).order_by("pk"),
(
(self.tenant_1.id, UUID(self.POST_1_ID)),
(self.tenant_1.id, UUID(self.POST_2_ID)),
(self.tenant_2.id, UUID(self.POST_3_ID)),
),
)
with self.subTest('Post.objects.values_list("pk")'):
self.assertSequenceEqual(
Post.objects.values_list("pk").order_by("pk"),
(
((self.tenant_1.id, UUID(self.POST_1_ID)),),
((self.tenant_1.id, UUID(self.POST_2_ID)),),
((self.tenant_2.id, UUID(self.POST_3_ID)),),
),
)
with self.subTest('Post.objects.values_list("pk", "id")'):
self.assertSequenceEqual(
Post.objects.values_list("pk", "id").order_by("pk"),
(
((self.tenant_1.id, UUID(self.POST_1_ID)), UUID(self.POST_1_ID)),
((self.tenant_1.id, UUID(self.POST_2_ID)), UUID(self.POST_2_ID)),
((self.tenant_2.id, UUID(self.POST_3_ID)), UUID(self.POST_3_ID)),
),
)
with self.subTest('Post.objects.values_list("id", "pk")'):
self.assertSequenceEqual(
Post.objects.values_list("id", "pk").order_by("pk"),
(
(UUID(self.POST_1_ID), (self.tenant_1.id, UUID(self.POST_1_ID))),
(UUID(self.POST_2_ID), (self.tenant_1.id, UUID(self.POST_2_ID))),
(UUID(self.POST_3_ID), (self.tenant_2.id, UUID(self.POST_3_ID))),
),
)
with self.subTest('User.objects.values_list("pk", named=True)'):
Row = namedtuple("Row", ["pk"])
self.assertSequenceEqual(
User.objects.values_list("pk", named=True).order_by("pk"),
(
Row(pk=self.user_1.pk),
Row(pk=self.user_2.pk),
Row(pk=self.user_3.pk),
),
)
with self.subTest('User.objects.values_list("pk", "pk")'):
self.assertSequenceEqual(
User.objects.values_list("pk", "pk").order_by("pk"),
(
(self.user_1.pk, self.user_1.pk),
(self.user_2.pk, self.user_2.pk),
(self.user_3.pk, self.user_3.pk),
),
)
with self.subTest('User.objects.values_list("pk", "id", "pk", "id")'):
self.assertSequenceEqual(
User.objects.values_list("pk", "id", "pk", "id").order_by("pk"),
(
(self.user_1.pk, self.user_1.id, self.user_1.pk, self.user_1.id),
(self.user_2.pk, self.user_2.id, self.user_2.pk, self.user_2.id),
(self.user_3.pk, self.user_3.id, self.user_3.pk, self.user_3.id),
),
)
def test_values(self):
with self.subTest('User.objects.values("pk")'):
self.assertSequenceEqual(
User.objects.values("pk").order_by("pk"),
(
{"pk": self.user_1.pk},
{"pk": self.user_2.pk},
{"pk": self.user_3.pk},
),
)
with self.subTest('User.objects.values("pk", "email")'):
self.assertSequenceEqual(
User.objects.values("pk", "email").order_by("pk"),
(
{"pk": self.user_1.pk, "email": self.USER_1_EMAIL},
{"pk": self.user_2.pk, "email": self.USER_2_EMAIL},
{"pk": self.user_3.pk, "email": self.USER_3_EMAIL},
),
)
with self.subTest('User.objects.values("pk", "id")'):
self.assertSequenceEqual(
User.objects.values("pk", "id").order_by("pk"),
(
{"pk": self.user_1.pk, "id": self.user_1.id},
{"pk": self.user_2.pk, "id": self.user_2.id},
{"pk": self.user_3.pk, "id": self.user_3.id},
),
)
with self.subTest('User.objects.values("pk", "tenant_id", "id")'):
self.assertSequenceEqual(
User.objects.values("pk", "tenant_id", "id").order_by("pk"),
(
{
"pk": self.user_1.pk,
"tenant_id": self.user_1.tenant_id,
"id": self.user_1.id,
},
{
"pk": self.user_2.pk,
"tenant_id": self.user_2.tenant_id,
"id": self.user_2.id,
},
{
"pk": self.user_3.pk,
"tenant_id": self.user_3.tenant_id,
"id": self.user_3.id,
},
),
)
with self.subTest('User.objects.values("pk", "pk")'):
self.assertSequenceEqual(
User.objects.values("pk", "pk").order_by("pk"),
(
{"pk": self.user_1.pk},
{"pk": self.user_2.pk},
{"pk": self.user_3.pk},
),
)
with self.subTest('User.objects.values("pk", "id", "pk", "id")'):
self.assertSequenceEqual(
User.objects.values("pk", "id", "pk", "id").order_by("pk"),
(
{"pk": self.user_1.pk, "id": self.user_1.id},
{"pk": self.user_2.pk, "id": self.user_2.id},
{"pk": self.user_3.pk, "id": self.user_3.id},
),
)
def test_foreign_object_values(self):
Comment.objects.create(id=1, user=self.user_1, integer=42)
testcases = {
"all": Comment.objects.all(),
"exclude_user_email": Comment.objects.exclude(user__email__endswith="net"),
}
for name, queryset in testcases.items():
with self.subTest(name=name):
values = list(queryset.values("user", "integer"))
self.assertEqual(
values[0]["user"], (self.user_1.tenant_id, self.user_1.id)
)
self.assertEqual(values[0]["integer"], 42)
| CompositePKValuesTests |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_R.py | {
"start": 6386,
"end": 7584
} | class ____(Benchmark):
r"""
Ripple 25 objective function.
This class defines the Ripple 25 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Ripple25}}(x) = \sum_{i=1}^2 -e^{-2
\log 2 (\frac{x_i-0.1}{0.8})^2}
\left[\sin^6(5 \pi x_i) \right]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -2` for :math:`x_i = 0.1` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.1 for _ in range(self.N)]]
self.fglob = -2.0
def fun(self, x, *args):
self.nfev += 1
u = -2.0 * log(2.0) * ((x - 0.1) / 0.8) ** 2.0
v = sin(5.0 * pi * x) ** 6.0
return sum(-exp(u) * v)
| Ripple25 |
python | getsentry__sentry | src/sentry/api/exceptions.py | {
"start": 641,
"end": 1319
} | class ____(APIException):
code = ""
message = ""
def __init__(self, code=None, message=None, detail=None, **kwargs):
# Note that we no longer call the base `__init__` here. This is because
# DRF now forces all detail messages that subclass `APIException` to a
# string, which breaks our format.
# https://www.django-rest-framework.org/community/3.0-announcement/#miscellaneous-notes
if detail is None:
detail = {
"code": code or self.code,
"message": message or self.message,
"extra": kwargs,
}
self.detail = {"detail": detail}
| SentryAPIException |
python | scrapy__scrapy | tests/test_downloader_handlers_http_base.py | {
"start": 27161,
"end": 28699
} | class ____(ABC):
@property
@abstractmethod
def settings_dict(self) -> dict[str, Any] | None:
raise NotImplementedError
is_secure = False
@deferred_f_from_coro_f
async def test_download_with_content_length(self, mockserver: MockServer) -> None:
crawler = get_crawler(SingleRequestSpider, self.settings_dict)
# http://localhost:8998/partial set Content-Length to 1024, use download_maxsize= 1000 to avoid
# download it
await maybe_deferred_to_future(
crawler.crawl(
seed=Request(
url=mockserver.url("/partial", is_secure=self.is_secure),
meta={"download_maxsize": 1000},
)
)
)
assert crawler.spider
failure = crawler.spider.meta["failure"] # type: ignore[attr-defined]
assert isinstance(failure.value, defer.CancelledError)
@deferred_f_from_coro_f
async def test_download(self, mockserver: MockServer) -> None:
crawler = get_crawler(SingleRequestSpider, self.settings_dict)
await maybe_deferred_to_future(
crawler.crawl(
seed=Request(url=mockserver.url("", is_secure=self.is_secure))
)
)
assert crawler.spider
failure = crawler.spider.meta.get("failure") # type: ignore[attr-defined]
assert failure is None
reason = crawler.spider.meta["close_reason"] # type: ignore[attr-defined]
assert reason == "finished"
| TestHttpWithCrawlerBase |
python | getsentry__sentry | tests/sentry/api/endpoints/test_source_map_debug_blue_thunder_edition.py | {
"start": 1911,
"end": 64760
} | class ____(APITestCase):
endpoint = "sentry-api-0-event-source-map-debug-blue-thunder-edition"
def setUp(self) -> None:
self.login_as(self.user)
return super().setUp()
def test_missing_event(self) -> None:
resp = self.get_error_response(
self.organization.slug,
self.project.slug,
"invalid_id",
frame_idx=0,
exception_idx=0,
status_code=status.HTTP_404_NOT_FOUND,
)
assert resp.data["detail"] == "Event not found"
def test_empty_exceptions_array(self) -> None:
event = self.store_event(data=create_event([]), project_id=self.project.id)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["exceptions"] == []
def test_has_debug_ids_true(self) -> None:
event = self.store_event(
data=create_event(
exceptions=[create_exception_with_frame({"abs_path": "/some/path/to/file.js"})],
debug_meta_images=[
{
"type": "sourcemap",
"code_file": "/some/path/to/file.js",
"debug_id": "8d65dbd3-bb6c-5632-9049-7751111284ed",
}
],
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["has_debug_ids"]
def test_has_debug_ids_false(self) -> None:
event = self.store_event(
data=create_event(
exceptions=[create_exception_with_frame({"abs_path": "/some/path/to/file.js"})],
debug_meta_images=None,
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert not resp.data["has_debug_ids"]
def test_sdk_version(self) -> None:
event = self.store_event(
data=create_event(sdk={"name": "sentry.javascript.react", "version": "7.66.0"}),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["sdk_version"] == "7.66.0"
def test_no_sdk_version(self) -> None:
event = self.store_event(data=create_event(), project_id=self.project.id)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["sdk_version"] is None
def test_sdk_debug_id_support_full(self) -> None:
event = self.store_event(
data=create_event(sdk={"name": "sentry.javascript.react", "version": "7.66.0"}),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["sdk_debug_id_support"] == "full"
def test_sdk_debug_id_support_needs_upgrade(self) -> None:
event = self.store_event(
data=create_event(sdk={"name": "sentry.javascript.react", "version": "7.47.0"}),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert (
resp.data["sdk_debug_id_support"] == "needs-upgrade"
), MIN_JS_SDK_VERSION_FOR_DEBUG_IDS
def test_sdk_debug_id_support_unsupported(self) -> None:
event = self.store_event(
data=create_event(sdk={"name": "sentry.javascript.cordova", "version": "7.47.0"}),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["sdk_debug_id_support"] == "not-supported"
def test_sdk_debug_id_support_community_sdk(self) -> None:
event = self.store_event(
data=create_event(
sdk={"name": "sentry.javascript.some-custom-identifier", "version": "7.47.0"}
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["sdk_debug_id_support"] == "unofficial-sdk"
def test_release_has_some_artifact_positive(self) -> None:
event = self.store_event(
data=create_event(release="some-release"),
project_id=self.project.id,
)
release = Release.objects.get(organization=self.organization, version=event.release)
ReleaseFile.objects.create(
organization_id=self.organization.id,
release_id=release.id,
file=File.objects.create(name="bundle.js", type="release.file"),
name="~/bundle.js",
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["release_has_some_artifact"]
def test_release_has_some_artifact_negative(self) -> None:
event = self.store_event(
data=create_event(release="some-release"),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert not resp.data["release_has_some_artifact"]
def test_project_has_some_artifact_bundle_positive(self) -> None:
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id,
file=File.objects.create(name="artifact-bundle.zip", type="dummy.file"),
artifact_count=1,
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
event = self.store_event(
data=create_event(),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["project_has_some_artifact_bundle"]
def test_project_has_some_artifact_bundle_negative(self) -> None:
event = self.store_event(
data=create_event(),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert not resp.data["project_has_some_artifact_bundle"]
def test_project_has_some_artifact_bundle_with_a_debug_id_positive(self) -> None:
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id,
file=File.objects.create(name="artifact-bundle.zip", type="dummy.file"),
artifact_count=1,
)
DebugIdArtifactBundle.objects.create(
organization_id=self.organization.id,
debug_id="00000000-00000000-00000000-00000000",
artifact_bundle=artifact_bundle,
source_file_type=SourceFileType.SOURCE_MAP.value,
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
event = self.store_event(
data=create_event(),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["has_uploaded_some_artifact_with_a_debug_id"]
def test_project_has_some_artifact_bundle_with_a_debug_id_negative(self) -> None:
event = self.store_event(
data=create_event(),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert not resp.data["has_uploaded_some_artifact_with_a_debug_id"]
def test_multiple_exceptions(self) -> None:
event = self.store_event(
data=create_event(
exceptions=[
create_exception_with_frame({"abs_path": "/some/path/to/file.js"}),
create_exception_with_frame({"abs_path": "/some/path/to/some/other/file.js"}),
],
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert len(resp.data["exceptions"]) == 2
def test_frame_debug_id_no_debug_id(self) -> None:
event = self.store_event(
data=create_event(
exceptions=[create_exception_with_frame({"abs_path": "/some/path/to/file.js"})],
debug_meta_images=[
{
"type": "sourcemap",
"code_file": "/some/path/to/file/that/doesnt/match.js",
"debug_id": "8d65dbd3-bb6c-5632-9049-7751111284ed",
}
],
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
debug_id_process_result = resp.data["exceptions"][0]["frames"][0]["debug_id_process"]
assert debug_id_process_result["debug_id"] is None
assert not debug_id_process_result["uploaded_source_file_with_correct_debug_id"]
assert not debug_id_process_result["uploaded_source_map_with_correct_debug_id"]
def test_frame_debug_id_no_uploaded_source_no_uploaded_source_map(self) -> None:
event = self.store_event(
data=create_event(
exceptions=[create_exception_with_frame({"abs_path": "/some/path/to/file.js"})],
debug_meta_images=[
{
"type": "sourcemap",
"code_file": "/some/path/to/file.js",
"debug_id": "a5764857-ae35-34dc-8f25-a9c9e73aa898",
}
],
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
debug_id_process_result = resp.data["exceptions"][0]["frames"][0]["debug_id_process"]
assert debug_id_process_result["debug_id"] == "a5764857-ae35-34dc-8f25-a9c9e73aa898"
assert not debug_id_process_result["uploaded_source_file_with_correct_debug_id"]
assert not debug_id_process_result["uploaded_source_map_with_correct_debug_id"]
def test_frame_debug_id_uploaded_source_no_uploaded_source_map(self) -> None:
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id,
file=File.objects.create(name="artifact-bundle.zip", type="test.file"),
artifact_count=1,
)
DebugIdArtifactBundle.objects.create(
organization_id=self.organization.id,
debug_id="a5764857-ae35-34dc-8f25-a9c9e73aa898",
artifact_bundle=artifact_bundle,
source_file_type=SourceFileType.MINIFIED_SOURCE.value,
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
event = self.store_event(
data=create_event(
exceptions=[create_exception_with_frame({"abs_path": "/some/path/to/file.js"})],
debug_meta_images=[
{
"type": "sourcemap",
"code_file": "/some/path/to/file.js",
"debug_id": "a5764857-ae35-34dc-8f25-a9c9e73aa898",
}
],
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
debug_id_process_result = resp.data["exceptions"][0]["frames"][0]["debug_id_process"]
assert debug_id_process_result["debug_id"] == "a5764857-ae35-34dc-8f25-a9c9e73aa898"
assert debug_id_process_result["uploaded_source_file_with_correct_debug_id"]
assert not debug_id_process_result["uploaded_source_map_with_correct_debug_id"]
def test_frame_debug_id_no_uploaded_source_uploaded_source_map(self) -> None:
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id,
file=File.objects.create(name="artifact-bundle.zip", type="test.file"),
artifact_count=1,
)
DebugIdArtifactBundle.objects.create(
organization_id=self.organization.id,
debug_id="a5764857-ae35-34dc-8f25-a9c9e73aa898",
artifact_bundle=artifact_bundle,
source_file_type=SourceFileType.SOURCE_MAP.value,
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
event = self.store_event(
data=create_event(
exceptions=[create_exception_with_frame({"abs_path": "/some/path/to/file.js"})],
debug_meta_images=[
{
"type": "sourcemap",
"code_file": "/some/path/to/file.js",
"debug_id": "a5764857-ae35-34dc-8f25-a9c9e73aa898",
}
],
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
debug_id_process_result = resp.data["exceptions"][0]["frames"][0]["debug_id_process"]
assert debug_id_process_result["debug_id"] == "a5764857-ae35-34dc-8f25-a9c9e73aa898"
assert not debug_id_process_result["uploaded_source_file_with_correct_debug_id"]
assert debug_id_process_result["uploaded_source_map_with_correct_debug_id"]
def test_frame_debug_id_uploaded_source_uploaded_source_map(self) -> None:
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id,
file=File.objects.create(name="artifact-bundle.zip", type="test.file"),
artifact_count=1,
)
DebugIdArtifactBundle.objects.create(
organization_id=self.organization.id,
debug_id="a5764857-ae35-34dc-8f25-a9c9e73aa898",
artifact_bundle=artifact_bundle,
source_file_type=SourceFileType.SOURCE.value,
)
DebugIdArtifactBundle.objects.create(
organization_id=self.organization.id,
debug_id="a5764857-ae35-34dc-8f25-a9c9e73aa898",
artifact_bundle=artifact_bundle,
source_file_type=SourceFileType.SOURCE_MAP.value,
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
event = self.store_event(
data=create_event(
exceptions=[create_exception_with_frame({"abs_path": "/some/path/to/file.js"})],
debug_meta_images=[
{
"type": "sourcemap",
"code_file": "/some/path/to/file.js",
"debug_id": "a5764857-ae35-34dc-8f25-a9c9e73aa898",
}
],
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
debug_id_process_result = resp.data["exceptions"][0]["frames"][0]["debug_id_process"]
assert debug_id_process_result["debug_id"] == "a5764857-ae35-34dc-8f25-a9c9e73aa898"
assert debug_id_process_result["uploaded_source_file_with_correct_debug_id"]
assert debug_id_process_result["uploaded_source_map_with_correct_debug_id"]
def test_frame_release_process_release_file_matching_source_file_names(self) -> None:
event = self.store_event(
data=create_event(
exceptions=[
create_exception_with_frame({"abs_path": "http://example.com/bundle.js"})
],
release="some-release",
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
assert release_process_result["matching_source_file_names"] == [
"http://example.com/bundle.js",
"~/bundle.js",
]
    def test_frame_release_process_release_file_source_map_reference(self) -> None:
        """A release file whose body ends in a `sourceMappingURL` comment should
        surface that reference and its "~/"-normalized matching map name."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.js"})
                ],
                release="some-release",
            ),
            project_id=self.project.id,
        )
        release = Release.objects.get(organization=self.organization, version=event.release)
        file = File.objects.create(name="bundle.js", type="release.file")
        # The sourceMappingURL comment inside the file body is the map reference.
        fileobj = ContentFile(b'console.log("hello world");\n//# sourceMappingURL=bundle.js.map\n')
        file.putfile(fileobj)
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=file,
            name="~/bundle.js",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["matching_source_map_name"] == "~/bundle.js.map"
        assert release_process_result["source_map_reference"] == "bundle.js.map"
    def test_frame_release_process_release_file_data_protocol_source_map_reference(self) -> None:
        """A `data:` URL in the file's Sourcemap header is treated as an inline
        source map: lookup succeeds, with no separate matching map file name."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.js"})
                ],
                release="some-release",
            ),
            project_id=self.project.id,
        )
        release = Release.objects.get(organization=self.organization, version=event.release)
        # Sourcemap header carries the map itself as a base64 data: URL.
        file = File.objects.create(
            name="bundle.js",
            type="release.file",
            headers={
                "Sourcemap": "data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFpbi5qcy"
            },
        )
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=file,
            name="~/bundle.js",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_map_lookup_result"] == "found"
        assert release_process_result["source_map_reference"] == "Inline Sourcemap"
        assert release_process_result["matching_source_map_name"] is None
def test_frame_release_process_release_file_source_file_not_found(self) -> None:
event = self.store_event(
data=create_event(
exceptions=[
create_exception_with_frame({"abs_path": "http://example.com/bundle.js"})
],
release="some-release",
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
assert release_process_result["source_file_lookup_result"] == "unsuccessful"
assert release_process_result["source_map_lookup_result"] == "unsuccessful"
assert release_process_result["source_map_reference"] is None
assert release_process_result["matching_source_map_name"] is None
    def test_frame_release_process_release_file_source_file_wrong_dist(self) -> None:
        """The event carries a dist but the release file was uploaded without
        one, so the source file lookup should report "wrong-dist"."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.js"})
                ],
                release="some-release",
                dist="some-dist",
            ),
            project_id=self.project.id,
        )
        release = Release.objects.get(organization=self.organization, version=event.release)
        file = File.objects.create(
            name="bundle.js", type="release.file", headers={"Sourcemap": "bundle.js.map"}
        )
        # Created without an ident/dist_id, i.e. not bound to the event's dist.
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=file,
            name="~/bundle.js",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "wrong-dist"
        assert release_process_result["source_map_lookup_result"] == "unsuccessful"
        assert release_process_result["source_map_reference"] is None
        assert release_process_result["matching_source_map_name"] is None
    def test_frame_release_process_release_file_source_file_successful(self) -> None:
        """The source file is found via its release file; the map reference
        comes from the Sourcemap header, but no map file was uploaded, so the
        map lookup itself stays unsuccessful."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.js"})
                ],
                release="some-release",
            ),
            project_id=self.project.id,
        )
        release = Release.objects.get(organization=self.organization, version=event.release)
        file = File.objects.create(
            name="bundle.js", type="release.file", headers={"Sourcemap": "bundle.js.map"}
        )
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=file,
            name="~/bundle.js",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
        assert release_process_result["source_map_lookup_result"] == "unsuccessful"
        assert release_process_result["source_map_reference"] == "bundle.js.map"
        assert release_process_result["matching_source_map_name"] == "~/bundle.js.map"
    def test_frame_release_process_release_file_source_map_wrong_dist(self) -> None:
        """The source file is bound to the event's dist and found, but the map
        file was uploaded without a dist, so the map lookup reports
        "wrong-dist"."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.js"})
                ],
                release="some-release",
                dist="some-dist",
            ),
            project_id=self.project.id,
        )
        release = Release.objects.get(organization=self.organization, version=event.release)
        source_file = File.objects.create(
            name="bundle.js", type="release.file", headers={"Sourcemap": "bundle.js.map"}
        )
        source_map_file = File.objects.create(
            name="bundle.js.map",
            type="release.file",
        )
        dist = Distribution.objects.get(name="some-dist", release=release)
        # Source file: correctly bound to the event's dist.
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=source_file,
            name="~/bundle.js",
            ident=ReleaseFile.get_ident("~/bundle.js", dist.name),
            dist_id=dist.id,
        )
        # Source map: uploaded without a dist, hence the wrong-dist result below.
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=source_map_file,
            name="~/bundle.js.map",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
        assert release_process_result["source_map_lookup_result"] == "wrong-dist"
        assert release_process_result["source_map_reference"] == "bundle.js.map"
        assert release_process_result["matching_source_map_name"] == "~/bundle.js.map"
    def test_frame_release_process_release_file_source_map_successful(self) -> None:
        """Both source file and map are bound to the event's dist; the relative
        "../bundle.js.map" reference resolves against ~/static/ to
        ~/bundle.js.map and both lookups succeed."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/static/bundle.js"})
                ],
                release="some-release",
                dist="some-dist",
            ),
            project_id=self.project.id,
        )
        release = Release.objects.get(organization=self.organization, version=event.release)
        # Sourcemap header is relative to the source file's directory.
        source_file = File.objects.create(
            name="static/bundle.js",
            type="release.file",
            headers={"Sourcemap": "../bundle.js.map"},
        )
        source_map_file = File.objects.create(
            name="bundle.js.map",
            type="release.file",
        )
        dist = Distribution.objects.get(name="some-dist", release=release)
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=source_file,
            name="~/static/bundle.js",
            ident=ReleaseFile.get_ident("~/static/bundle.js", dist.name),
            dist_id=dist.id,
        )
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=source_map_file,
            name="~/bundle.js.map",
            ident=ReleaseFile.get_ident("~/bundle.js.map", dist.name),
            dist_id=dist.id,
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
        assert release_process_result["source_map_lookup_result"] == "found"
        assert release_process_result["source_map_reference"] == "../bundle.js.map"
        assert release_process_result["matching_source_map_name"] == "~/bundle.js.map"
    def test_frame_release_process_artifact_bundle_data_protocol_source_map_reference(self) -> None:
        """Artifact-bundle variant of the inline source map case: a `data:` URL
        in the bundled file's Sourcemap header yields "Inline Sourcemap" with no
        separate matching map name."""
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr("files/_/_/bundle.min.js", b'console.log("hello world");')
            # Manifest maps the archive path to its URL and headers; the
            # Sourcemap header holds the inline (data:) map.
            zip_file.writestr(
                "manifest.json",
                orjson.dumps(
                    {
                        "files": {
                            "files/_/_/bundle.min.js": {
                                "url": "~/bundle.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                    "Sourcemap": "data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFpbi5qcy",
                                },
                            },
                        },
                    }
                ).decode(),
            )
        compressed.seek(0)
        file_obj = File.objects.create(name="artifact_bundle.zip", type="artifact.bundle")
        file_obj.putfile(compressed)
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.min.js"})
                ],
                release="some-release",
            ),
            project_id=self.project.id,
        )
        artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id,
            file=file_obj,
            artifact_count=1,
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=artifact_bundle,
        )
        ReleaseArtifactBundle.objects.create(
            organization_id=self.organization.id,
            release_name="some-release",
            artifact_bundle=artifact_bundle,
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=artifact_bundle,
            url="~/bundle.min.js",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
        assert release_process_result["source_map_lookup_result"] == "found"
        assert release_process_result["source_map_reference"] == "Inline Sourcemap"
        assert release_process_result["matching_source_map_name"] is None
    def test_frame_release_process_artifact_bundle_source_file_wrong_dist(self) -> None:
        """The event carries a dist but the bundle is associated to the release
        without one, so the source file lookup should report "wrong-dist"."""
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr(
                "files/_/_/bundle.min.js",
                b'console.log("hello world");\n//# sourceMappingURL=bundle.min.js.map\n',
            )
            zip_file.writestr(
                "manifest.json",
                orjson.dumps(
                    {
                        "files": {
                            "files/_/_/bundle.min.js": {
                                "url": "~/bundle.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                ).decode(),
            )
        compressed.seek(0)
        file_obj = File.objects.create(name="artifact_bundle.zip", type="artifact.bundle")
        file_obj.putfile(compressed)
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.min.js"})
                ],
                release="some-release",
                dist="some-dist",
            ),
            project_id=self.project.id,
        )
        artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id,
            file=file_obj,
            artifact_count=1,
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=artifact_bundle,
        )
        # No dist_name here — that mismatch drives the wrong-dist result below.
        ReleaseArtifactBundle.objects.create(
            organization_id=self.organization.id,
            release_name="some-release",
            artifact_bundle=artifact_bundle,
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=artifact_bundle,
            url="~/bundle.min.js",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "wrong-dist"
    def test_frame_release_process_artifact_bundle_source_file_successful(self) -> None:
        """Happy path: the source file is found through an indexed artifact
        bundle associated to the event's release (no dist involved)."""
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr(
                "files/_/_/bundle.min.js",
                b'console.log("hello world");\n//# sourceMappingURL=bundle.min.js.map\n',
            )
            zip_file.writestr(
                "manifest.json",
                orjson.dumps(
                    {
                        "files": {
                            "files/_/_/bundle.min.js": {
                                "url": "~/bundle.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                ).decode(),
            )
        compressed.seek(0)
        file_obj = File.objects.create(name="artifact_bundle.zip", type="artifact.bundle")
        file_obj.putfile(compressed)
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.min.js"})
                ],
                release="some-release",
            ),
            project_id=self.project.id,
        )
        artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id,
            file=file_obj,
            artifact_count=1,
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=artifact_bundle,
        )
        ReleaseArtifactBundle.objects.create(
            organization_id=self.organization.id,
            release_name="some-release",
            artifact_bundle=artifact_bundle,
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=artifact_bundle,
            url="~/bundle.min.js",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
    def test_frame_release_process_artifact_bundle_source_map_not_found(self) -> None:
        """The source file is found and references bundle.min.js.map, but the
        bundle only contains a differently named map, so the map lookup stays
        unsuccessful while the expected reference/name are still reported."""
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr(
                "files/_/_/bundle.min.js",
                b'console.log("hello world");\n//# sourceMappingURL=bundle.min.js.map\n',
            )
            zip_file.writestr("files/_/_/bundle.min.js.map", b"")
            # Manifest deliberately exposes only a *wrong* map URL.
            zip_file.writestr(
                "manifest.json",
                orjson.dumps(
                    {
                        "files": {
                            "files/_/_/bundle.min.js": {
                                "url": "~/bundle.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/wrong-bundle.min.js.map": {
                                "url": "~/wrong-bundle.min.js.map",
                                "type": "source_map",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                ).decode(),
            )
        compressed.seek(0)
        file_obj = File.objects.create(name="artifact_bundle.zip", type="artifact.bundle")
        file_obj.putfile(compressed)
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.min.js"})
                ],
                release="some-release",
            ),
            project_id=self.project.id,
        )
        artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id,
            file=file_obj,
            artifact_count=1,
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=artifact_bundle,
        )
        ReleaseArtifactBundle.objects.create(
            organization_id=self.organization.id,
            release_name="some-release",
            artifact_bundle=artifact_bundle,
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=artifact_bundle,
            url="~/bundle.min.js",
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=artifact_bundle,
            url="~/wrong-bundle.min.js.map",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
        assert release_process_result["source_map_lookup_result"] == "unsuccessful"
        assert release_process_result["source_map_reference"] == "bundle.min.js.map"
        assert release_process_result["matching_source_map_name"] == "~/bundle.min.js.map"
    def test_frame_release_process_artifact_bundle_source_map_wrong_dist(self) -> None:
        """Two bundles share the same archive: one (with the source file) is
        associated to the event's dist, the other (with the map) to a different
        dist — so the map lookup reports "wrong-dist"."""
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr(
                "files/_/_/bundle.min.js",
                b'console.log("hello world");\n//# sourceMappingURL=bundle.min.js.map\n',
            )
            zip_file.writestr("files/_/_/bundle.min.js.map", b"")
            zip_file.writestr(
                "manifest.json",
                orjson.dumps(
                    {
                        "files": {
                            "files/_/_/bundle.min.js": {
                                "url": "~/bundle.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/bundle.min.js.map": {
                                "url": "~/bundle.min.js.map",
                                "type": "source_map",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                ).decode(),
            )
        compressed.seek(0)
        file_obj = File.objects.create(name="artifact_bundle.zip", type="artifact.bundle")
        file_obj.putfile(compressed)
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.min.js"})
                ],
                release="some-release",
                dist="some-dist",
            ),
            project_id=self.project.id,
        )
        # Bundle 1: matches the event's dist, indexes only the source file.
        source_file_artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id,
            file=file_obj,
            artifact_count=1,
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=source_file_artifact_bundle,
        )
        ReleaseArtifactBundle.objects.create(
            organization_id=self.organization.id,
            release_name="some-release",
            dist_name="some-dist",
            artifact_bundle=source_file_artifact_bundle,
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=source_file_artifact_bundle,
            url="~/bundle.min.js",
        )
        # Bundle 2: indexes the map too, but under a *different* dist.
        source_map_artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id,
            file=file_obj,
            artifact_count=1,
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=source_map_artifact_bundle,
        )
        ReleaseArtifactBundle.objects.create(
            organization_id=self.organization.id,
            release_name="some-release",
            dist_name="some-other-dist",
            artifact_bundle=source_map_artifact_bundle,
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=source_map_artifact_bundle,
            url="~/bundle.min.js",
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=source_map_artifact_bundle,
            url="~/bundle.min.js.map",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
        assert release_process_result["source_map_lookup_result"] == "wrong-dist"
        assert release_process_result["source_map_reference"] == "bundle.min.js.map"
        assert release_process_result["matching_source_map_name"] == "~/bundle.min.js.map"
    def test_frame_release_process_artifact_bundle_source_map_successful(self) -> None:
        """Happy path: one indexed bundle contains both the minified source and
        its map, so both lookups succeed."""
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr(
                "files/_/_/bundle.min.js",
                b'console.log("hello world");\n//# sourceMappingURL=bundle.min.js.map\n',
            )
            zip_file.writestr("files/_/_/bundle.min.js.map", b"")
            zip_file.writestr(
                "manifest.json",
                orjson.dumps(
                    {
                        "files": {
                            "files/_/_/bundle.min.js": {
                                "url": "~/bundle.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/bundle.min.js.map": {
                                "url": "~/bundle.min.js.map",
                                "type": "source_map",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                ).decode(),
            )
        compressed.seek(0)
        file_obj = File.objects.create(name="artifact_bundle.zip", type="artifact.bundle")
        file_obj.putfile(compressed)
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.min.js"})
                ],
                release="some-release",
            ),
            project_id=self.project.id,
        )
        artifact_bundle = ArtifactBundle.objects.create(
            organization_id=self.organization.id,
            file=file_obj,
            artifact_count=1,
        )
        ProjectArtifactBundle.objects.create(
            organization_id=self.organization.id,
            project_id=self.project.id,
            artifact_bundle=artifact_bundle,
        )
        ReleaseArtifactBundle.objects.create(
            organization_id=self.organization.id,
            release_name="some-release",
            artifact_bundle=artifact_bundle,
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=artifact_bundle,
            url="~/bundle.min.js",
        )
        ArtifactBundleIndex.objects.create(
            organization_id=self.organization.id,
            artifact_bundle=artifact_bundle,
            url="~/bundle.min.js.map",
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
        assert release_process_result["source_map_lookup_result"] == "found"
        assert release_process_result["source_map_reference"] == "bundle.min.js.map"
        assert release_process_result["matching_source_map_name"] == "~/bundle.min.js.map"
    def test_frame_release_file_success(self) -> None:
        """Legacy release-bundle path: an artifact index keyed by dist points at
        a release-artifacts.zip release file; both source file and map resolve
        through it."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.min.js"})
                ],
                release="some-release",
                dist="some-dist",
            ),
            project_id=self.project.id,
        )
        release = Release.objects.get(organization=self.organization, version=event.release)
        dist = Distribution.objects.get(name="some-dist", release=release)
        # Artifact index mapping URLs to the archive that contains them,
        # keyed by the event's dist via archive_ident.
        artifact_index = File.objects.create(
            name="artifact-index.json",
            type=ARTIFACT_INDEX_TYPE,
        )
        artifact_index.putfile(
            ContentFile(
                orjson.dumps(
                    {
                        "files": {
                            "~/bundle.min.js": {
                                "type": "minified_source",
                                "archive_ident": ReleaseFile.get_ident(
                                    "release-artifacts.zip", dist.name
                                ),
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "~/bundle.min.js.map": {
                                "type": "source_map",
                                "archive_ident": ReleaseFile.get_ident(
                                    "release-artifacts.zip", dist.name
                                ),
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                )
            )
        )
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=artifact_index,
            name=ARTIFACT_INDEX_FILENAME,
            ident=ReleaseFile.get_ident(ARTIFACT_INDEX_FILENAME, dist.name),
            dist_id=dist.id,
            artifact_count=2,
        )
        # The actual archive holding both files referenced by the index.
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr(
                "files/_/_/bundle.min.js",
                b'console.log("hello world");\n//# sourceMappingURL=bundle.min.js.map\n',
            )
            zip_file.writestr("files/_/_/bundle.min.js.map", b"")
            zip_file.writestr(
                "manifest.json",
                orjson.dumps(
                    {
                        "files": {
                            "files/_/_/bundle.min.js": {
                                "url": "~/bundle.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/bundle.min.js.map": {
                                "url": "~/bundle.min.js.map",
                                "type": "source_map",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                ).decode(),
            )
        compressed.seek(0)
        release_artifact_bundle = File.objects.create(
            name="release-artifacts.zip", type="release.bundle"
        )
        release_artifact_bundle.putfile(compressed)
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=release_artifact_bundle,
            name="release-artifacts.zip",
            ident=ReleaseFile.get_ident("release-artifacts.zip", dist.name),
            dist_id=dist.id,
            artifact_count=0,
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "found"
        assert release_process_result["source_map_lookup_result"] == "found"
        assert release_process_result["source_map_reference"] == "bundle.min.js.map"
        assert release_process_result["matching_source_map_name"] == "~/bundle.min.js.map"
    def test_frame_release_file_wrong_dist(self) -> None:
        """Legacy release-bundle path with a dist mismatch: the event has a
        dist, but the artifact index and archive were uploaded without one, so
        the source file lookup reports "wrong-dist"."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frame({"abs_path": "http://example.com/bundle.min.js"})
                ],
                release="some-release",
                dist="some-dist",
            ),
            project_id=self.project.id,
        )
        release = Release.objects.get(organization=self.organization, version=event.release)
        # Index created without a dist-qualified ident — the mismatch under test.
        artifact_index = File.objects.create(
            name="artifact-index.json",
            type=ARTIFACT_INDEX_TYPE,
        )
        artifact_index.putfile(
            ContentFile(
                orjson.dumps(
                    {
                        "files": {
                            "~/bundle.min.js": {
                                "type": "minified_source",
                                "archive_ident": ReleaseFile.get_ident("release-artifacts.zip"),
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "~/bundle.min.js.map": {
                                "type": "source_map",
                                "archive_ident": ReleaseFile.get_ident("release-artifacts.zip"),
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                )
            )
        )
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=artifact_index,
            name=ARTIFACT_INDEX_FILENAME,
            ident=ReleaseFile.get_ident(ARTIFACT_INDEX_FILENAME),
            artifact_count=2,
        )
        compressed = BytesIO(b"SYSB")
        with zipfile.ZipFile(compressed, "a") as zip_file:
            zip_file.writestr(
                "files/_/_/bundle.min.js",
                b'console.log("hello world");\n//# sourceMappingURL=bundle.min.js.map\n',
            )
            zip_file.writestr("files/_/_/bundle.min.js.map", b"")
            zip_file.writestr(
                "manifest.json",
                orjson.dumps(
                    {
                        "files": {
                            "files/_/_/bundle.min.js": {
                                "url": "~/bundle.min.js",
                                "type": "minified_source",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                            "files/_/_/bundle.min.js.map": {
                                "url": "~/bundle.min.js.map",
                                "type": "source_map",
                                "headers": {
                                    "content-type": "application/json",
                                },
                            },
                        },
                    }
                ).decode(),
            )
        compressed.seek(0)
        release_artifact_bundle = File.objects.create(
            name="release-artifacts.zip", type="release.bundle"
        )
        release_artifact_bundle.putfile(compressed)
        ReleaseFile.objects.create(
            organization_id=self.organization.id,
            release_id=release.id,
            file=release_artifact_bundle,
            name="release-artifacts.zip",
            ident=ReleaseFile.get_ident("release-artifacts.zip"),
            artifact_count=0,
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        release_process_result = resp.data["exceptions"][0]["frames"][0]["release_process"]
        assert release_process_result["source_file_lookup_result"] == "wrong-dist"
        assert release_process_result["source_map_lookup_result"] == "unsuccessful"
def test_has_scraping_data_flag_true(self) -> None:
event = self.store_event(
data=create_event(
exceptions=[],
scraping_attempts=[
{
"url": "https://example.com/bundle0.js",
"status": "success",
}
],
),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert resp.data["has_scraping_data"]
def test_has_scraping_data_flag_false(self) -> None:
event = self.store_event(
data=create_event(exceptions=[]),
project_id=self.project.id,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
assert not resp.data["has_scraping_data"]
    def test_scraping_result_source_file(self) -> None:
        """Per-frame scraping results: each frame's source_file echoes the
        matching scraping attempt (by URL), or None when no attempt exists."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frames(
                        [
                            {"abs_path": "https://example.com/bundle0.js"},
                            {"abs_path": "https://example.com/bundle1.js"},
                            {"abs_path": "https://example.com/bundle2.js"},
                            {"abs_path": "https://example.com/bundle3.js"},
                        ]
                    ),
                ],
                # Attempts cover only the first three frames; bundle3 has none.
                scraping_attempts=[
                    {
                        "url": "https://example.com/bundle0.js",
                        "status": "success",
                    },
                    {
                        "url": "https://example.com/bundle1.js",
                        "status": "not_attempted",
                    },
                    {
                        "url": "https://example.com/bundle2.js",
                        "status": "failure",
                        "reason": "not_found",
                        "details": "Did not find source",
                    },
                ],
            ),
            project_id=self.project.id,
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        assert resp.data["exceptions"][0]["frames"][0]["scraping_process"]["source_file"] == {
            "url": "https://example.com/bundle0.js",
            "status": "success",
        }
        assert resp.data["exceptions"][0]["frames"][1]["scraping_process"]["source_file"] == {
            "url": "https://example.com/bundle1.js",
            "status": "not_attempted",
        }
        assert resp.data["exceptions"][0]["frames"][2]["scraping_process"]["source_file"] == {
            "url": "https://example.com/bundle2.js",
            "status": "failure",
            "reason": "not_found",
            "details": "Did not find source",
        }
        assert resp.data["exceptions"][0]["frames"][3]["scraping_process"]["source_file"] is None
    def test_scraping_result_source_map(self) -> None:
        """Per-frame scraping results for source maps: the map URL comes from
        each frame's `data.sourcemap`, matched against the scraping attempts;
        frames with no matching attempt report None."""
        event = self.store_event(
            data=create_event(
                exceptions=[
                    create_exception_with_frames(
                        frames=[
                            {
                                "abs_path": "./app/index.ts",
                                "data": {"sourcemap": "https://example.com/bundle0.js.map"},
                            },
                            {
                                "abs_path": "./app/index.ts",
                                "data": {"sourcemap": "https://example.com/bundle1.js.map"},
                            },
                            {
                                "abs_path": "./app/index.ts",
                                "data": {"sourcemap": "https://example.com/bundle2.js.map"},
                            },
                            {
                                "abs_path": "./app/index.ts",
                                "data": {"sourcemap": "https://example.com/bundle3.js.map"},
                            },
                        ],
                        raw_frames=[
                            {
                                "abs_path": "https://example.com/bundle0.js",
                            },
                            {
                                "abs_path": "https://example.com/bundle1.js",
                            },
                            {
                                "abs_path": "https://example.com/bundle2.js",
                            },
                            {
                                "abs_path": "https://example.com/bundle3.js",
                            },
                        ],
                    )
                ],
                # No attempt exists for bundle3.js.map, hence None for frame 3.
                scraping_attempts=[
                    {
                        "url": "https://example.com/bundle0.js.map",
                        "status": "success",
                    },
                    {
                        "url": "https://example.com/bundle1.js.map",
                        "status": "not_attempted",
                    },
                    {
                        "url": "https://example.com/bundle2.js.map",
                        "status": "failure",
                        "reason": "not_found",
                        "details": "Did not find source",
                    },
                ],
            ),
            project_id=self.project.id,
        )
        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            event.event_id,
        )
        assert resp.data["exceptions"][0]["frames"][0]["scraping_process"]["source_map"] == {
            "url": "https://example.com/bundle0.js.map",
            "status": "success",
        }
        assert resp.data["exceptions"][0]["frames"][1]["scraping_process"]["source_map"] == {
            "url": "https://example.com/bundle1.js.map",
            "status": "not_attempted",
        }
        assert resp.data["exceptions"][0]["frames"][2]["scraping_process"]["source_map"] == {
            "url": "https://example.com/bundle2.js.map",
            "status": "failure",
            "reason": "not_found",
            "details": "Did not find source",
        }
        assert resp.data["exceptions"][0]["frames"][3]["scraping_process"]["source_map"] is None
| SourceMapDebugBlueThunderEditionEndpointTestCase |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofworkv2.py | {
"start": 124297,
"end": 125237
class ____(UOWTest):
    def test_ensure_cache(self):
        """Flushes through a connection configured with a `compiled_cache`
        execution option must keep using that cache; each distinct flush
        statement (INSERT, UPDATE, DELETE) grows the mapper's compiled
        cache by exactly one entry."""
        users, User = self.tables.users, self.classes.User
        self.mapper_registry.map_imperatively(User, users)
        cache = {}
        # The mapper-level compiled cache starts empty before any flush.
        eq_(len(inspect(User)._compiled_cache), 0)
        with testing.db.connect().execution_options(
            compiled_cache=cache
        ) as conn:
            s = Session(conn)
            u1 = User(name="adf")
            s.add(u1)
            s.flush()  # INSERT -> first cached statement
            is_(conn._execution_options["compiled_cache"], cache)
            eq_(len(inspect(User)._compiled_cache), 1)
            u1.name = "newname"
            s.flush()  # UPDATE -> second cached statement
            is_(conn._execution_options["compiled_cache"], cache)
            eq_(len(inspect(User)._compiled_cache), 2)
            s.delete(u1)
            s.flush()  # DELETE -> third cached statement
            is_(conn._execution_options["compiled_cache"], cache)
            eq_(len(inspect(User)._compiled_cache), 3)
| EnsureCacheTest |
python | django__django | tests/admin_views/models.py | {
"start": 13633,
"end": 14005
class ____(models.Model):
    "Because we all know there's only one real use case for GFKs."
    name = models.CharField(max_length=25)
    # Generic foreign key triple: content_type + object_id together let
    # content_object point at an instance of any model.
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey("content_type", "object_id")
    def __str__(self):
        # Display the tag by its human-readable name.
        return self.name
| FunkyTag |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 575031,
"end": 575427
class ____(sgqlc.types.Type):
    """An edge in a connection.

    Generated GraphQL edge type pairing a pagination cursor with its node.
    """
    __schema__ = github_schema
    __field_names__ = ("cursor", "node")
    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    """A cursor for use in pagination."""
    node = sgqlc.types.Field("DeploymentStatus", graphql_name="node")
    """The item at the end of the edge."""
| DeploymentStatusEdge |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/documentation/documentation.py | {
"start": 835,
"end": 4763
} | class ____(DocumentationCheck):
name = "Breaking changes must be accompanied by a migration guide"
description = "When a breaking change is introduced, we check that a migration guide is available. It should be stored under `./docs/integrations/<connector-type>s/<connector-name>-migrations.md`.\nThis document should contain a section for each breaking change, in order of the version descending. It must explain users which action to take to migrate to the new version."
def _run(self, connector: Connector) -> CheckResult:
breaking_changes = get(connector.metadata, "releases.breakingChanges")
if not breaking_changes:
return self.create_check_result(
connector=connector,
passed=True,
message="No breaking changes found. A migration guide is not required",
)
migration_guide_file_path = connector.migration_guide_file_path
migration_guide_exists = migration_guide_file_path is not None and migration_guide_file_path.exists()
if not migration_guide_exists:
return self.create_check_result(
connector=connector,
passed=False,
message=f"Migration guide file is missing for {connector.technical_name}. Please create a migration guide in ./docs/integrations/<connector-type>s/<connector-name>-migrations.md`",
)
expected_title = f"# {connector.name_from_metadata} Migration Guide"
expected_version_header_start = "## Upgrading to "
migration_guide_content = migration_guide_file_path.read_text()
try:
first_line = migration_guide_content.splitlines()[0]
except IndexError:
first_line = migration_guide_content
if not first_line == expected_title:
return self.create_check_result(
connector=connector,
passed=False,
message=f"Migration guide file for {connector.technical_name} does not start with the correct header. Expected '{expected_title}', got '{first_line}'",
)
# Check that the migration guide contains a section for each breaking change key ## Upgrading to {version}
# Note that breaking change is a dict where the version is the key
# Note that the migration guide must have the sections in order of the version descending
# 3.0.0, 2.0.0, 1.0.0, etc
# This means we have to record the headings in the migration guide and then check that they are in order
# We also have to check that the headings are in the breaking changes dict
ordered_breaking_changes = sorted(breaking_changes.keys(), reverse=True)
ordered_expected_headings = [f"{expected_version_header_start}{version}" for version in ordered_breaking_changes]
ordered_heading_versions = []
for line in migration_guide_content.splitlines():
stripped_line = line.strip()
if stripped_line.startswith(expected_version_header_start):
version = stripped_line.replace(expected_version_header_start, "")
ordered_heading_versions.append(version)
if ordered_breaking_changes != ordered_heading_versions:
return self.create_check_result(
connector=connector,
passed=False,
message=textwrap.dedent(
f"""
Migration guide file for {connector.name_from_metadata} has incorrect version headings.
Check for missing, extra, or misordered headings, or headers with typos.
Expected headings: {ordered_expected_headings}
"""
),
)
return self.create_check_result(
connector=connector,
passed=True,
message="The migration guide is correctly templated",
)
| CheckMigrationGuide |
python | readthedocs__readthedocs.org | readthedocs/core/unresolver.py | {
"start": 1041,
"end": 1140
} | class ____(UnresolverError):
def __init__(self, domain):
self.domain = domain
| DomainError |
python | chroma-core__chroma | chromadb/test/conftest.py | {
"start": 28126,
"end": 33488
} | class ____:
"""This allows consuming tests to be parameterized by async/sync versions of the client and papers over the async implementation.
If you don't need to manually construct clients, use the `client` fixture instead.
"""
_system: System
# Need to track created clients so we can call .clear_system_cache() during teardown
_created_clients: List[ClientAPI] = []
def __init__(self, system: System):
self._system = system
def create_client(self, *args: Any, **kwargs: Any) -> ClientCreator:
if kwargs.get("settings") is None:
kwargs["settings"] = self._system.settings
if (
self._system.settings.chroma_api_impl
== "chromadb.api.async_fastapi.AsyncFastAPI"
):
client = cast(ClientCreator, AsyncClientCreatorSync.create(*args, **kwargs))
self._created_clients.append(client)
return client
client = ClientCreator(*args, **kwargs)
self._created_clients.append(client)
return client
def create_client_from_system(self) -> ClientCreator:
if (
self._system.settings.chroma_api_impl
== "chromadb.api.async_fastapi.AsyncFastAPI"
):
client = cast(
ClientCreator, AsyncClientCreatorSync.from_system_async(self._system)
)
self._created_clients.append(client)
return client
client = ClientCreator.from_system(self._system)
self._created_clients.append(client)
return client
def create_admin_client(self, *args: Any, **kwargs: Any) -> AdminClient:
if (
self._system.settings.chroma_api_impl
== "chromadb.api.async_fastapi.AsyncFastAPI"
):
client = cast(AdminClient, AsyncAdminClientSync(*args, **kwargs))
self._created_clients.append(client)
return client
client = AdminClient(*args, **kwargs)
self._created_clients.append(client)
return client
def create_admin_client_from_system(self) -> AdminClient:
if (
self._system.settings.chroma_api_impl
== "chromadb.api.async_fastapi.AsyncFastAPI"
):
client = cast(AdminClient, AsyncAdminClientSync.from_system(self._system))
self._created_clients.append(client)
return client
client = AdminClient.from_system(self._system)
self._created_clients.append(client)
return client
@pytest.fixture(scope="function")
def client_factories(system: System) -> Generator[ClientFactories, None, None]:
system.reset_state()
factories = ClientFactories(system)
yield factories
while len(factories._created_clients) > 0:
client = factories._created_clients.pop()
client.clear_system_cache()
del client
def create_isolated_database(client: ClientAPI) -> None:
"""Create an isolated database for a test and updates the client to use it."""
admin_settings = client.get_settings()
if admin_settings.chroma_api_impl == "chromadb.api.async_fastapi.AsyncFastAPI":
admin_settings.chroma_api_impl = "chromadb.api.fastapi.FastAPI"
admin = AdminClient(admin_settings)
database = "test_" + str(uuid.uuid4())
admin.create_database(database)
client.set_database(database)
@pytest.fixture(scope="function")
def client(system: System) -> Generator[ClientAPI, None, None]:
system.reset_state()
if system.settings.chroma_api_impl == "chromadb.api.async_fastapi.AsyncFastAPI":
client = cast(Any, AsyncClientCreatorSync.from_system_async(system))
yield client
client.clear_system_cache()
else:
client = ClientCreator.from_system(system)
yield client
client.clear_system_cache()
@pytest.fixture(scope="function")
def http_client(system_http_server: System) -> Generator[ClientAPI, None, None]:
system_http_server.reset_state()
if (
system_http_server.settings.chroma_api_impl
== "chromadb.api.async_fastapi.AsyncFastAPI"
):
client = cast(Any, AsyncClientCreatorSync.from_system_async(system_http_server))
yield client
client.clear_system_cache()
else:
client = ClientCreator.from_system(system_http_server)
yield client
client.clear_system_cache()
@pytest.fixture(scope="function")
def client_ssl(system_ssl: System) -> Generator[ClientAPI, None, None]:
system_ssl.reset_state()
client = ClientCreator.from_system(system_ssl)
yield client
client.clear_system_cache()
@pytest.fixture(scope="function")
def api_wrong_cred(
system_wrong_auth: System,
) -> Generator[ServerAPI, None, None]:
system_wrong_auth.reset_state()
api = system_wrong_auth.instance(ServerAPI)
yield api
@pytest.fixture(scope="function")
def api_with_authn_rbac_authz(
system_authn_rbac_authz: System,
) -> Generator[ServerAPI, None, None]:
system_authn_rbac_authz.reset_state()
api = system_authn_rbac_authz.instance(ServerAPI)
yield api
@pytest.fixture(scope="function")
def api_with_server_auth(system_auth: System) -> Generator[ServerAPI, None, None]:
_sys = system_auth
_sys.reset_state()
api = _sys.instance(ServerAPI)
yield api
# Producer / Consumer fixtures #
| ClientFactories |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 3120,
"end": 3883
} | class ____(MemoryLeakMixin, TestCase):
def test_list_to_from_meminfo(self):
"""
Exercise listobject.{_as_meminfo, _from_meminfo}
"""
@njit
def boxer():
l = listobject.new_list(int32)
for i in range(10, 20):
l.append(i)
return listobject._as_meminfo(l)
lsttype = types.ListType(int32)
@njit
def unboxer(mi):
l = listobject._from_meminfo(mi, lsttype)
return l[0], l[1], l[2], l[3], l[4], l[5], l[6], l[7], l[8], l[9]
mi = boxer()
self.assertEqual(mi.refcount, 1)
received = list(unboxer(mi))
expected = list(range(10, 20))
self.assertEqual(received, expected)
| TestToFromMeminfo |
python | pytorch__pytorch | torch/masked/maskedtensor/core.py | {
"start": 4328,
"end": 13059
} | class ____(torch.Tensor):
@staticmethod
def __new__(cls, data, mask, requires_grad=False):
if is_masked_tensor(data) or not torch.is_tensor(data):
raise TypeError("data must be a Tensor")
if is_masked_tensor(mask) or not torch.is_tensor(mask):
raise TypeError("mask must be a Tensor")
# Use a Tensor that of the give size for the wrapper.
kwargs = {
"device": data.device,
"dtype": data.dtype,
"layout": data.layout,
"requires_grad": requires_grad,
"dispatch_sizes_strides_policy": "strides",
"dispatch_layout": True,
}
warnings.warn(
(
"The PyTorch API of MaskedTensors is in prototype stage "
"and will change in the near future. Please open a Github issue "
"for features requests and see our documentation on the torch.masked "
"module for further information about the project."
),
UserWarning,
stacklevel=2,
)
if data.requires_grad:
warnings.warn(
"It is not recommended to create a MaskedTensor with a tensor that requires_grad. "
"To avoid this, you can use data.detach().clone()",
UserWarning,
stacklevel=2,
)
# pyrefly: ignore [bad-argument-type]
return torch.Tensor._make_wrapper_subclass(cls, data.size(), **kwargs)
def _preprocess_data(self, data, mask):
from .._ops import _sparse_coo_where, _sparse_csr_where
if data.layout != mask.layout:
raise TypeError("data and mask must have the same layout.")
if data.layout == torch.sparse_coo:
data = data.coalesce()
mask = mask.coalesce()
if data._nnz() != mask._nnz():
data = _sparse_coo_where(mask, data, torch.tensor(0))
elif data.layout == torch.sparse_csr:
if data._nnz() != mask._nnz():
data = _sparse_csr_where(mask, data, torch.tensor(0))
# Have to pick awkward names to not conflict with existing fields such as data
self._masked_data = data.clone()
self._masked_mask = mask.clone()
def _validate_members(self):
data = self._masked_data
mask = self.get_mask()
if type(data) is not type(mask):
raise TypeError(
f"data and mask must have the same type. Got {type(data)} and {type(mask)}"
)
if data.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}:
raise TypeError(f"data layout of {data.layout} is not supported.")
if data.layout == torch.sparse_coo:
if not _tensors_match(data.indices(), mask.indices(), exact=True):
raise ValueError(
"data and mask are both sparse COO tensors but do not have the same indices."
)
elif data.layout == torch.sparse_csr:
if not _tensors_match(
data.crow_indices(), mask.crow_indices(), exact=True
) or not _tensors_match(data.col_indices(), mask.col_indices(), exact=True):
raise ValueError(
"data and mask are both sparse CSR tensors but do not share either crow or col indices."
)
if mask.dtype != torch.bool:
raise TypeError("mask must have dtype bool.")
if not (
data.dtype == torch.float16
or data.dtype == torch.float32
or data.dtype == torch.float64
or data.dtype == torch.bool
or data.dtype == torch.int8
or data.dtype == torch.int16
or data.dtype == torch.int32
or data.dtype == torch.int64
):
raise TypeError(f"{data.dtype} is not supported in MaskedTensor.")
if data.dim() != mask.dim():
raise ValueError("data.dim() must equal mask.dim()")
if data.size() != mask.size():
raise ValueError("data.size() must equal mask.size()")
def __init__(self, data, mask, requires_grad=False):
self._preprocess_data(data, mask)
self._validate_members()
@staticmethod
def _from_values(data, mask):
"""Differentiable constructor for MaskedTensor"""
class Constructor(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, data, mask):
return MaskedTensor(data, mask)
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
return grad_output, None
result = Constructor.apply(data, mask)
return result
def _set_data_mask(self, data, mask):
self._masked_data = data
self._masked_mask = mask
self._validate_members()
def __repr__(self): # type: ignore[override]
formatter = "{0:8.4f}"
if self.dim() == 0:
scalar_data = self.get_data().item()
data_formatted = (
formatter.format(scalar_data)
if isinstance(scalar_data, float)
else str(scalar_data)
)
if not self.get_mask().item():
data_formatted = "--"
return (
"MaskedTensor("
+ data_formatted
+ ", "
+ str(self.get_mask().item())
+ ")"
)
s = _masked_tensor_str(self.get_data(), self.get_mask(), formatter)
s = "\n".join(" " + si for si in s.split("\n"))
return "MaskedTensor(\n" + s + "\n)"
# Seems like this needs to be defined before torch_dispatch to work
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
from ._ops_refs import _MASKEDTENSOR_FUNCTION_TABLE
if func in _MASKEDTENSOR_FUNCTION_TABLE:
return _MASKEDTENSOR_FUNCTION_TABLE[func](*args, **kwargs)
if not all(issubclass(cls, t) for t in types):
return NotImplemented
with torch._C.DisableTorchFunctionSubclass():
ret = func(*args, **kwargs)
if func in get_default_nowrap_functions():
return ret
else:
return torch._tensor._convert(ret, cls)
@classmethod
def unary(cls, fn, data, mask):
return MaskedTensor(fn(data), mask)
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs): # type: ignore[override]
func = func.overloadpacket
from ._ops_refs import _MASKEDTENSOR_DISPATCH_TABLE
if func in _MASKEDTENSOR_DISPATCH_TABLE:
return _MASKEDTENSOR_DISPATCH_TABLE[func](*args, **kwargs)
msg = (
f"{func.__name__} is not implemented in __torch_dispatch__ for MaskedTensor.\n"
"If you would like this operator to be supported, please file an issue for a feature request at "
"https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n"
"In the case that the semantics for the operator are not trivial, it would be appreciated "
"to also include a proposal for the semantics."
)
warnings.warn(msg, stacklevel=2)
return NotImplemented
def __lt__(self, other):
if is_masked_tensor(other):
return MaskedTensor(self.get_data() < _get_data(other), self.get_mask())
return MaskedTensor(self.get_data() < other, self.get_mask())
def to_tensor(self, value):
return self.get_data().masked_fill(~self.get_mask(), value)
def get_data(self):
class GetData(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, self):
return self._masked_data.detach()
@staticmethod
# pyrefly: ignore [bad-override]
def backward(ctx, grad_output):
if is_masked_tensor(grad_output):
return grad_output
return MaskedTensor(grad_output, self.get_mask())
return GetData.apply(self)
def get_mask(self):
return self._masked_mask
def is_sparse_coo(self):
return self.layout == torch.sparse_coo
def is_sparse_csr(self): # type: ignore[override]
return self.layout == torch.sparse_csr
# Update later to support more sparse layouts
@property
def is_sparse(self): # type: ignore[override]
return self.is_sparse_coo() or self.is_sparse_csr()
| MaskedTensor |
python | ray-project__ray | python/ray/llm/_internal/common/models.py | {
"start": 381,
"end": 1303
} | class ____:
"""Thread-safe global ID manager for assigning unique IDs."""
def __init__(self):
self._counter = 0
self._lock = threading.Lock()
def next(self) -> int:
"""Get the next unique ID."""
with self._lock:
self._counter += 1
return self._counter
# Global instance
global_id_manager = GlobalIdManager()
def make_async(_func: Callable[..., T]) -> Callable[..., Awaitable[T]]:
"""Take a blocking function, and run it on in an executor thread.
This function prevents the blocking function from blocking the asyncio event loop.
The code in this function needs to be thread safe.
"""
def _async_wrapper(*args, **kwargs) -> asyncio.Future:
loop = asyncio.get_event_loop()
func = partial(_func, *args, **kwargs)
return loop.run_in_executor(executor=None, func=func)
return _async_wrapper
| GlobalIdManager |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 32167,
"end": 33420
} | class ____(CythonBase, libpython.PythonInfo):
"""
Implementation of the interface dictated by libpython.LanguageInfo.
"""
def lineno(self, frame):
# Take care of the Python and Cython levels. We need to care for both
# as we can't simply dispatch to 'py-step', since that would work for
# stepping through Python code, but it would not step back into Cython-
# related code. The C level should be dispatched to the 'step' command.
if self.is_cython_function(frame):
return self.get_cython_lineno(frame)[1]
return super().lineno(frame)
def get_source_line(self, frame):
try:
line = super().get_source_line(frame)
except gdb.GdbError:
return None
else:
return line.strip() or None
def exc_info(self, frame):
if self.is_python_function:
return super().exc_info(frame)
def runtime_break_functions(self):
if self.is_cython_function():
return self.get_cython_function().step_into_functions
return ()
def static_break_functions(self):
result = ['PyEval_EvalFrameEx']
result.extend(self.cy.functions_by_cname)
return result
| CythonInfo |
python | pypa__pip | src/pip/_vendor/truststore/_api.py | {
"start": 2369,
"end": 11413
} | class ____(_truststore_SSLContext_super_class): # type: ignore[misc]
"""SSLContext API that uses system certificates on all platforms"""
@property # type: ignore[misc]
def __class__(self) -> type:
# Dirty hack to get around isinstance() checks
# for ssl.SSLContext instances in aiohttp/trustme
# when using non-CPython implementations.
return _truststore_SSLContext_dunder_class or SSLContext
def __init__(self, protocol: int = None) -> None: # type: ignore[assignment]
self._ctx = _original_SSLContext(protocol)
self._ctx_lock = threading.Lock()
class TruststoreSSLObject(ssl.SSLObject):
# This object exists because wrap_bio() doesn't
# immediately do the handshake so we need to do
# certificate verifications after SSLObject.do_handshake()
def do_handshake(self) -> None:
ret = super().do_handshake()
_verify_peercerts(self, server_hostname=self.server_hostname)
return ret
self._ctx.sslobject_class = TruststoreSSLObject
def wrap_socket(
self,
sock: socket.socket,
server_side: bool = False,
do_handshake_on_connect: bool = True,
suppress_ragged_eofs: bool = True,
server_hostname: str | None = None,
session: ssl.SSLSession | None = None,
) -> ssl.SSLSocket:
# We need to lock around the .__enter__()
# but we don't need to lock within the
# context manager, so we need to expand the
# syntactic sugar of the `with` statement.
with contextlib.ExitStack() as stack:
with self._ctx_lock:
stack.enter_context(_configure_context(self._ctx))
ssl_sock = self._ctx.wrap_socket(
sock,
server_side=server_side,
server_hostname=server_hostname,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
session=session,
)
try:
_verify_peercerts(ssl_sock, server_hostname=server_hostname)
except Exception:
ssl_sock.close()
raise
return ssl_sock
def wrap_bio(
self,
incoming: ssl.MemoryBIO,
outgoing: ssl.MemoryBIO,
server_side: bool = False,
server_hostname: str | None = None,
session: ssl.SSLSession | None = None,
) -> ssl.SSLObject:
with _configure_context(self._ctx):
ssl_obj = self._ctx.wrap_bio(
incoming,
outgoing,
server_hostname=server_hostname,
server_side=server_side,
session=session,
)
return ssl_obj
def load_verify_locations(
self,
cafile: str | bytes | os.PathLike[str] | os.PathLike[bytes] | None = None,
capath: str | bytes | os.PathLike[str] | os.PathLike[bytes] | None = None,
cadata: typing.Union[str, "Buffer", None] = None,
) -> None:
return self._ctx.load_verify_locations(
cafile=cafile, capath=capath, cadata=cadata
)
def load_cert_chain(
self,
certfile: _StrOrBytesPath,
keyfile: _StrOrBytesPath | None = None,
password: _PasswordType | None = None,
) -> None:
return self._ctx.load_cert_chain(
certfile=certfile, keyfile=keyfile, password=password
)
def load_default_certs(
self, purpose: ssl.Purpose = ssl.Purpose.SERVER_AUTH
) -> None:
return self._ctx.load_default_certs(purpose)
def set_alpn_protocols(self, alpn_protocols: typing.Iterable[str]) -> None:
return self._ctx.set_alpn_protocols(alpn_protocols)
def set_npn_protocols(self, npn_protocols: typing.Iterable[str]) -> None:
return self._ctx.set_npn_protocols(npn_protocols)
def set_ciphers(self, __cipherlist: str) -> None:
return self._ctx.set_ciphers(__cipherlist)
def get_ciphers(self) -> typing.Any:
return self._ctx.get_ciphers()
def session_stats(self) -> dict[str, int]:
return self._ctx.session_stats()
def cert_store_stats(self) -> dict[str, int]:
raise NotImplementedError()
def set_default_verify_paths(self) -> None:
self._ctx.set_default_verify_paths()
@typing.overload
def get_ca_certs(
self, binary_form: typing.Literal[False] = ...
) -> list[typing.Any]: ...
@typing.overload
def get_ca_certs(self, binary_form: typing.Literal[True] = ...) -> list[bytes]: ...
@typing.overload
def get_ca_certs(self, binary_form: bool = ...) -> typing.Any: ...
def get_ca_certs(self, binary_form: bool = False) -> list[typing.Any] | list[bytes]:
raise NotImplementedError()
@property
def check_hostname(self) -> bool:
return self._ctx.check_hostname
@check_hostname.setter
def check_hostname(self, value: bool) -> None:
self._ctx.check_hostname = value
@property
def hostname_checks_common_name(self) -> bool:
return self._ctx.hostname_checks_common_name
@hostname_checks_common_name.setter
def hostname_checks_common_name(self, value: bool) -> None:
self._ctx.hostname_checks_common_name = value
@property
def keylog_filename(self) -> str:
return self._ctx.keylog_filename
@keylog_filename.setter
def keylog_filename(self, value: str) -> None:
self._ctx.keylog_filename = value
@property
def maximum_version(self) -> ssl.TLSVersion:
return self._ctx.maximum_version
@maximum_version.setter
def maximum_version(self, value: ssl.TLSVersion) -> None:
_original_super_SSLContext.maximum_version.__set__( # type: ignore[attr-defined]
self._ctx, value
)
@property
def minimum_version(self) -> ssl.TLSVersion:
return self._ctx.minimum_version
@minimum_version.setter
def minimum_version(self, value: ssl.TLSVersion) -> None:
_original_super_SSLContext.minimum_version.__set__( # type: ignore[attr-defined]
self._ctx, value
)
@property
def options(self) -> ssl.Options:
return self._ctx.options
@options.setter
def options(self, value: ssl.Options) -> None:
_original_super_SSLContext.options.__set__( # type: ignore[attr-defined]
self._ctx, value
)
@property
def post_handshake_auth(self) -> bool:
return self._ctx.post_handshake_auth
@post_handshake_auth.setter
def post_handshake_auth(self, value: bool) -> None:
self._ctx.post_handshake_auth = value
@property
def protocol(self) -> ssl._SSLMethod:
return self._ctx.protocol
@property
def security_level(self) -> int:
return self._ctx.security_level
@property
def verify_flags(self) -> ssl.VerifyFlags:
return self._ctx.verify_flags
@verify_flags.setter
def verify_flags(self, value: ssl.VerifyFlags) -> None:
_original_super_SSLContext.verify_flags.__set__( # type: ignore[attr-defined]
self._ctx, value
)
@property
def verify_mode(self) -> ssl.VerifyMode:
return self._ctx.verify_mode
@verify_mode.setter
def verify_mode(self, value: ssl.VerifyMode) -> None:
_original_super_SSLContext.verify_mode.__set__( # type: ignore[attr-defined]
self._ctx, value
)
# Python 3.13+ makes get_unverified_chain() a public API that only returns DER
# encoded certificates. We detect whether we need to call public_bytes() for 3.10->3.12
# Pre-3.13 returned None instead of an empty list from get_unverified_chain()
if sys.version_info >= (3, 13):
def _get_unverified_chain_bytes(sslobj: ssl.SSLObject) -> list[bytes]:
unverified_chain = sslobj.get_unverified_chain() or ()
return [
cert if isinstance(cert, bytes) else cert.public_bytes(_ssl.ENCODING_DER)
for cert in unverified_chain
]
else:
def _get_unverified_chain_bytes(sslobj: ssl.SSLObject) -> list[bytes]:
unverified_chain = sslobj.get_unverified_chain() or () # type: ignore[attr-defined]
return [cert.public_bytes(_ssl.ENCODING_DER) for cert in unverified_chain]
def _verify_peercerts(
sock_or_sslobj: ssl.SSLSocket | ssl.SSLObject, server_hostname: str | None
) -> None:
"""
Verifies the peer certificates from an SSLSocket or SSLObject
against the certificates in the OS trust store.
"""
sslobj: ssl.SSLObject = sock_or_sslobj # type: ignore[assignment]
try:
while not hasattr(sslobj, "get_unverified_chain"):
sslobj = sslobj._sslobj # type: ignore[attr-defined]
except AttributeError:
pass
cert_bytes = _get_unverified_chain_bytes(sslobj)
_verify_peercerts_impl(
sock_or_sslobj.context, cert_bytes, server_hostname=server_hostname
)
| SSLContext |
python | pypa__setuptools | setuptools/command/install.py | {
"start": 1004,
"end": 5066
} | class ____(orig.install):
"""Use easy_install to install the package, w/dependencies"""
distribution: Distribution # override distutils.dist.Distribution with setuptools.dist.Distribution
user_options = orig.install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
(
'single-version-externally-managed',
None,
"used by system package builders to create 'flat' eggs",
),
]
boolean_options = orig.install.boolean_options + [
'old-and-unmanageable',
'single-version-externally-managed',
]
# Type the same as distutils.command.install.install.sub_commands
# Must keep the second tuple item potentially None due to invariance
new_commands: ClassVar[list[tuple[str, Callable[[Any], bool] | None]]] = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
def initialize_options(self):
SetuptoolsDeprecationWarning.emit(
"setup.py install is deprecated.",
"""
Please avoid running ``setup.py`` directly.
Instead, use pypa/build, pypa/installer or other
standards-based tools.
""",
see_url="https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html",
due_date=(2025, 10, 31),
)
super().initialize_options()
self.old_and_unmanageable = None
self.single_version_externally_managed = None
def finalize_options(self) -> None:
super().finalize_options()
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return orig.install.handle_extra_path(self)
# Ignore extra_path when installing an egg (or being run by another
# command without --root or --single-version-externally-managed
self.path_file = None
self.extra_dirs = ''
return None
@staticmethod
def _called_from_setup(run_frame):
"""
Attempt to detect whether run() was called from setup() or by another
command. If called by setup(), the parent caller will be the
'run_command' method in 'distutils.dist', and *its* caller will be
the 'run_commands' method. If called any other way, the
immediate caller *might* be 'run_command', but it won't have been
called by 'run_commands'. Return True in that case or if a call stack
is unavailable. Return False otherwise.
"""
if run_frame is None:
msg = "Call stack not available. bdist_* commands may fail."
SetuptoolsWarning.emit(msg)
if platform.python_implementation() == 'IronPython':
msg = "For best results, pass -X:Frames to enable call stack."
SetuptoolsWarning.emit(msg)
return True
frames = inspect.getouterframes(run_frame)
for frame in frames[2:4]:
(caller,) = frame[:1]
info = inspect.getframeinfo(caller)
caller_module = caller.f_globals.get('__name__', '')
if caller_module == "setuptools.dist" and info.function == "run_command":
# Starting from v61.0.0 setuptools overwrites dist.run_command
continue
return caller_module == 'distutils.dist' and info.function == 'run_commands'
return False
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = [
cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc
] + install.new_commands
| install |
python | openai__gym | gym/spaces/graph.py | {
"start": 999,
"end": 9771
} | class ____(Space):
r"""A space representing graph information as a series of `nodes` connected with `edges` according to an adjacency matrix represented as a series of `edge_links`.
Example usage::
self.observation_space = spaces.Graph(node_space=space.Box(low=-100, high=100, shape=(3,)), edge_space=spaces.Discrete(3))
"""
def __init__(
self,
node_space: Union[Box, Discrete],
edge_space: Union[None, Box, Discrete],
seed: Optional[Union[int, np.random.Generator]] = None,
):
r"""Constructor of :class:`Graph`.
The argument ``node_space`` specifies the base space that each node feature will use.
This argument must be either a Box or Discrete instance.
The argument ``edge_space`` specifies the base space that each edge feature will use.
This argument must be either a None, Box or Discrete instance.
Args:
node_space (Union[Box, Discrete]): space of the node features.
edge_space (Union[None, Box, Discrete]): space of the node features.
seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space.
"""
assert isinstance(
node_space, (Box, Discrete)
), f"Values of the node_space should be instances of Box or Discrete, got {type(node_space)}"
if edge_space is not None:
assert isinstance(
edge_space, (Box, Discrete)
), f"Values of the edge_space should be instances of None Box or Discrete, got {type(node_space)}"
self.node_space = node_space
self.edge_space = edge_space
super().__init__(None, None, seed)
@property
def is_np_flattenable(self):
"""Checks whether this space can be flattened to a :class:`spaces.Box`."""
return False
def _generate_sample_space(
self, base_space: Union[None, Box, Discrete], num: int
) -> Optional[Union[Box, MultiDiscrete]]:
if num == 0 or base_space is None:
return None
if isinstance(base_space, Box):
return Box(
low=np.array(max(1, num) * [base_space.low]),
high=np.array(max(1, num) * [base_space.high]),
shape=(num,) + base_space.shape,
dtype=base_space.dtype,
seed=self.np_random,
)
elif isinstance(base_space, Discrete):
return MultiDiscrete(nvec=[base_space.n] * num, seed=self.np_random)
else:
raise TypeError(
f"Expects base space to be Box and Discrete, actual space: {type(base_space)}."
)
def sample(
self,
mask: Optional[
Tuple[
Optional[Union[np.ndarray, tuple]],
Optional[Union[np.ndarray, tuple]],
]
] = None,
num_nodes: int = 10,
num_edges: Optional[int] = None,
) -> GraphInstance:
"""Generates a single sample graph with num_nodes between 1 and 10 sampled from the Graph.
Args:
mask: An optional tuple of optional node and edge mask that is only possible with Discrete spaces
(Box spaces don't support sample masks).
If no `num_edges` is provided then the `edge_mask` is multiplied by the number of edges
num_nodes: The number of nodes that will be sampled, the default is 10 nodes
num_edges: An optional number of edges, otherwise, a random number between 0 and `num_nodes`^2
Returns:
A NamedTuple representing a graph with attributes .nodes, .edges, and .edge_links.
"""
assert (
num_nodes > 0
), f"The number of nodes is expected to be greater than 0, actual value: {num_nodes}"
if mask is not None:
node_space_mask, edge_space_mask = mask
else:
node_space_mask, edge_space_mask = None, None
# we only have edges when we have at least 2 nodes
if num_edges is None:
if num_nodes > 1:
# maximal number of edges is `n*(n-1)` allowing self connections and two-way is allowed
num_edges = self.np_random.integers(num_nodes * (num_nodes - 1))
else:
num_edges = 0
if edge_space_mask is not None:
edge_space_mask = tuple(edge_space_mask for _ in range(num_edges))
else:
if self.edge_space is None:
warn(
f"The number of edges is set ({num_edges}) but the edge space is None."
)
assert (
num_edges >= 0
), f"Expects the number of edges to be greater than 0, actual value: {num_edges}"
assert num_edges is not None
sampled_node_space = self._generate_sample_space(self.node_space, num_nodes)
sampled_edge_space = self._generate_sample_space(self.edge_space, num_edges)
assert sampled_node_space is not None
sampled_nodes = sampled_node_space.sample(node_space_mask)
sampled_edges = (
sampled_edge_space.sample(edge_space_mask)
if sampled_edge_space is not None
else None
)
sampled_edge_links = None
if sampled_edges is not None and num_edges > 0:
sampled_edge_links = self.np_random.integers(
low=0, high=num_nodes, size=(num_edges, 2)
)
return GraphInstance(sampled_nodes, sampled_edges, sampled_edge_links)
def contains(self, x: GraphInstance) -> bool:
"""Return boolean specifying if x is a valid member of this space."""
if isinstance(x, GraphInstance):
# Checks the nodes
if isinstance(x.nodes, np.ndarray):
if all(node in self.node_space for node in x.nodes):
# Check the edges and edge links which are optional
if isinstance(x.edges, np.ndarray) and isinstance(
x.edge_links, np.ndarray
):
assert x.edges is not None
assert x.edge_links is not None
if self.edge_space is not None:
if all(edge in self.edge_space for edge in x.edges):
if np.issubdtype(x.edge_links.dtype, np.integer):
if x.edge_links.shape == (len(x.edges), 2):
if np.all(
np.logical_and(
x.edge_links >= 0,
x.edge_links < len(x.nodes),
)
):
return True
else:
return x.edges is None and x.edge_links is None
return False
def __repr__(self) -> str:
"""A string representation of this space.
The representation will include node_space and edge_space
Returns:
A representation of the space
"""
return f"Graph({self.node_space}, {self.edge_space})"
def __eq__(self, other) -> bool:
"""Check whether `other` is equivalent to this instance."""
return (
isinstance(other, Graph)
and (self.node_space == other.node_space)
and (self.edge_space == other.edge_space)
)
def to_jsonable(self, sample_n: NamedTuple) -> list:
"""Convert a batch of samples from this space to a JSONable data type."""
# serialize as list of dicts
ret_n = []
for sample in sample_n:
ret = {}
ret["nodes"] = sample.nodes.tolist()
if sample.edges is not None:
ret["edges"] = sample.edges.tolist()
ret["edge_links"] = sample.edge_links.tolist()
ret_n.append(ret)
return ret_n
def from_jsonable(self, sample_n: Sequence[dict]) -> list:
"""Convert a JSONable data type to a batch of samples from this space."""
ret = []
for sample in sample_n:
if "edges" in sample:
ret_n = GraphInstance(
np.asarray(sample["nodes"]),
np.asarray(sample["edges"]),
np.asarray(sample["edge_links"]),
)
else:
ret_n = GraphInstance(
np.asarray(sample["nodes"]),
None,
None,
)
ret.append(ret_n)
return ret
| Graph |
python | pypa__setuptools | setuptools/build_meta.py | {
"start": 2346,
"end": 5125
} | class ____(setuptools.dist.Distribution):
def fetch_build_eggs(self, specifiers) -> NoReturn:
specifier_list = list(parse_strings(specifiers))
raise SetupRequirementsError(specifier_list)
@classmethod
@contextlib.contextmanager
def patch(cls) -> Iterator[None]:
"""
Replace
distutils.dist.Distribution with this class
for the duration of this context.
"""
orig = distutils.core.Distribution
distutils.core.Distribution = cls # type: ignore[misc] # monkeypatching
try:
yield
finally:
distutils.core.Distribution = orig # type: ignore[misc] # monkeypatching
@contextlib.contextmanager
def no_install_setup_requires():
"""Temporarily disable installing setup_requires
Under PEP 517, the backend reports build dependencies to the frontend,
and the frontend is responsible for ensuring they're installed.
So setuptools (acting as a backend) should not try to install them.
"""
orig = setuptools._install_setup_requires
setuptools._install_setup_requires = lambda attrs: None
try:
yield
finally:
setuptools._install_setup_requires = orig
def _get_immediate_subdirectories(a_dir):
return [
name for name in os.listdir(a_dir) if os.path.isdir(os.path.join(a_dir, name))
]
def _file_with_extension(directory: StrPath, extension: str | tuple[str, ...]):
matching = (f for f in os.listdir(directory) if f.endswith(extension))
try:
(file,) = matching
except ValueError:
raise ValueError(
'No distribution was found. Ensure that `setup.py` '
'is not empty and that it calls `setup()`.'
) from None
return file
def _open_setup_script(setup_script):
if not os.path.exists(setup_script):
# Supply a default setup.py
return io.StringIO("from setuptools import setup; setup()")
return tokenize.open(setup_script)
@contextlib.contextmanager
def suppress_known_deprecation():
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'setup.py install is deprecated')
yield
_ConfigSettings: TypeAlias = Union[Mapping[str, Union[str, list[str], None]], None]
"""
Currently the user can run::
pip install -e . --config-settings key=value
python -m build -C--key=value -C key=value
- pip will pass both key and value as strings and overwriting repeated keys
(pypa/pip#11059).
- build will accumulate values associated with repeated keys in a list.
It will also accept keys with no associated value.
This means that an option passed by build can be ``str | list[str] | None``.
- PEP 517 specifies that ``config_settings`` is an optional dict.
"""
| Distribution |
python | PyCQA__pylint | tests/functional/u/unhashable_member.py | {
"start": 105,
"end": 598
} | class ____:
__hash__ = list.__hash__
# Subscripts
{}[[1, 2, 3]] # [unhashable-member]
{}[{}] # [unhashable-member]
{}[Unhashable()] # [unhashable-member]
{'foo': 'bar'}['foo']
{'foo': 'bar'}[42]
# Keys
{[1, 2, 3]: "tomato"} # [unhashable-member]
{
[1, 2, 3]: "tomato", # [unhashable-member]
[4, 5, 6]: "celeriac", # [unhashable-member]
}
{[1, 2, 3]} # [unhashable-member]
{"tomato": "tomahto"}
{dict: {}}
{lambda x: x: "tomato"} # pylint: disable=unnecessary-lambda
| Unhashable |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 18455,
"end": 19819
} | class ____(CDeclaratorNode):
# name string The Cython name being declared
# cname string or None C name, if specified
# default ExprNode or None the value assigned on declaration
child_attrs = ['default']
default = None
def declared_name(self):
return self.name
def analyse(self, base_type, env, nonempty=0, visibility=None, in_pxd=False):
if nonempty and self.name == '':
# May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
error(self.pos, "Missing argument name")
elif base_type.is_void:
error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
else:
self.name = base_type.declaration_code("", for_display=1, pyrex=1)
base_type = py_object_type
if base_type.is_fused and env.fused_to_specific:
try:
base_type = base_type.specialize(env.fused_to_specific)
except CannotSpecialize:
error(self.pos,
"'%s' cannot be specialized since its type is not a fused argument to this function" %
self.name)
self.type = base_type
return self, base_type
| CNameDeclaratorNode |
python | pytorch__pytorch | torch/nn/modules/container.py | {
"start": 1214,
"end": 1397
} | class ____(Module):
def __init__(self, **kwargs: Any) -> None:
super().__init__()
for key, value in kwargs.items():
self.add_module(key, value)
| Container |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 29858,
"end": 30354
} | class ____(_Deannotate, _CoerceLiterals, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
# see #5754 for why we can't easily deprecate this coercion.
# essentially expressions like postgresql_where would have to be
# text() as they come back from reflection and we don't want to
# have text() elements wired into the inspection dictionaries.
return elements.TextClause(element)
| DDLExpressionImpl |
python | numba__numba | numba/tests/test_ir_inlining.py | {
"start": 14230,
"end": 31000
} | class ____(MemoryLeakMixin, InliningBase):
def test_basic_inline_never(self):
def foo():
pass
@overload(foo, inline='never')
def foo_overload():
def foo_impl():
pass
return foo_impl
def impl():
return foo()
self.check(impl, inline_expect={'foo': False})
def test_basic_inline_always(self):
def foo():
pass
@overload(foo, inline='always')
def foo_overload():
def impl():
pass
return impl
def impl():
return foo()
self.check(impl, inline_expect={'foo': True})
def test_inline_always_kw_no_default(self):
# pass call arg by name that doesn't have default value
def foo(a, b):
return a + b
@overload(foo, inline='always')
def overload_foo(a, b):
return lambda a, b: a + b
def impl():
return foo(3, b=4)
self.check(impl, inline_expect={'foo': True})
def test_inline_operators_unary(self):
def impl_inline(x):
return -x
def impl_noinline(x):
return +x
dummy_unary_impl = lambda x: True
Dummy, DummyType = self.make_dummy_type()
setattr(Dummy, '__neg__', dummy_unary_impl)
setattr(Dummy, '__pos__', dummy_unary_impl)
@overload(operator.neg, inline='always')
def overload_dummy_neg(x):
if isinstance(x, DummyType):
return dummy_unary_impl
@overload(operator.pos, inline='never')
def overload_dummy_pos(x):
if isinstance(x, DummyType):
return dummy_unary_impl
self.check(impl_inline, Dummy(), inline_expect={'neg': True})
self.check(impl_noinline, Dummy(), inline_expect={'pos': False})
def test_inline_operators_binop(self):
def impl_inline(x):
return x == 1
def impl_noinline(x):
return x != 1
Dummy, DummyType = self.make_dummy_type()
dummy_binop_impl = lambda a, b: True
setattr(Dummy, '__eq__', dummy_binop_impl)
setattr(Dummy, '__ne__', dummy_binop_impl)
@overload(operator.eq, inline='always')
def overload_dummy_eq(a, b):
if isinstance(a, DummyType):
return dummy_binop_impl
@overload(operator.ne, inline='never')
def overload_dummy_ne(a, b):
if isinstance(a, DummyType):
return dummy_binop_impl
self.check(impl_inline, Dummy(), inline_expect={'eq': True})
self.check(impl_noinline, Dummy(), inline_expect={'ne': False})
def test_inline_operators_inplace_binop(self):
def impl_inline(x):
x += 1
def impl_noinline(x):
x -= 1
Dummy, DummyType = self.make_dummy_type()
dummy_inplace_binop_impl = lambda a, b: True
setattr(Dummy, '__iadd__', dummy_inplace_binop_impl)
setattr(Dummy, '__isub__', dummy_inplace_binop_impl)
@overload(operator.iadd, inline='always')
def overload_dummy_iadd(a, b):
if isinstance(a, DummyType):
return dummy_inplace_binop_impl
@overload(operator.isub, inline='never')
def overload_dummy_isub(a, b):
if isinstance(a, DummyType):
return dummy_inplace_binop_impl
# DummyType is not mutable, so lowering 'inplace_binop' Expr
# re-uses (requires) copying function definition
@overload(operator.add, inline='always')
def overload_dummy_add(a, b):
if isinstance(a, DummyType):
return dummy_inplace_binop_impl
@overload(operator.sub, inline='never')
def overload_dummy_sub(a, b):
if isinstance(a, DummyType):
return dummy_inplace_binop_impl
self.check(impl_inline, Dummy(), inline_expect={'iadd': True})
self.check(impl_noinline, Dummy(), inline_expect={'isub': False})
def test_inline_always_operators_getitem(self):
def impl(x, idx):
return x[idx]
def impl_static_getitem(x):
return x[1]
Dummy, DummyType = self.make_dummy_type()
dummy_getitem_impl = lambda obj, idx: None
setattr(Dummy, '__getitem__', dummy_getitem_impl)
@overload(operator.getitem, inline='always')
def overload_dummy_getitem(obj, idx):
if isinstance(obj, DummyType):
return dummy_getitem_impl
# note getitem and static_getitem Exprs refer to operator.getitem
# hence they are checked using the same expected key
self.check(impl, Dummy(), 1, inline_expect={'getitem': True})
self.check(impl_static_getitem, Dummy(),
inline_expect={'getitem': True})
def test_inline_never_operators_getitem(self):
def impl(x, idx):
return x[idx]
def impl_static_getitem(x):
return x[1]
Dummy, DummyType = self.make_dummy_type()
dummy_getitem_impl = lambda obj, idx: None
setattr(Dummy, '__getitem__', dummy_getitem_impl)
@overload(operator.getitem, inline='never')
def overload_dummy_getitem(obj, idx):
if isinstance(obj, DummyType):
return dummy_getitem_impl
# both getitem and static_getitem Exprs refer to operator.getitem
# hence they are checked using the same expect key
self.check(impl, Dummy(), 1, inline_expect={'getitem': False})
self.check(impl_static_getitem, Dummy(),
inline_expect={'getitem': False})
def test_inline_stararg_error(self):
def foo(a, *b):
return a + b[0]
@overload(foo, inline='always')
def overload_foo(a, *b):
return lambda a, *b: a + b[0]
def impl():
return foo(3, 3, 5)
with self.assertRaises(NotImplementedError) as e:
self.check(impl, inline_expect={'foo': True})
self.assertIn("Stararg not supported in inliner for arg 1 *b",
str(e.exception))
def test_basic_inline_combos(self):
def impl():
x = foo()
y = bar()
z = baz()
return x, y, z
opts = (('always'), ('never'))
for inline_foo, inline_bar, inline_baz in product(opts, opts, opts):
def foo():
pass
def bar():
pass
def baz():
pass
@overload(foo, inline=inline_foo)
def foo_overload():
def impl():
return
return impl
@overload(bar, inline=inline_bar)
def bar_overload():
def impl():
return
return impl
@overload(baz, inline=inline_baz)
def baz_overload():
def impl():
return
return impl
inline_expect = {'foo': self.inline_opt_as_bool[inline_foo],
'bar': self.inline_opt_as_bool[inline_bar],
'baz': self.inline_opt_as_bool[inline_baz]}
self.check(impl, inline_expect=inline_expect)
def test_freevar_bindings(self):
def impl():
x = foo()
y = bar()
z = baz()
return x, y, z
opts = (('always'), ('never'))
for inline_foo, inline_bar, inline_baz in product(opts, opts, opts):
# need to repeatedly clobber definitions of foo, bar, baz so
# @overload binds to the right instance WRT inlining
def foo():
x = 10
y = 20
z = x + 12
return (x, y + 3, z)
def bar():
x = 30
y = 40
z = x + 12
return (x, y + 3, z)
def baz():
x = 60
y = 80
z = x + 12
return (x, y + 3, z)
def factory(target, x, y, inline=None):
z = x + 12
@overload(target, inline=inline)
def func():
def impl():
return (x, y + 3, z)
return impl
factory(foo, 10, 20, inline=inline_foo)
factory(bar, 30, 40, inline=inline_bar)
factory(baz, 60, 80, inline=inline_baz)
inline_expect = {'foo': self.inline_opt_as_bool[inline_foo],
'bar': self.inline_opt_as_bool[inline_bar],
'baz': self.inline_opt_as_bool[inline_baz]}
self.check(impl, inline_expect=inline_expect)
def test_global_overload_binding(self):
def impl():
z = 19
return _global_defn(z)
self.check(impl, inline_expect={'_global_defn': True})
def test_inline_from_another_module(self):
from .inlining_usecases import baz
def impl():
z = _GLOBAL1 + 2
return baz(), z
self.check(impl, inline_expect={'baz': True})
def test_inline_from_another_module_w_getattr(self):
import numba.tests.inlining_usecases as iuc
def impl():
z = _GLOBAL1 + 2
return iuc.baz(), z
self.check(impl, inline_expect={'baz': True})
def test_inline_from_another_module_w_2_getattr(self):
import numba.tests.inlining_usecases # noqa forces registration
import numba.tests as nt
def impl():
z = _GLOBAL1 + 2
return nt.inlining_usecases.baz(), z
self.check(impl, inline_expect={'baz': True})
def test_inline_from_another_module_as_freevar(self):
def factory():
from .inlining_usecases import baz
@njit(inline='always')
def tmp():
return baz()
return tmp
bop = factory()
def impl():
z = _GLOBAL1 + 2
return bop(), z
self.check(impl, inline_expect={'baz': True})
def test_inline_w_freevar_from_another_module(self):
from .inlining_usecases import bop_factory
def gen(a, b):
bar = bop_factory(a)
def impl():
z = _GLOBAL1 + a * b
return bar(), z, a
return impl
impl = gen(10, 20)
self.check(impl, inline_expect={'bar': True})
def test_inlining_models(self):
def s17_caller_model(expr, caller_info, callee_info):
self.assertIsInstance(expr, ir.Expr)
self.assertEqual(expr.op, "call")
return self.sentinel_17_cost_model(caller_info.func_ir)
def s17_callee_model(expr, caller_info, callee_info):
self.assertIsInstance(expr, ir.Expr)
self.assertEqual(expr.op, "call")
return self.sentinel_17_cost_model(callee_info.func_ir)
# caller has sentinel
for caller, callee in ((10, 11), (17, 11)):
def foo():
return callee
@overload(foo, inline=s17_caller_model)
def foo_ol():
def impl():
return callee
return impl
def impl(z):
x = z + caller
y = foo()
return y + 3, x
self.check(impl, 10, inline_expect={'foo': caller == 17})
# callee has sentinel
for caller, callee in ((11, 17), (11, 10)):
def bar():
return callee
@overload(bar, inline=s17_callee_model)
def bar_ol():
def impl():
return callee
return impl
def impl(z):
x = z + caller
y = bar()
return y + 3, x
self.check(impl, 10, inline_expect={'bar': callee == 17})
def test_multiple_overloads_with_different_inline_characteristics(self):
# check that having different inlining options for different overloads
# of the same function works ok
# this is the Python equiv of the overloads below
def bar(x):
if isinstance(typeof(x), types.Float):
return x + 1234
else:
return x + 1
@overload(bar, inline='always')
def bar_int_ol(x):
if isinstance(x, types.Integer):
def impl(x):
return x + 1
return impl
@overload(bar, inline='never')
def bar_float_ol(x):
if isinstance(x, types.Float):
def impl(x):
return x + 1234
return impl
def always_inline_cost_model(*args):
return True
@overload(bar, inline=always_inline_cost_model)
def bar_complex_ol(x):
if isinstance(x, types.Complex):
def impl(x):
return x + 1
return impl
def impl():
a = bar(1) # integer literal, should inline
b = bar(2.3) # float literal, should not inline
# complex literal, should inline by virtue of cost model
c = bar(3j)
return a + b + c
# there should still be a `bar` not inlined
fir = self.check(impl, inline_expect={'bar': False}, block_count=1)
# check there is one call left in the IR
block = next(iter(fir.blocks.items()))[1]
calls = [x for x in block.find_exprs(op='call')]
self.assertTrue(len(calls) == 1)
# check that the constant "1234" is not in the IR
consts = [x.value for x in block.find_insts(ir.Assign)
if isinstance(getattr(x, 'value', None), ir.Const)]
for val in consts:
self.assertNotEqual(val.value, 1234)
def test_overload_inline_always_with_literally_in_inlinee(self):
# See issue #5887
def foo_ovld(dtype):
if not isinstance(dtype, types.StringLiteral):
def foo_noop(dtype):
return literally(dtype)
return foo_noop
if dtype.literal_value == 'str':
def foo_as_str_impl(dtype):
return 10
return foo_as_str_impl
if dtype.literal_value in ('int64', 'float64'):
def foo_as_num_impl(dtype):
return 20
return foo_as_num_impl
# define foo for literal str 'str'
def foo(dtype):
return 10
overload(foo, inline='always')(foo_ovld)
def test_impl(dtype):
return foo(dtype)
# check literal dispatch on 'str'
dtype = 'str'
self.check(test_impl, dtype, inline_expect={'foo': True})
# redefine foo to be correct for literal str 'int64'
def foo(dtype):
return 20
overload(foo, inline='always')(foo_ovld)
# check literal dispatch on 'int64'
dtype = 'int64'
self.check(test_impl, dtype, inline_expect={'foo': True})
def test_inline_always_ssa(self):
# Make sure IR inlining uses SSA properly. Test for #6721.
dummy_true = True
def foo(A):
return True
@overload(foo, inline="always")
def foo_overload(A):
def impl(A):
s = dummy_true
for i in range(len(A)):
dummy = dummy_true
if A[i]:
dummy = A[i]
s *= dummy
return s
return impl
def impl():
return foo(np.array([True, False, True]))
self.check(impl, block_count='SKIP', inline_expect={'foo': True})
def test_inline_always_ssa_scope_validity(self):
# Make sure IR inlining correctly updates the scope(s). See #7802
def bar():
b = 5
while b > 1:
b //= 2
return 10
@overload(bar, inline="always")
def bar_impl():
return bar
@njit
def foo():
bar()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', errors.NumbaIRAssumptionWarning)
ignore_internal_warnings()
self.assertEqual(foo(), foo.py_func())
# There should be no warnings as the IR scopes should be consistent with
# the IR involved.
self.assertEqual(len(w), 0)
| TestOverloadInlining |
python | falconry__falcon | falcon/app.py | {
"start": 56387,
"end": 57019
} | class ____(App):
"""Compatibility alias of :class:`falcon.App`.
``API`` was renamed to :class:`App <falcon.App>` in Falcon 3.0 in order to
reflect the breadth of applications that :class:`App <falcon.App>`, and its
ASGI counterpart in particular, can now be used for.
.. deprecated:: 3.0
This compatibility alias is deprecated; it will be removed entirely in
Falcon 5.0.
"""
@deprecation.deprecated(
'The API class will be removed in Falcon 5.0, use falcon.App instead.'
)
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| API |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 1825,
"end": 1868
} | class ____:
def foo(self):
pass
| A |
python | spack__spack | lib/spack/spack/test/util/unparse/unparse.py | {
"start": 3163,
"end": 14405
} | class ____:
x: int
y: int
def location(point):
match point:
case Point(x=0, y=0):
print("Origin is the point's location.")
case Point(x=0, y=y):
print(f"Y={y} and the point is on the y-axis.")
case Point(x=x, y=0):
print(f"X={x} and the point is on the x-axis.")
case Point():
print("The point is located somewhere else on the plane.")
case _:
print("Not a point")
"""
match_nested = """\
match points:
case []:
print("No points in the list.")
case [Point(0, 0)]:
print("The origin is the only point in the list.")
case [Point(x, y)]:
print(f"A single point {x}, {y} is in the list.")
case [Point(0, y1), Point(0, y2)]:
print(f"Two points on the Y axis at {y1}, {y2} are in the list.")
case _:
print("Something else is found in the list.")
"""
def check_ast_roundtrip(code1, filename="internal", mode="exec"):
ast1 = compile(str(code1), filename, mode, ast.PyCF_ONLY_AST)
code2 = spack.util.unparse.unparse(ast1)
ast2 = compile(code2, filename, mode, ast.PyCF_ONLY_AST)
error_msg = "Failed to roundtrip {} [mode={}]".format(filename, mode)
assert ast.dump(ast1) == ast.dump(ast2), error_msg
def test_core_lib_files():
"""Roundtrip source files from the Python core libs."""
test_directories = [
os.path.join(
getattr(sys, "real_prefix", sys.prefix), "lib", "python%s.%s" % sys.version_info[:2]
)
]
names = []
for test_dir in test_directories:
for n in os.listdir(test_dir):
if n.endswith(".py") and not n.startswith("bad"):
names.append(os.path.join(test_dir, n))
for filename in names:
source = read_pyfile(filename)
check_ast_roundtrip(source)
@pytest.mark.skipif(sys.version_info[:2] < (3, 6), reason="Only for Python 3.6 or greater")
def test_simple_fstring():
check_ast_roundtrip("f'{x}'")
@pytest.mark.skipif(sys.version_info[:2] < (3, 6), reason="Only for Python 3.6 or greater")
def test_fstrings():
# See issue 25180
check_ast_roundtrip(r"""f'{f"{0}"*3}'""")
check_ast_roundtrip(r"""f'{f"{y}"*3}'""")
check_ast_roundtrip("""f''""")
check_ast_roundtrip('''f"""'end' "quote\\""""''')
@pytest.mark.skipif(sys.version_info[:2] < (3, 6), reason="Only for Python 3.6 or greater")
def test_fstrings_complicated():
# See issue 28002
check_ast_roundtrip("""f'''{"'"}'''""")
check_ast_roundtrip('''f\'\'\'-{f"""*{f"+{f'.{x}.'}+"}*"""}-\'\'\'''')
check_ast_roundtrip('''f\'\'\'-{f"""*{f"+{f'.{x}.'}+"}*"""}-'single quote\\'\'\'\'''')
check_ast_roundtrip("f\"\"\"{'''\n'''}\"\"\"")
check_ast_roundtrip("f\"\"\"{g('''\n''')}\"\"\"")
check_ast_roundtrip('''f"a\\r\\nb"''')
check_ast_roundtrip('''f"\\u2028{'x'}"''')
def test_parser_modes():
for mode in ["exec", "single", "eval"]:
check_ast_roundtrip(code_parseable_in_all_parser_modes, mode=mode)
def test_del_statement():
check_ast_roundtrip("del x, y, z")
def test_shifts():
check_ast_roundtrip("45 << 2")
check_ast_roundtrip("13 >> 7")
def test_for_else():
check_ast_roundtrip(for_else)
def test_while_else():
check_ast_roundtrip(while_else)
def test_unary_parens():
check_ast_roundtrip("(-1)**7")
check_ast_roundtrip("(-1.)**8")
check_ast_roundtrip("(-1j)**6")
check_ast_roundtrip("not True or False")
check_ast_roundtrip("True or not False")
@pytest.mark.skipif(sys.version_info >= (3, 6), reason="Only works for Python < 3.6")
def test_integer_parens():
check_ast_roundtrip("3 .__abs__()")
def test_huge_float():
check_ast_roundtrip("1e1000")
check_ast_roundtrip("-1e1000")
check_ast_roundtrip("1e1000j")
check_ast_roundtrip("-1e1000j")
def test_min_int30():
check_ast_roundtrip(str(-(2**31)))
check_ast_roundtrip(str(-(2**63)))
def test_imaginary_literals():
check_ast_roundtrip("7j")
check_ast_roundtrip("-7j")
check_ast_roundtrip("0j")
check_ast_roundtrip("-0j")
def test_negative_zero():
check_ast_roundtrip("-0")
check_ast_roundtrip("-(0)")
check_ast_roundtrip("-0b0")
check_ast_roundtrip("-(0b0)")
check_ast_roundtrip("-0o0")
check_ast_roundtrip("-(0o0)")
check_ast_roundtrip("-0x0")
check_ast_roundtrip("-(0x0)")
def test_lambda_parentheses():
check_ast_roundtrip("(lambda: int)()")
def test_chained_comparisons():
check_ast_roundtrip("1 < 4 <= 5")
check_ast_roundtrip("a is b is c is not d")
def test_function_arguments():
check_ast_roundtrip("def f(): pass")
check_ast_roundtrip("def f(a): pass")
check_ast_roundtrip("def f(b = 2): pass")
check_ast_roundtrip("def f(a, b): pass")
check_ast_roundtrip("def f(a, b = 2): pass")
check_ast_roundtrip("def f(a = 5, b = 2): pass")
check_ast_roundtrip("def f(*args, **kwargs): pass")
check_ast_roundtrip("def f(*, a = 1, b = 2): pass")
check_ast_roundtrip("def f(*, a = 1, b): pass")
check_ast_roundtrip("def f(*, a, b = 2): pass")
check_ast_roundtrip("def f(a, b = None, *, c, **kwds): pass")
check_ast_roundtrip("def f(a=2, *args, c=5, d, **kwds): pass")
def test_relative_import():
check_ast_roundtrip(relative_import)
def test_import_many():
check_ast_roundtrip(import_many)
def test_nonlocal():
check_ast_roundtrip(nonlocal_ex)
def test_raise_from():
check_ast_roundtrip(raise_from)
def test_bytes():
check_ast_roundtrip("b'123'")
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Not supported < 3.6")
def test_formatted_value():
check_ast_roundtrip('f"{value}"')
check_ast_roundtrip('f"{value!s}"')
check_ast_roundtrip('f"{value:4}"')
check_ast_roundtrip('f"{value!s:4}"')
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Not supported < 3.6")
def test_joined_str():
check_ast_roundtrip('f"{key}={value!s}"')
check_ast_roundtrip('f"{key}={value!r}"')
check_ast_roundtrip('f"{key}={value!a}"')
@pytest.mark.skipif(sys.version_info != (3, 6, 0), reason="Only supported on 3.6.0")
def test_joined_str_361():
check_ast_roundtrip('f"{key:4}={value!s}"')
check_ast_roundtrip('f"{key:02}={value!r}"')
check_ast_roundtrip('f"{key:6}={value!a}"')
check_ast_roundtrip('f"{key:4}={value:#06x}"')
check_ast_roundtrip('f"{key:02}={value:#06x}"')
check_ast_roundtrip('f"{key:6}={value:#06x}"')
check_ast_roundtrip('f"{key:4}={value!s:#06x}"')
check_ast_roundtrip('f"{key:4}={value!r:#06x}"')
check_ast_roundtrip('f"{key:4}={value!a:#06x}"')
@pytest.mark.skipif(sys.version_info[:2] < (3, 6), reason="Only for Python 3.6 or greater")
def test_complex_f_string():
check_ast_roundtrip(complex_f_string)
def test_annotations():
check_ast_roundtrip("def f(a : int): pass")
check_ast_roundtrip("def f(a: int = 5): pass")
check_ast_roundtrip("def f(*args: [int]): pass")
check_ast_roundtrip("def f(**kwargs: dict): pass")
check_ast_roundtrip("def f() -> None: pass")
@pytest.mark.skipif(sys.version_info < (2, 7), reason="Not supported < 2.7")
def test_set_literal():
check_ast_roundtrip("{'a', 'b', 'c'}")
@pytest.mark.skipif(sys.version_info < (2, 7), reason="Not supported < 2.7")
def test_set_comprehension():
check_ast_roundtrip("{x for x in range(5)}")
@pytest.mark.skipif(sys.version_info < (2, 7), reason="Not supported < 2.7")
def test_dict_comprehension():
check_ast_roundtrip("{x: x*x for x in range(10)}")
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Not supported < 3.6")
def test_dict_with_unpacking():
check_ast_roundtrip("{**x}")
check_ast_roundtrip("{a: b, **x}")
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Not supported < 3.6")
def test_async_comp_and_gen_in_async_function():
check_ast_roundtrip(async_comprehensions_and_generators)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Not supported < 3.7")
def test_async_comprehension():
check_ast_roundtrip("{i async for i in aiter() if i % 2}")
check_ast_roundtrip("[i async for i in aiter() if i % 2]")
check_ast_roundtrip("{i: -i async for i in aiter() if i % 2}")
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Not supported < 3.7")
def test_async_generator_expression():
check_ast_roundtrip("(i ** 2 async for i in agen())")
check_ast_roundtrip("(i - 1 async for i in agen() if i % 2)")
def test_class_decorators():
check_ast_roundtrip(class_decorator)
def test_class_definition():
check_ast_roundtrip("class A(metaclass=type, *[], **{}): pass")
def test_elifs():
check_ast_roundtrip(elif1)
check_ast_roundtrip(elif2)
def test_try_except_finally():
check_ast_roundtrip(try_except_finally)
def test_starred_assignment():
check_ast_roundtrip("a, *b, c = seq")
check_ast_roundtrip("a, (*b, c) = seq")
check_ast_roundtrip("a, *b[0], c = seq")
check_ast_roundtrip("a, *(b, c) = seq")
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Not supported < 3.6")
def test_variable_annotation():
check_ast_roundtrip("a: int")
check_ast_roundtrip("a: int = 0")
check_ast_roundtrip("a: int = None")
check_ast_roundtrip("some_list: List[int]")
check_ast_roundtrip("some_list: List[int] = []")
check_ast_roundtrip("t: Tuple[int, ...] = (1, 2, 3)")
check_ast_roundtrip("(a): int")
check_ast_roundtrip("(a): int = 0")
check_ast_roundtrip("(a): int = None")
def test_with_simple():
check_ast_roundtrip(with_simple)
def test_with_as():
check_ast_roundtrip(with_as)
@pytest.mark.skipif(sys.version_info < (2, 7), reason="Not supported < 2.7")
def test_with_two_items():
check_ast_roundtrip(with_two_items)
@pytest.mark.skipif(sys.version_info < (3, 5), reason="Not supported < 3.5")
def test_async_function_def():
check_ast_roundtrip(async_function_def)
@pytest.mark.skipif(sys.version_info < (3, 5), reason="Not supported < 3.5")
def test_async_for():
check_ast_roundtrip(async_for)
@pytest.mark.skipif(sys.version_info < (3, 5), reason="Not supported < 3.5")
def test_async_with():
check_ast_roundtrip(async_with)
@pytest.mark.skipif(sys.version_info < (3, 5), reason="Not supported < 3.5")
def test_async_with_as():
check_ast_roundtrip(async_with_as)
@pytest.mark.skipif(sys.version_info < (3, 10), reason="Not supported < 3.10")
@pytest.mark.parametrize(
"literal",
[match_literal, match_with_noop, match_literal_and_variable, match_classes, match_nested],
)
def test_match_literal(literal):
check_ast_roundtrip(literal)
def test_subscript_with_tuple():
"""Test change in visit_Subscript/visit_Index is_non_empty_tuple."""
check_ast_roundtrip("a[()]")
check_ast_roundtrip("a[b]")
check_ast_roundtrip("a[(*b,)]")
check_ast_roundtrip("a[(1, 2)]")
check_ast_roundtrip("a[(1, *b)]")
@pytest.mark.skipif(sys.version_info < (3, 11), reason="Not supported < 3.11")
def test_subscript_without_tuple():
"""Test change in visit_Subscript/visit_Index is_non_empty_tuple."""
check_ast_roundtrip("a[*b]")
check_ast_roundtrip("a[1, *b]")
def test_attribute_on_int():
check_ast_roundtrip("1 .__abs__()")
| Point |
python | doocs__leetcode | solution/2300-2399/2350.Shortest Impossible Sequence of Rolls/Solution.py | {
"start": 0,
"end": 258
} | class ____:
def shortestSequence(self, rolls: List[int], k: int) -> int:
ans = 1
s = set()
for v in rolls:
s.add(v)
if len(s) == k:
ans += 1
s.clear()
return ans
| Solution |
python | apache__airflow | providers/apache/cassandra/tests/unit/apache/cassandra/sensors/test_table.py | {
"start": 1075,
"end": 3103
} | class ____:
@patch("airflow.providers.apache.cassandra.sensors.table.CassandraHook")
def test_poke(self, mock_hook):
sensor = CassandraTableSensor(
task_id="test_task",
cassandra_conn_id=TEST_CASSANDRA_CONN_ID,
table=TEST_CASSANDRA_TABLE,
)
exists = sensor.poke(dict())
assert exists
mock_hook.return_value.table_exists.assert_called_once_with(TEST_CASSANDRA_TABLE)
mock_hook.assert_called_once_with(TEST_CASSANDRA_CONN_ID)
@patch("airflow.providers.apache.cassandra.sensors.table.CassandraHook")
def test_poke_should_return_false_for_non_existing_table(self, mock_hook):
mock_hook.return_value.table_exists.return_value = False
sensor = CassandraTableSensor(
task_id="test_task",
cassandra_conn_id=TEST_CASSANDRA_CONN_ID,
table=TEST_CASSANDRA_TABLE,
)
exists = sensor.poke(dict())
assert not exists
mock_hook.return_value.table_exists.assert_called_once_with(TEST_CASSANDRA_TABLE)
mock_hook.assert_called_once_with(TEST_CASSANDRA_CONN_ID)
@patch("airflow.providers.apache.cassandra.sensors.table.CassandraHook")
def test_poke_should_succeed_for_table_with_mentioned_keyspace(self, mock_hook):
sensor = CassandraTableSensor(
task_id="test_task",
cassandra_conn_id=TEST_CASSANDRA_CONN_ID,
table=TEST_CASSANDRA_TABLE_WITH_KEYSPACE,
)
exists = sensor.poke(dict())
assert exists
mock_hook.return_value.table_exists.assert_called_once_with(TEST_CASSANDRA_TABLE_WITH_KEYSPACE)
mock_hook.assert_called_once_with(TEST_CASSANDRA_CONN_ID)
@patch("airflow.providers.apache.cassandra.sensors.table.CassandraHook")
def test_init_with_default_conn(self, mock_hook):
sensor = CassandraTableSensor(task_id="test_task", table=TEST_CASSANDRA_TABLE)
assert sensor.cassandra_conn_id == TEST_CASSANDRA_CONN_ID
| TestCassandraTableSensor |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 3708,
"end": 3874
} | class ____:
def __str__(self, weird_extra_arg) -> str:
...
def __repr__(self, weird_extra_arg_with_default=...) -> str:
...
@final
| FineAndDandy |
python | pytorch__pytorch | torch/distributed/tensor/experimental/_context_parallel/_load_balancer.py | {
"start": 3428,
"end": 7778
} | class ____(_LoadBalancer):
def __init__(self, seq_length: int, world_size: int, device: str | torch.device):
self.seq_length = seq_length
self.world_size = world_size
self.device = device
def _generate_indices(self, restore: bool = False) -> Tensor:
"""
Generate head-and-tail load balancing indices or restore indices.
Args:
restore:
If True, generate restore indices that map head-and-tail rearranged
positions back to original positions. If False, generate load
balance indices that rearrange original positions to head-and-tail pattern.
Returns:
The generated indices of shape `(1, seq_len)` because the load-balancing is
identical within the batch.
Warning:
For Multi-Head Attention, we require the masks over the head dimension are identical
(i.e. the return value of `_generate_indices()` does not have `heads` dimension).
Example:
Here is the causal mask for attention where q_len == kv_len == 8:
KV_index
[1, 0, 0, 0, 0, 0, 0, 0]
[1, 1, 0, 0, 0, 0, 0, 0]
[1, 1, 1, 0, 0, 0, 0, 0]
Q_index [1, 1, 1, 1, 0, 0, 0, 0]
[1, 1, 1, 1, 1, 0, 0, 0]
[1, 1, 1, 1, 1, 1, 0, 0]
[1, 1, 1, 1, 1, 1, 1, 0]
[1, 1, 1, 1, 1, 1, 1, 1]
Head-tail load-balance strategy rearranges the Q tensor by combining
Q[0:k] (on seq dim) and Q[-k:] for rank 0, Q[k:2k] and Q[-2k:-k] for
rank 1, and so on. In python code it looks like:
k = Q.size(0) // (2 * cp_world_size)
for rank in range(cp_world_size):
reordered_Q[rank * 2 * k : (rank + 1) * 2 * k] = torch.cat(
(Q[rank * k : (rank + 1) * k], Q[-(rank + 1) * k : -rank * k])
)
This can also be done by tensor slicing. For the above example, the indices
tensor for slicing is:
slice_indices = Tensor([0, 7, 1, 6, 2, 5, 3, 4])
After reordering QKV using the `slice_indices`, the corresponding mask matrix
distributing over 2 devices becomes well-balanced:
KV_index
[1, 0, 0, 0, 0, 0, 0, 0]
[1, 1, 1, 1, 1, 1, 1, 1]
[1, 1, 0, 0, 0, 0, 0, 0] rank 0
[1, 1, 1, 1, 1, 1, 1, 0]
Q_index ------------------------
[1, 1, 1, 0, 0, 0, 0, 0]
[1, 1, 1, 1, 1, 1, 0, 0] rank 1
[1, 1, 1, 1, 0, 0, 0, 0]
[1, 1, 1, 1, 1, 0, 0, 0]
To restore the reordering and putting the tensor back, slicing op can do the
trick with a `restore_indices` such that:
slice_indices[restore_indices] == Tensor([0, 1, 2, ...])
In this way, `reordered_Q[restore_indices]` will just be the original Q.
"""
seq_length = self.seq_length
world_size = self.world_size
assert seq_length % (world_size * 2) == 0
chunk_size = seq_length // (world_size * 2)
all_indices = []
for rank in range(world_size):
# Generate indices for first chunk of the cp rank
first_chunk_start = rank * chunk_size
first_chunk_indices = list(
range(first_chunk_start, first_chunk_start + chunk_size)
)
# Second chunk: positions from the complementary chunk
second_chunk_idx = world_size * 2 - rank - 1
second_chunk_start = second_chunk_idx * chunk_size
second_chunk_indices = list(
range(second_chunk_start, second_chunk_start + chunk_size)
)
# combine the indices for this rank
all_indices.extend(first_chunk_indices + second_chunk_indices)
all_indices_tensor = torch.tensor(
all_indices, dtype=torch.int, device=self.device
)
if restore:
all_indices_tensor = torch.argsort(all_indices_tensor)
return all_indices_tensor.unsqueeze(0) # add batch dim
| _HeadTailLoadBalancer |
python | doocs__leetcode | solution/1500-1599/1553.Minimum Number of Days to Eat N Oranges/Solution.py | {
"start": 0,
"end": 240
} | class ____:
def minDays(self, n: int) -> int:
@cache
def dfs(n: int) -> int:
if n < 2:
return n
return 1 + min(n % 2 + dfs(n // 2), n % 3 + dfs(n // 3))
return dfs(n)
| Solution |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py | {
"start": 10281,
"end": 10360
} | class ____(MimiConv1dPaddingCache):
pass
| KyutaiSpeechToTextConv1dPaddingCache |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 868570,
"end": 874506
} | class ____(sgqlc.types.Type, Closable, Updatable, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"created_at",
"creator",
"database_id",
"field",
"fields",
"items",
"number",
"owner",
"public",
"readme",
"repositories",
"resource_path",
"short_description",
"title",
"updated_at",
"url",
"view",
"views",
)
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
creator = sgqlc.types.Field(Actor, graphql_name="creator")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
field = sgqlc.types.Field(
"ProjectV2FieldConfiguration",
graphql_name="field",
args=sgqlc.types.ArgDict(
(
(
"name",
sgqlc.types.Arg(
sgqlc.types.non_null(String), graphql_name="name", default=None
),
),
)
),
)
fields = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2FieldConfigurationConnection),
graphql_name="fields",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
ProjectV2FieldOrder,
graphql_name="orderBy",
default={"field": "POSITION", "direction": "ASC"},
),
),
)
),
)
items = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2ItemConnection),
graphql_name="items",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
ProjectV2ItemOrder,
graphql_name="orderBy",
default={"field": "POSITION", "direction": "ASC"},
),
),
)
),
)
number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number")
owner = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2Owner), graphql_name="owner"
)
public = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="public")
readme = sgqlc.types.Field(String, graphql_name="readme")
repositories = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="repositories",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
RepositoryOrder,
graphql_name="orderBy",
default={"field": "CREATED_AT", "direction": "DESC"},
),
),
)
),
)
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
short_description = sgqlc.types.Field(String, graphql_name="shortDescription")
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
view = sgqlc.types.Field(
"ProjectV2View",
graphql_name="view",
args=sgqlc.types.ArgDict(
(
(
"number",
sgqlc.types.Arg(
sgqlc.types.non_null(Int), graphql_name="number", default=None
),
),
)
),
)
views = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2ViewConnection),
graphql_name="views",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
ProjectV2ViewOrder,
graphql_name="orderBy",
default={"field": "POSITION", "direction": "ASC"},
),
),
)
),
)
| ProjectV2 |
python | astropy__astropy | astropy/modeling/fitting.py | {
"start": 44588,
"end": 56881
} | class ____(Fitter):
"""
Base class for Non-Linear least-squares fitters.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If set, the parameter bounds for a model will be enforced for each given
parameter while fitting via a simple min/max condition.
Default: True
"""
supported_constraints = ["fixed", "tied", "bounds"]
"""
The constraint types supported by this fitter type.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=True):
self.fit_info = None
self._calc_uncertainties = calc_uncertainties
self._use_min_max_bounds = use_min_max_bounds
def objective_function(self, fps, *args, fit_param_indices=None):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
fit_param_indices : list, optional
The ``fit_param_indices`` as returned by ``model_to_fit_params``.
This is a list of the parameter indices being fit, so excluding any
tied or fixed parameters. This can be passed in to the objective
function to prevent it having to be computed on every call.
This must be optional as not all fitters support passing kwargs to
the objective function.
"""
model = args[0]
weights = args[1]
inputs = args[2:-1]
meas = args[-1]
fps = fitter_to_model_params_array(
model,
fps,
self._use_min_max_bounds,
fit_param_indices=fit_param_indices,
)
if weights is None:
value = np.ravel(model.evaluate(*inputs, *fps) - meas)
else:
value = np.ravel(weights * (model.evaluate(*inputs, *fps) - meas))
if not np.all(np.isfinite(value)):
raise NonFiniteValueError(
"Objective function has encountered a non-finite value, "
"this will cause the fit to fail!\n"
"This can be caused by non-finite values in the input data "
"or weights, which can be removed with fit(..., "
"filter_non_finite=True), or by diverging model parameters "
"that yield non-finite model values."
)
return value
@staticmethod
def _add_fitting_uncertainties(model, cov_matrix):
"""
Set ``cov_matrix`` and ``stds`` attributes on model with parameter
covariance matrix returned by ``optimize.leastsq``.
"""
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
model.cov_matrix = Covariance(cov_matrix, free_param_names)
model.stds = StandardDeviations(cov_matrix, free_param_names)
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None, fit_param_indices=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if model.has_fixed or model.has_tied:
# update the parameters with the current values from the fitter
fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array(
[np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)]
)
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars], True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
fit_deriv = np.array(model.fit_deriv(x, *params))
try:
output = np.array(
[np.ravel(_) for _ in np.array(weights) * fit_deriv]
)
if output.shape != fit_deriv.shape:
output = np.array(
[np.ravel(_) for _ in np.atleast_2d(weights).T * fit_deriv]
)
return output
except ValueError:
return np.array(
[
np.ravel(_)
for _ in np.array(weights) * np.moveaxis(fit_deriv, -1, 0)
]
).transpose()
else:
if not model.col_fit_deriv:
return [
np.ravel(_)
for _ in (
np.ravel(weights)
* np.array(model.fit_deriv(x, y, *params)).T
).T
]
return [
np.ravel(_)
for _ in weights * np.array(model.fit_deriv(x, y, *params))
]
def _compute_param_cov(
self, model, y, init_values, cov_x, fitparams, farg, fkwarg, weights=None
):
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
self.fit_info["param_cov"] = cov_x
if weights is None:
# if there are no measurement uncertainties given in `weights`,
# fall back on the default behavior in scipy.optimize.curve_fit
# when `absolute_sigma == False`. If there are uncertainties,
# assume they are "absolute" and not "relative".
# For details, see curve_fit:
# https://github.com/scipy/scipy/blob/
# c1ed5ece8ffbf05356a22a8106affcd11bd3aee0/scipy/
# optimize/_minpack_py.py#L591-L602
sum_sqrs = np.sum(
self.objective_function(fitparams, *farg, **fkwarg) ** 2
)
dof = len(y) - len(init_values)
self.fit_info["param_cov"] *= sum_sqrs / dof
else:
self.fit_info["param_cov"] = None
if self._calc_uncertainties is True:
if self.fit_info["param_cov"] is not None:
self._add_fitting_uncertainties(model, self.fit_info["param_cov"])
def _run_fitter(
self, model, farg, fkwarg, maxiter, acc, epsilon, estimate_jacobian
):
return None, None, None
def _filter_non_finite(self, x, y, z=None, weights=None):
"""
Filter out non-finite values in x, y, z, and weights.
Returns
-------
x, y, z, weights : `~numpy.ndarray`
x, y, z, and weights with non-finite values filtered out.
"""
MESSAGE = "Non-Finite input data has been removed by the fitter."
mask = np.isfinite(x) & np.isfinite(y)
if z is not None:
mask &= np.isfinite(z)
if weights is not None:
mask &= np.isfinite(weights)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
if not np.any(mask):
raise ValueError("All input data or weights are non-finite.")
return (
x[mask],
y[mask],
None if z is None else z[mask],
None if weights is None else weights[mask],
)
@fitter_unit_support
def __call__(
self,
model,
x,
y,
z=None,
weights=None,
maxiter=DEFAULT_MAXITER,
acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS,
estimate_jacobian=False,
filter_non_finite=False,
*,
inplace=False,
):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting. For data with Gaussian uncertainties, the weights
should be 1/sigma.
.. versionchanged:: 5.3
Calculate parameter covariances while accounting for ``weights``
as "absolute" inverse uncertainties. To recover the old behavior,
choose ``weights=None``.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that are should be applied in
case x, y and/or z have units. Default is None.
filter_non_finite : bool, optional
Whether or not to filter data with non-finite values. Default is False
inplace : bool, optional
If `False` (the default), a copy of the model with the fitted
parameters set will be returned. If `True`, the returned model will
be the same instance as the model passed in, and the parameter
values will be changed inplace.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
If ``inplace`` is `False` (the default), this is a copy of the
input model with parameters set by the fitter. If ``inplace`` is
`True`, this is the same model as the input model, with parameters
updated to be those set by the fitter.
"""
model_copy = _validate_model(
model,
self.supported_constraints,
copy=not inplace,
)
model_copy.sync_constraints = False
_, fit_param_indices, _ = model_to_fit_params(model_copy)
if filter_non_finite:
x, y, z, weights = self._filter_non_finite(x, y, z, weights)
farg = (
model_copy,
weights,
) + _convert_input(x, y, z)
fkwarg = {"fit_param_indices": set(fit_param_indices)}
init_values, fitparams, cov_x = self._run_fitter(
model_copy, farg, fkwarg, maxiter, acc, epsilon, estimate_jacobian
)
self._compute_param_cov(
model_copy, y, init_values, cov_x, fitparams, farg, fkwarg, weights
)
model_copy.sync_constraints = True
return model_copy
| _NonLinearLSQFitter |
python | sphinx-doc__sphinx | sphinx/util/cfamily.py | {
"start": 6624,
"end": 7296
} | class ____(ASTAttribute):
"""For paren attributes defined by the user."""
def __init__(self, id: str, arg: str) -> None:
self.id = id
self.arg = arg
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTParenAttribute):
return NotImplemented
return self.id == other.id and self.arg == other.arg
def __hash__(self) -> int:
return hash((self.id, self.arg))
def _stringify(self, transform: StringifyTransform) -> str:
return f'{self.id}({self.arg})'
def describe_signature(self, signode: TextElement) -> None:
signode.append(nodes.Text(str(self)))
| ASTParenAttribute |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/test_util.py | {
"start": 949,
"end": 3112
} | class ____(test.TestCase):
"""Base class for test cases."""
def _run_targets(self, targets1, targets2=None, run_init=True):
targets1 = nest.flatten(targets1)
targets2 = ([] if targets2 is None else nest.flatten(targets2))
assert len(targets1) == len(targets2) or not targets2
if run_init:
init = variables.global_variables_initializer()
self.evaluate(init)
return self.evaluate(targets1 + targets2)
# TODO(agarwal): Allow tests to pass down tolerances.
def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
outputs = self._run_targets(targets1, targets2)
outputs = nest.flatten(outputs) # flatten SparseTensorValues
n = len(outputs) // 2
for i in range(n):
if outputs[i + n].dtype != np.object_:
self.assertAllClose(outputs[i + n], outputs[i], rtol=rtol, atol=atol)
else:
self.assertAllEqual(outputs[i + n], outputs[i])
def _test_loop_fn(self,
loop_fn,
iters,
parallel_iterations=None,
fallback_to_while_loop=False,
rtol=1e-4,
atol=1e-5):
t1 = pfor_control_flow_ops.pfor(
loop_fn,
iters=iters,
fallback_to_while_loop=fallback_to_while_loop,
parallel_iterations=parallel_iterations)
loop_fn_dtypes = nest.map_structure(lambda x: x.dtype, t1)
t2 = pfor_control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, iters=iters,
parallel_iterations=parallel_iterations)
def _check_shape(a, b):
msg = (
"Inferred static shapes are different between two loops:"
f" {a.shape} vs {b.shape}."
)
# TODO(b/268146947): should assert bool(a.shape) == bool(b.shape),
# since both should be either defined or undefined. But it does not work.
if b.shape:
self.assertEqual(a.shape.as_list()[0], b.shape.as_list()[0], msg)
# TODO(b/268146947): self.assertShapeEqual(a, b, msg) does not work.
nest.map_structure(_check_shape, t1, t2)
self.run_and_assert_equal(t1, t2, rtol=rtol, atol=atol)
| PForTestCase |
python | etianen__django-reversion | tests/test_app/tests/test_views.py | {
"start": 1311,
"end": 1597
} | class ____(LoginMixin, TestModelMixin, TestBase):
def testCreateRevisionUser(self):
response = self.client.post("/test-app/revision-mixin/")
obj = TestModel.objects.get(pk=response.content)
self.assertSingleRevision((obj,), user=self.user)
| RevisionMixinUserTest |
python | ipython__ipython | IPython/core/magic_arguments.py | {
"start": 8710,
"end": 8907
} | class ____(ArgMethodWrapper):
""" Store arguments and keywords to pass to add_argument().
Instances also serve to decorate command methods.
"""
_method_name = 'add_argument'
| argument |
python | kamyu104__LeetCode-Solutions | Python/zuma-game.py | {
"start": 1714,
"end": 3503
} | class ____(object):
def findMinStep(self, board, hand):
"""
:type board: str
:type hand: str
:rtype: int
"""
def shrink(s): # Time: O(n), Space: O(n)
stack = []
start = 0
for i in xrange(len(s)+1):
if i == len(s) or s[i] != s[start]:
if stack and stack[-1][0] == s[start]:
stack[-1][1] += i - start
if stack[-1][1] >= 3:
stack.pop()
elif s and i - start < 3:
stack += [s[start], i - start],
start = i
result = []
for p in stack:
result += [p[0]] * p[1]
return result
def findMinStepHelper(board, hand, lookup):
if not board: return 0
if not hand: return float("inf")
if tuple(hand) in lookup[tuple(board)]: return lookup[tuple(board)][tuple(hand)]
result = float("inf")
for i in xrange(len(hand)):
for j in xrange(len(board)+1):
next_board = shrink(board[0:j] + hand[i:i+1] + board[j:])
next_hand = hand[0:i] + hand[i+1:]
result = min(result, findMinStepHelper(next_board, next_hand, lookup) + 1)
lookup[tuple(board)][tuple(hand)] = result
return result
lookup = collections.defaultdict(dict)
board, hand = list(board), list(hand)
result = findMinStepHelper(board, hand, lookup)
return -1 if result == float("inf") else result
# Time: O((b * h) * b * b! * h!)
# Space: O(b * b! * h!)
import collections
# greedy solution without proof (possibly incorrect)
| Solution_TLE |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeature.py | {
"start": 1412,
"end": 2367
} | class ____(object):
def __init__(self, name, type_, fold, repeat, value, time, comment=""):
self.name = name
self.type_ = type_
self.fold = fold
self.repeat = repeat
self.value = value
self.time = time
self.comment = comment
def to_arff_row(self):
if self.type_ == "METAFEATURE":
value = self.value
else:
value = "?"
return [
self.name,
self.type_,
self.fold,
self.repeat,
value,
self.time,
self.comment,
]
def __repr__(self):
repr = (
"%s (type: %s, fold: %d, repeat: %d, value: %s, time: %3.3f, "
"comment: %s)"
)
repr = repr % tuple(
self.to_arff_row()[:4]
+ [str(self.to_arff_row()[4])]
+ self.to_arff_row()[5:]
)
return repr
| MetaFeatureValue |
python | scikit-learn__scikit-learn | sklearn/_loss/loss.py | {
"start": 26589,
"end": 27603
} | class ____(BaseLoss):
"""Half Gamma deviance loss with log-link, for regression.
Domain:
y_true and y_pred in positive real numbers
Link:
y_pred = exp(raw_prediction)
For a given sample x_i, half Gamma deviance loss is defined as::
loss(x_i) = log(exp(raw_prediction_i)/y_true_i)
+ y_true/exp(raw_prediction_i) - 1
Half the Gamma deviance is actually proportional to the negative log-
likelihood up to constant terms (not involving raw_prediction) and
simplifies the computation of the gradients.
We also skip the constant term `-log(y_true_i) - 1`.
"""
def __init__(self, sample_weight=None):
super().__init__(closs=CyHalfGammaLoss(), link=LogLink())
self.interval_y_true = Interval(0, np.inf, False, False)
def constant_to_optimal_zero(self, y_true, sample_weight=None):
term = -np.log(y_true) - 1
if sample_weight is not None:
term *= sample_weight
return term
| HalfGammaLoss |
python | Netflix__metaflow | metaflow/plugins/datatools/s3/s3.py | {
"start": 3203,
"end": 3288
} | class ____(MetaflowException):
headline = "S3 invalid range"
| MetaflowS3InvalidRange |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/actions.py | {
"start": 1935,
"end": 2223
} | class ____(CompositeAction):
"""Composite action parser for a sanity target."""
def create_parser(self) -> NamespaceParser:
"""Return a namespace parser to parse the argument associated with this action."""
return SanityPythonTargetParser()
| SanityPythonTargetAction |
python | bokeh__bokeh | src/bokeh/client/websocket.py | {
"start": 1630,
"end": 3239
} | class ____:
''' Used for compatibility across Tornado versions and to add write_lock'''
def __init__(self, socket: WebSocketClientConnection) -> None:
self._socket = socket
# write_lock allows us to lock the connection to send multiple
# messages atomically.
self.write_lock = locks.Lock()
# Internal methods --------------------------------------------------------
async def write_message(self, message: str | bytes, binary: bool = False, locked: bool = True) -> None:
''' Write a message to the websocket after obtaining the appropriate
Bokeh Document lock.
'''
if locked:
with await self.write_lock.acquire():
self._socket.write_message(message, binary)
else:
self._socket.write_message(message, binary)
def close(self, code: int | None = None, reason: str | None = None) -> None:
''' Close the websocket. '''
return self._socket.close(code, reason)
def read_message(self, callback: Callable[..., Any] | None = None) -> Awaitable[None | str | bytes]:
''' Read a message from websocket and execute a callback.
'''
return self._socket.read_message(callback)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| WebSocketClientConnectionWrapper |
python | apache__thrift | lib/py/src/protocol/TBase.py | {
"start": 2829,
"end": 2895
} | class ____(TFrozenBase, TExceptionBase):
pass
| TFrozenExceptionBase |
python | wandb__wandb | wandb/sdk/data_types/table.py | {
"start": 50156,
"end": 54445
} | class ____(_dtypes.Type):
name = "partitioned-table"
types = [PartitionedTable]
_dtypes.TypeRegistry.add(_TableType)
_dtypes.TypeRegistry.add(_JoinedTableType)
_dtypes.TypeRegistry.add(_PartitionedTableType)
_dtypes.TypeRegistry.add(_ForeignKeyType)
_dtypes.TypeRegistry.add(_PrimaryKeyType)
_dtypes.TypeRegistry.add(_ForeignIndexType)
def _get_data_from_increments(
json_obj: Dict[str, Any], source_artifact: "artifact.Artifact"
) -> List[Any]:
"""Get data from incremental table artifacts.
Args:
json_obj: The JSON object containing table metadata.
source_artifact: The source artifact containing the table data.
Returns:
List of table rows from all increments.
"""
if "latest" not in source_artifact.aliases:
wandb.termwarn(
(
"It is recommended to use the latest version of the "
"incremental table artifact for ordering guarantees."
),
repeat=False,
)
data: List[Any] = []
increment_num = json_obj.get("increment_num", None)
if increment_num is None:
return data
# Sort by increment number first, then by timestamp if present
# Format of name is: "{incr_num}-{timestamp_ms}.{key}.table.json"
def get_sort_key(key: str) -> Tuple[int, int]:
try:
parts = key.split(".")
increment_parts = parts[0].split("-")
increment_num = int(increment_parts[0])
# If there's a timestamp part, use it for secondary sorting
timestamp = int(increment_parts[1]) if len(increment_parts) > 1 else 0
except (ValueError, IndexError):
wandb.termwarn(
(
f"Could not parse artifact entry for increment {key}."
" The entry name does not follow the naming convention"
" <increment_number>-<timestamp>.<key>.table.json"
" The data in the table will be out of order."
),
repeat=False,
)
return (0, 0)
return (increment_num, timestamp)
sorted_increment_keys = []
for entry_key in source_artifact.manifest.entries:
if entry_key.endswith(".table.json"):
sorted_increment_keys.append(entry_key)
sorted_increment_keys.sort(key=get_sort_key)
for entry_key in sorted_increment_keys:
try:
with open(source_artifact.manifest.entries[entry_key].download()) as f:
table_data = json.load(f)
data.extend(table_data["data"])
except (json.JSONDecodeError, KeyError) as e:
raise wandb.Error(f"Invalid table file {entry_key}") from e
return data
def _process_table_row(
row: List[Any],
timestamp_column_indices: Set[_dtypes.TimestampType],
np_deserialized_columns: Dict[int, Any],
source_artifact: "artifact.Artifact",
row_idx: int,
) -> List[Any]:
"""Convert special columns in a table row to Python types.
Processes a single row of table data by converting timestamp values to
datetime objects, replacing np typed cells with numpy array data,
and initializing media objects from their json value.
Args:
row: The row data to process.
timestamp_column_indices: Set of column indices containing timestamps.
np_deserialized_columns: Dictionary mapping column indices to numpy arrays.
source_artifact: The source artifact containing the table data.
row_idx: The index of the current row.
Returns:
Processed row data.
"""
row_data = []
for c_ndx, item in enumerate(row):
cell: Any
if c_ndx in timestamp_column_indices and isinstance(item, (int, float)):
cell = datetime.datetime.fromtimestamp(
item / 1000, tz=datetime.timezone.utc
)
elif c_ndx in np_deserialized_columns:
cell = np_deserialized_columns[c_ndx][row_idx]
elif (
isinstance(item, dict)
and "_type" in item
and (obj := WBValue.init_from_json(item, source_artifact))
):
cell = obj
else:
cell = item
row_data.append(cell)
return row_data
| _PartitionedTableType |
python | plotly__plotly.py | plotly/graph_objs/densitymap/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8529
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "densitymap.colorbar"
_path_str = "densitymap.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.densitymap.col
orbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.densitymap.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymap.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | openai__openai-python | src/openai/types/realtime/realtime_audio_config_input_param.py | {
"start": 533,
"end": 813
} | class ____(TypedDict, total=False):
type: NoiseReductionType
"""Type of noise reduction.
`near_field` is for close-talking microphones such as headphones, `far_field` is
for far-field microphones such as laptop or conference room microphones.
"""
| NoiseReduction |
python | getsentry__sentry | src/sentry/search/eap/columns.py | {
"start": 8797,
"end": 9617
} | class ____(ResolvedFunction):
# The internal rpc alias for this column
internal_name: Function.ValueType
extrapolation_mode: ExtrapolationMode.ValueType
# The condition to filter on
filter: TraceItemFilter
# The attribute to conditionally aggregate on
key: AttributeKey
is_aggregate: bool = field(default=True, init=False)
@property
def proto_definition(self) -> AttributeConditionalAggregation:
"""The definition of this function as needed by the RPC"""
return AttributeConditionalAggregation(
aggregate=self.internal_name,
key=self.key,
filter=self.filter,
label=self.public_alias,
extrapolation_mode=self.extrapolation_mode,
)
@dataclass(frozen=True, kw_only=True)
| ResolvedConditionalAggregate |
python | TheAlgorithms__Python | data_structures/linked_list/__init__.py | {
"start": 431,
"end": 3760
} | class ____:
def __init__(self) -> None:
self.head: Node | None = None
self.size = 0
def add(self, item: Any, position: int = 0) -> None:
"""
Add an item to the LinkedList at the specified position.
Default position is 0 (the head).
Args:
item (Any): The item to add to the LinkedList.
position (int, optional): The position at which to add the item.
Defaults to 0.
Raises:
ValueError: If the position is negative or out of bounds.
>>> linked_list = LinkedList()
>>> linked_list.add(1)
>>> linked_list.add(2)
>>> linked_list.add(3)
>>> linked_list.add(4, 2)
>>> print(linked_list)
3 --> 2 --> 4 --> 1
# Test adding to a negative position
>>> linked_list.add(5, -3)
Traceback (most recent call last):
...
ValueError: Position must be non-negative
# Test adding to an out-of-bounds position
>>> linked_list.add(5,7)
Traceback (most recent call last):
...
ValueError: Out of bounds
>>> linked_list.add(5, 4)
>>> print(linked_list)
3 --> 2 --> 4 --> 1 --> 5
"""
if position < 0:
raise ValueError("Position must be non-negative")
if position == 0 or self.head is None:
new_node = Node(item, self.head)
self.head = new_node
else:
current = self.head
for _ in range(position - 1):
current = current.next
if current is None:
raise ValueError("Out of bounds")
new_node = Node(item, current.next)
current.next = new_node
self.size += 1
def remove(self) -> Any:
# Switched 'self.is_empty()' to 'self.head is None'
# because mypy was considering the possibility that 'self.head'
# can be None in below else part and giving error
if self.head is None:
return None
else:
item = self.head.item
self.head = self.head.next
self.size -= 1
return item
def is_empty(self) -> bool:
return self.head is None
def __str__(self) -> str:
"""
>>> linked_list = LinkedList()
>>> linked_list.add(23)
>>> linked_list.add(14)
>>> linked_list.add(9)
>>> print(linked_list)
9 --> 14 --> 23
"""
if self.is_empty():
return ""
else:
iterate = self.head
item_str = ""
item_list: list[str] = []
while iterate:
item_list.append(str(iterate.item))
iterate = iterate.next
item_str = " --> ".join(item_list)
return item_str
def __len__(self) -> int:
"""
>>> linked_list = LinkedList()
>>> len(linked_list)
0
>>> linked_list.add("a")
>>> len(linked_list)
1
>>> linked_list.add("b")
>>> len(linked_list)
2
>>> _ = linked_list.remove()
>>> len(linked_list)
1
>>> _ = linked_list.remove()
>>> len(linked_list)
0
"""
return self.size
| LinkedList |
python | pennersr__django-allauth | allauth/socialaccount/providers/twitter/views.py | {
"start": 245,
"end": 562
} | class ____(OAuth):
"""
Verifying twitter credentials
"""
_base_url = "https://api.x.com/1.1/account/verify_credentials.json"
url = _base_url + "?include_email=true" if QUERY_EMAIL else _base_url
def get_user_info(self):
user = self.query(self.url).json()
return user
| TwitterAPI |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 15272,
"end": 16494
} | class ____(Array):
"""
Handles variable lengths arrays (i.e. where *arraysize* is '*').
"""
format = "O"
def __init__(self, field, base, arraysize, config=None, pos=None):
Array.__init__(self, field, config)
self._base = base
self.default = np.array([], dtype=self._base.format)
def output(self, value, mask):
output = self._base.output
result = [output(x, m) for x, m in np.broadcast(value, mask)]
return " ".join(result)
def binparse(self, read):
length = self._parse_length(read)
result = []
result_mask = []
binparse = self._base.binparse
for i in range(length):
val, mask = binparse(read)
result.append(val)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
def binoutput(self, value, mask):
if value is None or len(value) == 0:
return _zero_int
length = len(value)
result = [self._write_length(length)]
binoutput = self._base.binoutput
for x, m in zip(value, value.mask):
result.append(binoutput(x, m))
return _empty_bytes.join(result)
| VarArray |
python | pytorch__pytorch | torch/utils/weak.py | {
"start": 5484,
"end": 11545
} | class ____(MutableMapping):
def __init__(self, dict=None, ref_type=WeakIdRef) -> None: # CHANGED
self.data = {}
self.ref_type = ref_type # CHANGED
def remove(k, selfref=ref(self)) -> None:
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
try:
del self.data[k]
except KeyError:
pass
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
self._dirty_len = False
if dict is not None:
self.update(dict)
def _commit_removals(self) -> None:
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
pop = self._pending_removals.pop
d = self.data
while True:
try:
key = pop()
except IndexError:
return
try:
del d[key]
except KeyError:
pass
def _scrub_removals(self) -> None:
d = self.data
self._pending_removals = [k for k in self._pending_removals if k in d]
self._dirty_len = False
def __delitem__(self, key) -> None:
self._dirty_len = True
del self.data[self.ref_type(key)] # CHANGED
def __getitem__(self, key):
return self.data[self.ref_type(key)] # CHANGED
def __len__(self) -> int:
if self._dirty_len and self._pending_removals:
# self._pending_removals may still contain keys which were
# explicitly removed, we have to scrub them (see issue #21173).
self._scrub_removals()
return len(self.data) - len(self._pending_removals)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} at {id(self):#x}>"
def __setitem__(self, key, value) -> None:
self.data[self.ref_type(key, self._remove)] = value # CHANGED
def copy(self):
new = WeakIdKeyDictionary()
with _IterationGuard(self):
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
with _IterationGuard(self):
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(self.ref_type(key), default) # CHANGED
def __contains__(self, key) -> bool:
try:
wr = self.ref_type(key) # CHANGED
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
self._dirty_len = True
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
# pyrefly: ignore [bad-override]
def pop(self, key, *args):
self._dirty_len = True
# pyrefly: ignore [not-iterable]
return self.data.pop(self.ref_type(key), *args) # CHANGED
def setdefault(self, key, default=None):
return self.data.setdefault(
self.ref_type(key, self._remove), default
) # CHANGED
def update(self, dict=None, **kwargs) -> None: # type: ignore[override]
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[self.ref_type(key, self._remove)] = value # CHANGED
if kwargs:
self.update(kwargs)
def __ior__(self, other):
self.update(other)
return self
def __or__(self, other):
if isinstance(other, _collections_abc.Mapping):
c = self.copy()
c.update(other)
return c
return NotImplemented
def __ror__(self, other):
if isinstance(other, _collections_abc.Mapping):
c = self.__class__()
c.update(other)
c.update(self)
return c
return NotImplemented
# Default Mapping equality will tests keys for equality, but
# we want to test ids for equality
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return {id(k): v for k, v in self.items()} == {
id(k): v for k, v in other.items()
}
# Convenience alias
WeakTensorKeyDictionary = WeakIdKeyDictionary
| WeakIdKeyDictionary |
python | ray-project__ray | rllib/core/columns.py | {
"start": 62,
"end": 2560
} | class ____:
"""Definitions of common column names for RL data, e.g. 'obs', 'rewards', etc..
Note that this replaces the `SampleBatch` and `Postprocessing` columns (of the same
name).
"""
# Observation received from an environment after `reset()` or `step()`.
OBS = "obs"
# Infos received from an environment after `reset()` or `step()`.
INFOS = "infos"
# Action computed/sampled by an RLModule.
ACTIONS = "actions"
# Action actually sent to the (gymnasium) `Env.step()` method.
ACTIONS_FOR_ENV = "actions_for_env"
# Reward returned by `env.step()`.
REWARDS = "rewards"
# Termination signal received from an environment after `step()`.
TERMINATEDS = "terminateds"
# Truncation signal received from an environment after `step()` (e.g. because
# of a reached time limit).
TRUNCATEDS = "truncateds"
# Next observation: Only used by algorithms that need to look at TD-data for
# training, such as off-policy/DQN algos.
NEXT_OBS = "new_obs"
# Uniquely identifies an episode
EPS_ID = "eps_id"
AGENT_ID = "agent_id"
MODULE_ID = "module_id"
# The size of non-zero-padded data within a (e.g. LSTM) zero-padded
# (B, T, ...)-style train batch.
SEQ_LENS = "seq_lens"
# Episode timestep counter.
T = "t"
# Common extra RLModule output keys.
STATE_IN = "state_in"
NEXT_STATE_IN = "next_state_in"
STATE_OUT = "state_out"
NEXT_STATE_OUT = "next_state_out"
EMBEDDINGS = "embeddings"
ACTION_DIST_INPUTS = "action_dist_inputs"
ACTION_PROB = "action_prob"
ACTION_LOGP = "action_logp"
# Value function predictions.
VF_PREDS = "vf_preds"
# Values, predicted at one timestep beyond the last timestep taken.
# These are usually calculated via the value function network using the final
# observation (and in case of an RNN: the last returned internal state).
VALUES_BOOTSTRAPPED = "values_bootstrapped"
# Postprocessing columns.
ADVANTAGES = "advantages"
VALUE_TARGETS = "value_targets"
# Intrinsic rewards (learning with curiosity).
INTRINSIC_REWARDS = "intrinsic_rewards"
# Discounted sum of rewards till the end of the episode (or chunk).
RETURNS_TO_GO = "returns_to_go"
# Loss mask. If provided in a train batch, a Learner's compute_loss_for_module
# method should respect the False-set value in here and mask out the respective
# items form the loss.
LOSS_MASK = "loss_mask"
| Columns |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 14536,
"end": 16473
} | class ____(nodes.Inline, nodes.TextElement):
"""Node representing a potential way to create a cross-reference and the
condition in which this way should be used.
This node is only allowed to be placed under a :py:class:`pending_xref`
node. A **pending_xref** node must contain either no **pending_xref_condition**
nodes or it must only contains **pending_xref_condition** nodes.
The cross-reference resolver will replace a :py:class:`pending_xref` which
contains **pending_xref_condition** nodes by the content of exactly one of
those **pending_xref_condition** nodes' content. It uses the **condition**
attribute to decide which **pending_xref_condition** node's content to
use. For example, let us consider how the cross-reference resolver acts on::
<pending_xref refdomain="py" reftarget="io.StringIO ...>
<pending_xref_condition condition="resolved">
<literal>
StringIO
<pending_xref_condition condition="*">
<literal>
io.StringIO
If the cross-reference resolver successfully resolves the cross-reference,
then it rewrites the **pending_xref** as::
<reference>
<literal>
StringIO
Otherwise, if the cross-reference resolution failed, it rewrites the
**pending_xref** as::
<reference>
<literal>
io.StringIO
The **pending_xref_condition** node should have **condition** attribute.
Domains can be store their individual conditions into the attribute to
filter contents on resolving phase. As a reserved condition name,
``condition="*"`` is used for the fallback of resolution failure.
Additionally, as a recommended condition name, ``condition="resolved"``
represents a resolution success in the intersphinx module.
.. versionadded:: 4.0
"""
| pending_xref_condition |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py | {
"start": 64274,
"end": 71266
} | class ____(AwsBaseOperator[EmrServerlessHook]):
"""
Operator to stop an EMR Serverless application.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrServerlessStopApplicationOperator`
:param application_id: ID of the EMR Serverless application to stop.
:param wait_for_completion: If true, wait for the Application to stop before returning. Default to True
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param force_stop: If set to True, any job for that app that is not in a terminal state will be cancelled.
Otherwise, trying to stop an app with running jobs will return an error.
If you want to wait for the jobs to finish gracefully, use
:class:`airflow.providers.amazon.aws.sensors.emr.EmrServerlessJobSensor`
:param waiter_max_attempts: Number of times the waiter should poll the application to check the state.
Defaults to 25 if not set.
:param waiter_delay: Number of seconds between polling the state of the application.
Defaults to 60 seconds if not set.
:param deferrable: If True, the operator will wait asynchronously for the application to stop.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False, but can be overridden in config file by setting default_deferrable to True)
"""
aws_hook_class = EmrServerlessHook
template_fields: Sequence[str] = aws_template_fields(
"application_id",
)
def __init__(
self,
application_id: str,
wait_for_completion: bool = True,
waiter_max_attempts: int | ArgNotSet = NOTSET,
waiter_delay: int | ArgNotSet = NOTSET,
force_stop: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
waiter_delay = 60 if waiter_delay is NOTSET else waiter_delay
waiter_max_attempts = 25 if waiter_max_attempts is NOTSET else waiter_max_attempts
self.application_id = application_id
self.wait_for_completion = False if deferrable else wait_for_completion
self.waiter_max_attempts = int(waiter_max_attempts) # type: ignore[arg-type]
self.waiter_delay = int(waiter_delay) # type: ignore[arg-type]
self.force_stop = force_stop
self.deferrable = deferrable
super().__init__(**kwargs)
def execute(self, context: Context) -> None:
self.log.info("Stopping application: %s", self.application_id)
if self.force_stop:
count = self.hook.cancel_running_jobs(
application_id=self.application_id,
wait_for_completion=False,
)
if count > 0:
self.log.info("now waiting for the %s cancelled job(s) to terminate", count)
if self.deferrable:
self.defer(
trigger=EmrServerlessCancelJobsTrigger(
application_id=self.application_id,
aws_conn_id=self.aws_conn_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
method_name="stop_application",
)
self.hook.get_waiter("no_job_running").wait(
applicationId=self.application_id,
states=list(self.hook.JOB_INTERMEDIATE_STATES.union({"CANCELLING"})),
WaiterConfig={
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
},
)
else:
self.log.info("no running jobs found with application ID %s", self.application_id)
self.hook.conn.stop_application(applicationId=self.application_id)
if self.deferrable:
self.defer(
trigger=EmrServerlessStopApplicationTrigger(
application_id=self.application_id,
aws_conn_id=self.aws_conn_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
method_name="execute_complete",
)
if self.wait_for_completion:
waiter = self.hook.get_waiter("serverless_app_stopped")
wait(
waiter=waiter,
waiter_max_attempts=self.waiter_max_attempts,
waiter_delay=self.waiter_delay,
args={"applicationId": self.application_id},
failure_message="Error stopping application",
status_message="Serverless Application status is",
status_args=["application.state", "application.stateDetails"],
)
self.log.info("EMR serverless application %s stopped successfully", self.application_id)
def stop_application(self, context: Context, event: dict[str, Any] | None = None) -> None:
if event is None:
self.log.error("Trigger error: event is None")
raise AirflowException("Trigger error: event is None")
if event["status"] == "success":
self.hook.conn.stop_application(applicationId=self.application_id)
self.defer(
trigger=EmrServerlessStopApplicationTrigger(
application_id=self.application_id,
aws_conn_id=self.aws_conn_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
timeout=timedelta(seconds=self.waiter_max_attempts * self.waiter_delay),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] == "success":
self.log.info("EMR serverless application %s stopped successfully", self.application_id)
| EmrServerlessStopApplicationOperator |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 9232,
"end": 9377
} | class ____(_TestDCTIIIBase):
def setup_method(self):
self.rdt = np.float64
self.dec = 14
self.type = 3
| TestDCTIIIDouble |
python | pytorch__pytorch | torch/distributed/checkpoint/metadata.py | {
"start": 3576,
"end": 3785
} | class ____:
checkpoint_id: Union[str, os.PathLike, None] = None
save_id: Optional[str] = None
load_id: Optional[str] = None
modules: list[str] = field(default_factory=list)
@dataclass
| StorageMeta |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-integers-to-choose-from-a-range-i.py | {
"start": 794,
"end": 1592
} | class ____(object):
def maxCount(self, banned, n, maxSum):
"""
:type banned: List[int]
:type n: int
:type maxSum: int
:rtype: int
"""
def check(x):
return (x+1)*x//2-prefix[bisect.bisect_right(sorted_banned, x)] <= maxSum
sorted_banned = sorted(set(banned))
prefix = [0]*(len(sorted_banned)+1)
for i in xrange(len(sorted_banned)):
prefix[i+1] = prefix[i]+sorted_banned[i]
left, right = 1, n
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right-bisect.bisect_right(sorted_banned, right)
# Time: O(n)
# Space: O(b)
# greedy
| Solution2 |
python | geekcomputers__Python | BlackJack_game/blackjack_simulate.py | {
"start": 9273,
"end": 11646
} | class ____:
def __init__(self):
self.data = []
self.winner = None
self.remain_chips = 0
self.rounds = 0
self.player_win_count = 0
self.dealer_win_count = 0
self.player_point = 0
self.dealer_point = 0
def update(self, winner, chips, player_point, dealer_point):
self.rounds += 1
self.remain_chips = chips
self.winner = winner
if self.winner == "Player":
self.player_win_count += 1
elif self.winner == "Dealer":
self.dealer_win_count += 1
self.player_point = player_point
self.dealer_point = dealer_point
def record(self, winner, chips, player_point, dealer_point):
self.update(winner, chips, player_point, dealer_point)
Row = namedtuple(
"Row", ["rounds", "player_point", "dealer_point", "winner", "remain_chips"]
)
row = Row(
self.rounds,
self.player_point,
self.dealer_point,
self.winner,
self.remain_chips,
)
self.data.append(row)
def draw_diagram(self):
content = "Record display"
bars = "--" * 14
content_bar = bars + content + bars
base_bar = bars + "-" * len(content) + bars
os.system("clear")
print(base_bar)
print(content_bar)
print(base_bar)
self.digram()
print(base_bar)
print(content_bar)
print(base_bar)
def digram(self):
title = "Round\tPlayer-Point\tDealer-Point\tWinner-is\tRemain-Chips"
row_fmt = "{}\t{}\t\t{}\t\t{}\t\t{}"
print(title)
for row in self.data:
print(
row_fmt.format(
row.rounds,
row.player_point,
row.dealer_point,
row.winner,
row.remain_chips,
)
)
print("")
win_rate_fmt = ">> Player win rate: {}%\n>> Dealer win rate: {}%"
try:
player_rate = round(self.player_win_count / self.rounds * 100, 2)
dealer_rate = round(self.dealer_win_count / self.rounds * 100, 2)
except ZeroDivisionError:
player_rate = 0
dealer_rate = 0
print(win_rate_fmt.format(player_rate, dealer_rate))
| Recorder |
python | conda__conda | conda/exceptions.py | {
"start": 13388,
"end": 13824
} | class ____(CondaError):
def __init__(self, target_directory: PathType):
message = dals(
"""
The target directory exists, but it is not a conda environment.
Use 'conda create' to convert the directory to a conda environment.
target directory: %(target_directory)s
"""
)
super().__init__(message, target_directory=target_directory)
| DirectoryNotACondaEnvironmentError |
python | getsentry__sentry | tests/sentry/integrations/discord/test_issue_alert.py | {
"start": 1547,
"end": 11885
} | class ____(RuleTestCase):
rule_cls = DiscordNotifyServiceAction
def setUp(self) -> None:
self.guild_id = "guild-id"
self.channel_id = "12345678910"
self.discord_user_id = "user1234"
self.discord_integration = self.create_integration(
provider="discord",
name="Cool server",
external_id=self.guild_id,
organization=self.organization,
)
self.provider = self.create_identity_provider(integration=self.discord_integration)
self.identity = self.create_identity(
user=self.user, identity_provider=self.provider, external_id=self.discord_user_id
)
self.event = self.store_event(
data={
"event_id": "a" * 32,
"message": "Event message",
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=self.project.id,
)
self.tags = "environment, user"
self.rule = self.get_rule(
data={
"server": self.discord_integration.id,
"channel_id": self.channel_id,
"tags": self.tags,
}
)
responses.add(
method=responses.POST,
url=f"{DISCORD_BASE_URL}{MESSAGE_URL.format(channel_id=self.channel_id)}",
status=200,
json={"message_id": "12345678910"},
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def assert_lifecycle_metrics(self, mock_record_event: mock.MagicMock) -> None:
notification_uuid = str(uuid4())
self.rule.after(self.event, notification_uuid=notification_uuid)
assert_slo_metric(mock_record_event)
@mock.patch(
"sentry.integrations.discord.client.DiscordClient.send_message", side_effect=Exception
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def assert_lifecycle_metrics_failure(
self, mock_record_event: mock.MagicMock, mock_send_message: mock.MagicMock
) -> None:
self.rule.after(self.event)
assert_slo_metric(mock_record_event, EventLifecycleOutcome.FAILURE)
@mock.patch(
"sentry.integrations.discord.client.DiscordClient.send_message",
side_effect=ApiRateLimitedError(text="Rate limited"),
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def assert_lifecycle_metrics_halt_for_rate_limit(
self, mock_record_event: mock.MagicMock, mock_send_message: mock.MagicMock
) -> None:
self.rule.after(self.event)
assert_slo_metric(mock_record_event, EventLifecycleOutcome.HALTED)
@mock.patch(
"sentry.integrations.discord.client.DiscordClient.send_message",
side_effect=ApiError(code=50001, text="Missing access"),
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def assert_lifecycle_metrics_halt_for_missing_access(
self, mock_record_event: mock.MagicMock, mock_send_message: mock.MagicMock
) -> None:
self.rule.after(self.event)
assert_slo_metric(mock_record_event, EventLifecycleOutcome.HALTED)
@mock.patch(
"sentry.integrations.discord.client.DiscordClient.send_message",
side_effect=ApiError(code=400, text="Bad request"),
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def assert_lifecycle_metrics_failure_for_other_api_error(
self, mock_record_event: mock.MagicMock, mock_send_message: mock.MagicMock
) -> None:
self.rule.after(self.event)
assert_slo_metric(mock_record_event, EventLifecycleOutcome.FAILURE)
@responses.activate
@mock.patch("sentry.analytics.record")
def test_basic(self, mock_record: mock.MagicMock) -> None:
notification_uuid = str(uuid4())
results = list(self.rule.after(self.event, notification_uuid=notification_uuid))
assert len(results) == 1
results[0].callback(self.event, futures=[])
body = responses.calls[0].request.body
data = orjson.loads(body)
embed = data["embeds"][0]
assert embed == {
"title": build_attachment_title(self.event.group),
"url": get_title_link(
self.event.group,
self.event,
False,
False,
None,
ExternalProviders.DISCORD,
notification_uuid=notification_uuid,
),
"color": LEVEL_TO_COLOR["error"],
"footer": {"text": build_footer(self.event.group, self.event.project, "{text}", None)},
"fields": [],
"timestamp": self.event.timestamp,
}
buttons = data["components"][0]["components"]
assert (
buttons[0]["custom_id"] == f"{DiscordComponentCustomIds.RESOLVE}:{self.event.group.id}"
)
assert (
buttons[1]["custom_id"] == f"{DiscordComponentCustomIds.ARCHIVE}:{self.event.group.id}"
)
assert (
buttons[2]["custom_id"]
== f"{DiscordComponentCustomIds.ASSIGN_DIALOG}:{self.event.group.id}"
)
assert_any_analytics_event(
mock_record,
DiscordIntegrationNotificationSent(
category="issue_alert",
organization_id=self.organization.id,
project_id=self.project.id,
group_id=self.event.group_id,
notification_uuid=notification_uuid,
alert_id=None,
),
)
assert_last_analytics_event(
mock_record,
AlertSentEvent(
provider="discord",
alert_id="",
alert_type="issue_alert",
organization_id=self.organization.id,
project_id=self.project.id,
external_id=self.channel_id,
notification_uuid=notification_uuid,
),
)
@responses.activate
def test_has_releases(self) -> None:
release = Release.objects.create(
organization_id=self.organization.id,
version="1.0",
)
release.add_project(self.project)
results = list(self.rule.after(self.event))
assert len(results) == 1
results[0].callback(self.event, futures=[])
body = responses.calls[0].request.body
data = orjson.loads(body)
buttons = data["components"][0]["components"]
assert (
buttons[0]["custom_id"]
== f"{DiscordComponentCustomIds.RESOLVE_DIALOG}:{self.event.group.id}"
)
assert (
buttons[1]["custom_id"] == f"{DiscordComponentCustomIds.ARCHIVE}:{self.event.group.id}"
)
assert (
buttons[2]["custom_id"]
== f"{DiscordComponentCustomIds.ASSIGN_DIALOG}:{self.event.group.id}"
)
@responses.activate
@mock.patch(
"sentry.integrations.discord.message_builder.issues.Group.get_status",
return_value=GroupStatus.RESOLVED,
)
def test_resolved(self, mock_get_status: mock.MagicMock) -> None:
results = list(self.rule.after(self.event))
assert len(results) == 1
results[0].callback(self.event, futures=[])
body = responses.calls[0].request.body
data = orjson.loads(body)
buttons = data["components"][0]["components"]
assert (
buttons[0]["custom_id"]
== f"{DiscordComponentCustomIds.UNRESOLVE}:{self.event.group.id}"
)
assert (
buttons[1]["custom_id"] == f"{DiscordComponentCustomIds.ARCHIVE}:{self.event.group.id}"
)
assert (
buttons[2]["custom_id"]
== f"{DiscordComponentCustomIds.ASSIGN_DIALOG}:{self.event.group.id}"
)
@responses.activate
@mock.patch(
"sentry.integrations.discord.message_builder.issues.Group.get_status",
return_value=GroupStatus.IGNORED,
)
def test_ignored(self, mock_get_status: mock.MagicMock) -> None:
results = list(self.rule.after(self.event))
assert len(results) == 1
results[0].callback(self.event, futures=[])
body = responses.calls[0].request.body
data = orjson.loads(body)
buttons = data["components"][0]["components"]
assert (
buttons[0]["custom_id"] == f"{DiscordComponentCustomIds.RESOLVE}:{self.event.group.id}"
)
assert (
buttons[1]["custom_id"]
== f"{DiscordComponentCustomIds.MARK_ONGOING}:{self.event.group.id}"
)
assert (
buttons[2]["custom_id"]
== f"{DiscordComponentCustomIds.ASSIGN_DIALOG}:{self.event.group.id}"
)
@responses.activate
def test_feature_flag_disabled(self) -> None:
results = list(self.rule.after(self.event))
assert len(results) == 1
results[0].callback(self.event, futures=[])
responses.assert_call_count(f"{MESSAGE_URL.format(channel_id=self.channel_id)}", 0)
@responses.activate
def test_integration_removed(self) -> None:
integration_service.delete_integration(integration_id=self.discord_integration.id)
results = list(self.rule.after(self.event))
assert len(results) == 0
@responses.activate
@mock.patch(
"sentry.integrations.discord.actions.issue_alert.form.validate_channel_id",
return_value=None,
)
def test_get_form_instance(self, mock_validate_channel_id: mock.MagicMock) -> None:
form = self.rule.get_form_instance()
form.full_clean()
assert form.is_valid()
assert int(form.cleaned_data["server"]) == self.discord_integration.id
assert form.cleaned_data["channel_id"] == self.channel_id
assert form.cleaned_data["tags"] == self.tags
assert mock_validate_channel_id.call_count == 1
@responses.activate
def test_label(self) -> None:
label = self.rule.render_label()
assert (
label
== f"Send a notification to the Cool server Discord server in the channel with ID or URL: {self.channel_id} and show tags [{self.tags}] in the notification."
)
| DiscordIssueAlertTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.