language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | python-poetry__poetry | src/poetry/utils/env/system_env.py | {
"start": 380,
"end": 2658
} | class ____(Env):
"""
A system (i.e. not a virtualenv) Python environment.
"""
@property
def python(self) -> Path:
return Path(sys.executable)
@property
def sys_path(self) -> list[str]:
return sys.path
def get_paths(self) -> dict[str, str]:
import site
paths = sysconfig.get_paths().copy()
if site.check_enableusersite():
paths["usersite"] = site.getusersitepackages()
paths["userbase"] = site.getuserbase()
return paths
def get_supported_tags(self) -> list[Tag]:
return list(sys_tags())
def get_marker_env(self) -> MarkerEnv:
if hasattr(sys, "implementation"):
info = sys.implementation.version
iver = f"{info.major}.{info.minor}.{info.micro}"
kind = info.releaselevel
if kind != "final":
iver += kind[0] + str(info.serial)
implementation_name = sys.implementation.name
else:
iver = "0"
implementation_name = ""
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
# Workaround for https://github.com/python/cpython/issues/99968
"python_full_version": platform.python_version().rstrip("+"),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version().split(".")[:2]),
"sys_platform": sys.platform,
"version_info": sys.version_info,
"interpreter_name": interpreter_name(),
"interpreter_version": interpreter_version(),
"sysconfig_platform": sysconfig.get_platform(),
"free_threading": bool(sysconfig.get_config_var("Py_GIL_DISABLED")),
}
def is_venv(self) -> bool:
return self._path != self._base
def _get_lib_dirs(self) -> list[Path]:
return super()._get_lib_dirs() + [Path(d) for d in site.getsitepackages()]
| SystemEnv |
python | encode__django-rest-framework | tests/test_middleware.py | {
"start": 2046,
"end": 2691
} | class ____:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
assert isinstance(request, HttpRequest)
# Parse body with underlying Django request
request.body
# Process request with DRF view
response = self.get_response(request)
# Ensure request.POST is set as appropriate
if is_form_media_type(request.content_type):
assert request.POST == {'foo': ['bar']}
else:
assert request.POST == {}
return response
@override_settings(ROOT_URLCONF='tests.test_middleware')
| RequestPOSTMiddleware |
python | python-openxml__python-docx | tests/image/test_jpeg.py | {
"start": 7550,
"end": 8550
} | class ____:
def it_can_construct_from_a_stream_and_offset(self, from_stream_fixture):
stream, marker_code, offset, _Marker__init_, length = from_stream_fixture
marker = _Marker.from_stream(stream, marker_code, offset)
_Marker__init_.assert_called_once_with(ANY, marker_code, offset, length)
assert isinstance(marker, _Marker)
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
(JPEG_MARKER_CODE.SOI, 2, 0),
(JPEG_MARKER_CODE.APP0, 4, 16),
]
)
def from_stream_fixture(self, request, _Marker__init_):
marker_code, offset, length = request.param
bytes_ = b"\xff\xd8\xff\xe0\x00\x10"
stream_reader = StreamReader(io.BytesIO(bytes_), BIG_ENDIAN)
return stream_reader, marker_code, offset, _Marker__init_, length
@pytest.fixture
def _Marker__init_(self, request):
return initializer_mock(request, _Marker)
| Describe_Marker |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 4587,
"end": 4882
} | class ____(AtomicRule):
"""integrate((x**a)**b, x)"""
base: Expr
exp: Expr
def eval(self) -> Expr:
m = self.base * self.integrand
return Piecewise((m / (self.exp + 1), Ne(self.exp, -1)),
(m * log(self.base), True))
@dataclass
| NestedPowRule |
python | kamyu104__LeetCode-Solutions | Python/reachable-nodes-with-restrictions.py | {
"start": 35,
"end": 826
} | class ____(object):
def reachableNodes(self, n, edges, restricted):
"""
:type n: int
:type edges: List[List[int]]
:type restricted: List[int]
:rtype: int
"""
adj = [[] for _ in xrange(n)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
result = 0
lookup = [False]*n
for x in restricted:
lookup[x] = True
q = [0]
lookup[0] = True
while q:
new_q = []
for u in q:
result += 1
for v in adj[u]:
if lookup[v]:
continue
lookup[v] = True
new_q.append(v)
q = new_q
return result
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess10.py | {
"start": 360,
"end": 422
} | class ____(metaclass=_IntDescriptorMeta): ...
| IntDescriptorClass |
python | celery__celery | celery/bootsteps.py | {
"start": 11324,
"end": 12273
} | class ____(StartStopStep):
"""Bootstep that starts a message consumer."""
requires = ('celery.worker.consumer:Connection',)
consumers = None
def get_consumers(self, channel):
raise NotImplementedError('missing get_consumers')
def start(self, c):
channel = c.connection.channel()
self.consumers = self.get_consumers(channel)
for consumer in self.consumers or []:
consumer.consume()
def stop(self, c):
self._close(c, True)
def shutdown(self, c):
self._close(c, False)
def _close(self, c, cancel_consumers=True):
channels = set()
for consumer in self.consumers or []:
if cancel_consumers:
ignore_errors(c.connection, consumer.cancel)
if consumer.channel:
channels.add(consumer.channel)
for channel in channels:
ignore_errors(c.connection, channel.close)
| ConsumerStep |
python | ray-project__ray | python/ray/tune/tests/test_logger.py | {
"start": 10217,
"end": 15359
} | class ____(unittest.TestCase):
"""Test Aim integration."""
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir, ignore_errors=True)
def initialize_logger(self, repo=None, experiment_name=None, metrics=None):
try:
from aim import Repo
except ImportError:
print("Skipping rest of test as aim is not installed.")
return
class Dummy:
pass
self.config = {
"a": 2,
"b": [1, 2],
"c": {"d": {"e": 123}},
"int32": np.int32(1),
"int64": np.int64(2),
"bool8": np.bool_(True),
"float32": np.float32(3),
"float64": np.float64(4),
"bad": Dummy(),
}
trial_logdir = os.path.join(self.test_dir, "trial_logdir")
trials = [
Trial(
evaluated_params=self.config,
trial_id="aim_1",
experiment_path=self.test_dir,
logdir=trial_logdir,
experiment_dir_name="aim_test",
path="bucket/aim_test/trial_0_logdir",
),
Trial(
evaluated_params=self.config,
trial_id="aim_2",
experiment_path=self.test_dir,
logdir=trial_logdir,
experiment_dir_name="aim_test",
path="bucket/aim_test/trial_1_logdir",
),
]
# Test that aim repo is saved to the experiment directory
# (one up from the trial directory) as the default.
# In this example, this is `self.test_dir`.
repo = repo or self.test_dir
logger = AimLoggerCallback(
repo=repo, experiment_name=experiment_name, metrics=metrics
)
for i, t in enumerate(trials):
with self.assertLogs("ray.tune.logger", level="INFO") as cm:
logger.log_trial_start(t)
# Check that we log that the "bad" hparam gets thrown away
assert "INFO" in cm.output[0]
logger.on_trial_result(0, [], t, result(0, 3 * i + 1))
logger.on_trial_result(1, [], t, result(1, 3 * i + 2))
logger.on_trial_result(
2, [], t, result(2, 3 * i + 3, score=[1, 2, 3], hello={"world": 1})
)
logger.on_trial_complete(3, [], t)
aim_repo = Repo(repo)
runs = list(aim_repo.iter_runs())
assert len(runs) == 2
runs.sort(key=lambda r: r["trial_id"])
return runs
def validateLogs(self, runs: list, metrics: list = None):
expected_logged_hparams = set(flatten_dict(self.config)) - {"bad"}
for i, run in enumerate(runs):
assert set(run["hparams"]) == expected_logged_hparams
assert run.get("trial_log_dir")
assert run.get("trial_ip")
results = None
all_tune_metrics = set()
for metric in run.metrics():
if metric.name.startswith("ray/tune/"):
all_tune_metrics.add(metric.name.replace("ray/tune/", ""))
if metric.name == "ray/tune/episode_reward_mean":
results = metric.values.values_list()
assert results
# Make sure that the set of reported metrics matches with the
# set of metric names passed in
# If None is passed in, then all Tune metrics get reported
assert metrics is None or set(metrics) == all_tune_metrics
results = [int(res) for res in results]
if i == 0:
self.assertSequenceEqual(results, [1, 2, 3])
elif i == 1:
self.assertSequenceEqual(results, [4, 5, 6])
def testDefault(self):
"""Test AimLoggerCallback with default settings.
- Req: a repo gets created at the experiment-level directory.
- Req: the experiment param passed into each aim Run is the Tune experiment name
"""
runs = self.initialize_logger()
self.validateLogs(runs)
for run in runs:
assert run.repo.path == os.path.join(self.test_dir, ".aim")
assert run.experiment == "aim_test"
def testFilteredMetrics(self):
"""Test AimLoggerCallback, logging only a subset of metrics."""
metrics_to_log = ("episode_reward_mean",)
runs = self.initialize_logger(metrics=metrics_to_log)
self.validateLogs(runs=runs, metrics=metrics_to_log)
def testCustomConfigurations(self):
"""Test AimLoggerCallback, setting a custom repo and experiment name."""
custom_repo = os.path.join(self.test_dir, "custom_repo")
runs = self.initialize_logger(repo=custom_repo, experiment_name="custom")
self.validateLogs(runs)
for run in runs:
assert run.repo.path == os.path.join(custom_repo, ".aim")
assert run.experiment == "custom"
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
| AimLoggerSuite |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 109160,
"end": 111806
} | class ____(TypedDict, total=False):
type: Required[Literal['model-fields']]
fields: Required[dict[str, ModelField]]
model_name: str
computed_fields: list[ComputedField]
strict: bool
extras_schema: CoreSchema
extras_keys_schema: CoreSchema
extra_behavior: ExtraBehavior
from_attributes: bool
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def model_fields_schema(
fields: dict[str, ModelField],
*,
model_name: str | None = None,
computed_fields: list[ComputedField] | None = None,
strict: bool | None = None,
extras_schema: CoreSchema | None = None,
extras_keys_schema: CoreSchema | None = None,
extra_behavior: ExtraBehavior | None = None,
from_attributes: bool | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> ModelFieldsSchema:
"""
Returns a schema that matches the fields of a Pydantic model, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
wrapper_schema = core_schema.model_fields_schema(
{'a': core_schema.model_field(core_schema.str_schema())}
)
v = SchemaValidator(wrapper_schema)
print(v.validate_python({'a': 'hello'}))
#> ({'a': 'hello'}, None, {'a'})
```
Args:
fields: The fields of the model
model_name: The name of the model, used for error messages, defaults to "Model"
computed_fields: Computed fields to use when serializing the model, only applies when directly inside a model
strict: Whether the model is strict
extras_schema: The schema to use when validating extra input data
extras_keys_schema: The schema to use when validating the keys of extra input data
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
extra_behavior: The extra behavior to use for the model fields
from_attributes: Whether the model fields should be populated from attributes
serialization: Custom serialization schema
"""
return _dict_not_none(
type='model-fields',
fields=fields,
model_name=model_name,
computed_fields=computed_fields,
strict=strict,
extras_schema=extras_schema,
extras_keys_schema=extras_keys_schema,
extra_behavior=extra_behavior,
from_attributes=from_attributes,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| ModelFieldsSchema |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/ssm/resources.py | {
"start": 4619,
"end": 14778
} | class ____(ResourceWithBoto3Configuration):
"""Resource that provides a dict which maps selected SSM Parameter Store parameters to
their string values. Optionally sets selected parameters as environment variables.
Example:
.. code-block:: python
import os
from typing import Dict
from dagster import build_op_context, job, op
from dagster_aws.ssm import ParameterStoreResource, ParameterStoreTag
@op
def example_parameter_store_op(parameter_store: ParameterStoreResource):
return parameter_store.fetch_parameters().get("my-parameter-name")
@op
def example_parameter_store_op_2(parameter_store: ParameterStoreResource):
with parameter_store.parameters_in_environment():
return os.getenv("my-other-parameter-name")
@job
def example_job():
example_parameter_store_op()
example_parameter_store_op_2()
defs = Definitions(
jobs=[example_job],
resource_defs={
'parameter_store': ParameterStoreResource(
region_name='us-west-1',
parameter_tags=[ParameterStoreTag(key='my-tag-key', values=['my-tag-value'])],
add_to_environment=True,
with_decryption=True,
)
},
)
"""
parameters: list[str] = Field(
default=[], description="An array of AWS SSM Parameter Store parameter names to fetch."
)
parameter_tags: list[ParameterStoreTag] = Field(
default=[],
description=(
"AWS SSM Parameter store parameters with this tag will be fetched and made available."
),
)
parameter_paths: list[str] = Field(
default=[], description="List of path prefixes to pull parameters from."
)
with_decryption: bool = Field(
default=False,
description=(
"Whether to decrypt parameters upon retrieval. Is ignored by AWS if parameter type is"
" String or StringList"
),
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
@contextmanager
def parameters_in_environment(
self,
parameters: Optional[list[str]] = None,
parameter_tags: Optional[list[ParameterStoreTag]] = None,
parameter_paths: Optional[list[str]] = None,
) -> Generator[dict[str, str], None, None]:
"""Yields a dict which maps selected Parameter Store parameters to their string values. Also
sets chosen parameters as environment variables.
Args:
parameters (Optional[List[str]]): An array of AWS SSM Parameter Store parameter names to fetch.
Note that this will override the parameters specified in the resource config.
parameter_tags (Optional[List[ParameterStoreTag]]): AWS SSM Parameter store parameters with this tag
will be fetched and made available. Note that this will override the parameter_tags specified
in the resource config.
parameter_paths (Optional[List[str]]): List of path prefixes to pull parameters from. Note that this
will override the parameter_paths specified in the resource config.
"""
ssm_manager = construct_ssm_client(
max_attempts=self.max_attempts,
region_name=self.region_name,
profile_name=self.profile_name,
)
parameters_to_fetch = parameters if parameters is not None else self.parameters
parameter_tags_to_fetch = (
parameter_tags if parameter_tags is not None else self.parameter_tags
)
parameter_paths_to_fetch = (
parameter_paths if parameter_paths is not None else self.parameter_paths
)
results = []
if parameters_to_fetch:
results.append(
get_parameters_by_name(ssm_manager, parameters_to_fetch, self.with_decryption)
)
if parameter_tags_to_fetch:
parameter_tag_inputs = [
{"key": tag.key, "values": tag.values} for tag in parameter_tags_to_fetch
]
results.append(
get_parameters_by_tags(ssm_manager, parameter_tag_inputs, self.with_decryption)
)
if parameter_paths_to_fetch:
results.append(
get_parameters_by_paths(
ssm_manager,
parameter_paths_to_fetch, # type: ignore
self.with_decryption,
recursive=True,
)
)
if not results:
parameter_values = {}
else:
if len(results) > 1:
parameter_values = merge_dicts(*results)
else:
parameter_values = results[0]
with environ(parameter_values):
yield parameter_values
def fetch_parameters(
self,
parameters: Optional[list[str]] = None,
parameter_tags: Optional[list[ParameterStoreTag]] = None,
parameter_paths: Optional[list[str]] = None,
) -> dict[str, str]:
"""Fetches parameters from SSM Parameter Store and returns them as a dict.
Args:
parameters (Optional[List[str]]): An array of AWS SSM Parameter Store parameter names to fetch.
Note that this will override the parameters specified in the resource config.
parameter_tags (Optional[List[ParameterStoreTag]]): AWS SSM Parameter store parameters with this tag
will be fetched and made available. Note that this will override the parameter_tags specified
in the resource config.
parameter_paths (Optional[List[str]]): List of path prefixes to pull parameters from. Note that this
will override the parameter_paths specified in the resource config.
"""
with self.parameters_in_environment(
parameters=parameters, parameter_tags=parameter_tags, parameter_paths=parameter_paths
) as parameter_values:
return parameter_values
LEGACY_PARAMETERSTORE_SCHEMA = {
**cast("Shape", ParameterStoreResource.to_config_schema().as_field().config_type).fields,
"add_to_environment": LegacyDagsterField(
bool,
default_value=False,
description="Whether to add the parameters to the environment. Defaults to False.",
),
}
@beta
@dagster_maintained_resource
@resource(config_schema=LEGACY_PARAMETERSTORE_SCHEMA)
@contextmanager
def parameter_store_resource(context) -> Any:
"""Resource that provides a dict which maps selected SSM Parameter Store parameters to
their string values. Optionally sets selected parameters as environment variables.
Example:
.. code-block:: python
import os
from dagster import build_op_context, job, op
from dagster_aws.ssm import parameter_store_resource
@op(required_resource_keys={'parameter_store'})
def example_parameter_store_op(context):
return context.resources.parameter_store.get("my-parameter-name")
@op(required_resource_keys={'parameter_store'})
def example_parameter_store_op_2(context):
return os.getenv("my-other-parameter-name")
@job(resource_defs={'parameter_store': parameter_store_resource})
def example_job():
example_parameter_store_op()
example_parameter_store_op_2()
example_job.execute_in_process(
run_config={
'resources': {
'parameter_store': {
'config': {
'region_name': 'us-west-1',
'parameter_tags': 'dagster',
'add_to_environment': True,
'with_decryption': True,
}
}
}
}
)
Note that your ops must also declare that they require this resource with
`required_resource_keys`, or it will not be initialized for the execution of their compute
functions.
You may configure this resource as follows:
.. code-block:: YAML
resources:
parameter_store:
config:
region_name: "us-west-1"
# Optional[str]: Specifies a custom region for the Parameter Store session. Default is chosen
# through the ordinary boto credential chain.
profile_name: "dev"
# Optional[str]: Specifies a custom profile for Parameter Store session. Default is default
# profile as specified in ~/.aws/credentials file
parameters: ["parameter1", "/path/based/parameter2"]
# Optional[List[str]]: Specifies a list of parameter names to pull from parameter store.
parameters_tag: "dagster"
# Optional[Sequence[Dict[str, Any]]]: Specifies a list of tag specifications, all parameters which have the tag set
will be pulled from Parameter Store. Each tag specification is in the format {"tag": "tag name or prefix", "option": "BeginsWith|Equals"};
when option == "BeginsWith", all parameters with tags that start with the tag value will be pulled.
add_to_environment: true
# Optional[bool]: Whether to set the selected parameters as environment variables. Defaults
# to false.
"""
add_to_environment = context.resource_config.get("add_to_environment", False)
if add_to_environment:
with ParameterStoreResource.from_resource_context(
context
).parameters_in_environment() as secrets:
yield secrets
else:
yield ParameterStoreResource.from_resource_context(context).fetch_parameters()
| ParameterStoreResource |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_metaclass.py | {
"start": 575,
"end": 640
} | class ____:
pass
@six.add_metaclass(ValidAsMetaclass)
| ThirdGood |
python | weaviate__weaviate-python-client | weaviate/collections/aggregations/near_text/sync.py | {
"start": 193,
"end": 254
} | class ____(_NearTextExecutor[ConnectionSync]):
pass
| _NearText |
python | pytorch__pytorch | torch/backends/_nnapi/serializer.py | {
"start": 608,
"end": 2583
} | class ____:
ADD = 0
AVERAGE_POOL_2D = 1
CONCATENATION = 2
CONV_2D = 3
DEPTHWISE_CONV_2D = 4
DEPTH_TO_SPACE = 5
DEQUANTIZE = 6
EMBEDDING_LOOKUP = 7
FLOOR = 8
FULLY_CONNECTED = 9
HASHTABLE_LOOKUP = 10
L2_NORMALIZATION = 11
L2_POOL_2D = 12
LOCAL_RESPONSE_NORMALIZATION = 13
LOGISTIC = 14
LSH_PROJECTION = 15
LSTM = 16
MAX_POOL_2D = 17
MUL = 18
RELU = 19
RELU1 = 20
RELU6 = 21
RESHAPE = 22
RESIZE_BILINEAR = 23
RNN = 24
SOFTMAX = 25
SPACE_TO_DEPTH = 26
SVDF = 27
TANH = 28
BATCH_TO_SPACE_ND = 29
DIV = 30
MEAN = 31
PAD = 32
SPACE_TO_BATCH_ND = 33
SQUEEZE = 34
STRIDED_SLICE = 35
SUB = 36
TRANSPOSE = 37
ABS = 38
ARGMAX = 39
ARGMIN = 40
AXIS_ALIGNED_BBOX_TRANSFORM = 41
BIDIRECTIONAL_SEQUENCE_LSTM = 42
BIDIRECTIONAL_SEQUENCE_RNN = 43
BOX_WITH_NMS_LIMIT = 44
CAST = 45
CHANNEL_SHUFFLE = 46
DETECTION_POSTPROCESSING = 47
EQUAL = 48
EXP = 49
EXPAND_DIMS = 50
GATHER = 51
GENERATE_PROPOSALS = 52
GREATER = 53
GREATER_EQUAL = 54
GROUPED_CONV_2D = 55
HEATMAP_MAX_KEYPOINT = 56
INSTANCE_NORMALIZATION = 57
LESS = 58
LESS_EQUAL = 59
LOG = 60
LOGICAL_AND = 61
LOGICAL_NOT = 62
LOGICAL_OR = 63
LOG_SOFTMAX = 64
MAXIMUM = 65
MINIMUM = 66
NEG = 67
NOT_EQUAL = 68
PAD_V2 = 69
POW = 70
PRELU = 71
QUANTIZE = 72
QUANTIZED_16BIT_LSTM = 73
RANDOM_MULTINOMIAL = 74
REDUCE_ALL = 75
REDUCE_ANY = 76
REDUCE_MAX = 77
REDUCE_MIN = 78
REDUCE_PROD = 79
REDUCE_SUM = 80
ROI_ALIGN = 81
ROI_POOLING = 82
RSQRT = 83
SELECT = 84
SIN = 85
SLICE = 86
SPLIT = 87
SQRT = 88
TILE = 89
TOPK_V2 = 90
TRANSPOSE_CONV_2D = 91
UNIDIRECTIONAL_SEQUENCE_LSTM = 92
UNIDIRECTIONAL_SEQUENCE_RNN = 93
RESIZE_NEAREST_NEIGHBOR = 94
| NNAPI_OperationCode |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_datetime.py | {
"start": 2482,
"end": 4721
} | class ____:
def test_valid(self) -> None:
prop = bcpd.Datetime()
assert prop.is_valid(-1.0)
assert prop.is_valid(-1)
assert prop.is_valid(0)
assert prop.is_valid(1)
assert prop.is_valid(0.0)
assert prop.is_valid(1.0)
assert prop.is_valid("2020-01-11T13:00:00")
assert prop.is_valid("2020-01-11")
assert prop.is_valid(datetime.datetime.now())
assert prop.is_valid(datetime.time(10,12))
assert prop.is_valid(np.datetime64("2020-01-11"))
if is_installed("pandas"):
import pandas as pd
assert prop.is_valid(pd.Timestamp("2010-01-11"))
def test_invalid(self) -> None:
prop = bcpd.Datetime()
assert not prop.is_valid(None)
assert not prop.is_valid("")
assert not prop.is_valid("02 01 2019")
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
def test_is_timestamp(self) -> None:
assert bcpd.Datetime.is_timestamp(0)
assert bcpd.Datetime.is_timestamp(0.0)
assert bcpd.Datetime.is_timestamp(10)
assert bcpd.Datetime.is_timestamp(10.0)
assert bcpd.Datetime.is_timestamp(-10)
assert bcpd.Datetime.is_timestamp(-10)
assert bcpd.Datetime.is_timestamp(-10.0)
assert not bcpd.Datetime.is_timestamp(True)
assert not bcpd.Datetime.is_timestamp(False)
def test_transform_date(self) -> None:
t = datetime.date(2020, 1, 11)
prop = bcpd.Datetime()
assert prop.transform(t) == convert_date_to_datetime(t)
def test_transform_str(self) -> None:
t = datetime.date(2020, 1, 11)
prop = bcpd.Datetime()
assert prop.transform("2020-01-11") == convert_date_to_datetime(t)
def test_has_ref(self) -> None:
prop = bcpd.Datetime()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpd.Datetime()
assert str(prop) == "Datetime"
| Test_Datetime |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braintree/source_braintree/schemas/cards.py | {
"start": 153,
"end": 538
} | class ____(CatalogModel):
company: str
country_code_alpha2: str
country_code_alpha3: str
country_code_numeric: str
country_name: str
created_at: datetime
customer_id: str
extended_address: str
first_name: str
id: str
last_name: str
locality: str
postal_code: str
region: str
street_address: str
updated_at: datetime
| Address |
python | streamlit__streamlit | lib/streamlit/elements/widgets/chat.py | {
"start": 7169,
"end": 12904
} | class ____(str, Enum):
USER = "user"
ASSISTANT = "assistant"
AI = "ai" # Equivalent to assistant
HUMAN = "human" # Equivalent to user
def _process_avatar_input(
avatar: str | AtomicImage | None, delta_path: str
) -> tuple[BlockProto.ChatMessage.AvatarType.ValueType, str]:
"""Detects the avatar type and prepares the avatar data for the frontend.
Parameters
----------
avatar :
The avatar that was provided by the user.
delta_path : str
The delta path is used as media ID when a local image is served via the media
file manager.
Returns
-------
Tuple[AvatarType, str]
The detected avatar type and the prepared avatar data.
"""
AvatarType = BlockProto.ChatMessage.AvatarType # noqa: N806
if avatar is None:
return AvatarType.ICON, ""
if isinstance(avatar, str) and avatar in {item.value for item in PresetNames}:
# On the frontend, we only support "assistant" and "user" for the avatar.
return (
AvatarType.ICON,
(
"assistant"
if avatar in [PresetNames.AI, PresetNames.ASSISTANT]
else "user"
),
)
if isinstance(avatar, str) and is_emoji(avatar):
return AvatarType.EMOJI, avatar
if isinstance(avatar, str) and avatar.startswith(":material"):
return AvatarType.ICON, validate_material_icon(avatar)
try:
return AvatarType.IMAGE, image_to_url(
avatar,
layout_config=LayoutConfig(width="content"),
clamp=False,
channels="RGB",
output_format="auto",
image_id=delta_path,
)
except Exception as ex:
raise StreamlitAPIException(
"Failed to load the provided avatar value as an image."
) from ex
def _pop_upload_files(
files_value: FileUploaderStateProto | None,
) -> list[UploadedFile]:
if files_value is None:
return []
ctx = get_script_run_ctx()
if ctx is None:
return []
uploaded_file_info = files_value.uploaded_file_info
if len(uploaded_file_info) == 0:
return []
file_recs_list = ctx.uploaded_file_mgr.get_files(
session_id=ctx.session_id,
file_ids=[f.file_id for f in uploaded_file_info],
)
file_recs = {f.file_id: f for f in file_recs_list}
collected_files: list[UploadedFile] = []
for f in uploaded_file_info:
maybe_file_rec = file_recs.get(f.file_id)
if maybe_file_rec is not None:
uploaded_file = UploadedFile(maybe_file_rec, f.file_urls)
collected_files.append(uploaded_file)
# Remove file from manager after creating UploadedFile object.
# Only MemoryUploadedFileManager implements remove_file.
# This explicit type check ensures we only use this cleanup logic
# with manager types we've explicitly approved.
if isinstance(ctx.uploaded_file_mgr, MemoryUploadedFileManager):
ctx.uploaded_file_mgr.remove_file(
session_id=ctx.session_id,
file_id=f.file_id,
)
return collected_files
def _pop_audio_file(
audio_file_info: UploadedFileInfoProto | None,
) -> UploadedFile | None:
"""Extract and return a single audio file from the protobuf message.
Similar to _pop_upload_files but handles a single audio file instead of a list.
Validates that the uploaded file is a WAV file.
Parameters
----------
audio_file_info : UploadedFileInfoProto or None
The protobuf message containing information about the uploaded audio file.
Returns
-------
UploadedFile or None
The extracted audio file if available, None otherwise.
Raises
------
StreamlitAPIException
If the uploaded audio file does not have a `.wav` extension or its MIME type is not
one of the accepted WAV types (`audio/wav`, `audio/wave`, `audio/x-wav`).
"""
if audio_file_info is None:
return None
ctx = get_script_run_ctx()
if ctx is None:
return None
file_recs_list = ctx.uploaded_file_mgr.get_files(
session_id=ctx.session_id,
file_ids=[audio_file_info.file_id],
)
if len(file_recs_list) == 0:
return None
file_rec = file_recs_list[0]
uploaded_file = UploadedFile(file_rec, audio_file_info.file_urls)
# Validate that the file is a WAV file by checking extension and MIME type
if not uploaded_file.name.lower().endswith(_ACCEPTED_AUDIO_EXTENSION):
raise StreamlitAPIException(
f"Invalid file extension for audio input: `{uploaded_file.name}`. "
f"Only WAV files ({_ACCEPTED_AUDIO_EXTENSION}) are accepted."
)
# Validate MIME type (browsers may send different variations of WAV MIME types)
if uploaded_file.type not in _ACCEPTED_AUDIO_MIME_TYPES:
raise StreamlitAPIException(
f"Invalid MIME type for audio input: `{uploaded_file.type}`. "
f"Expected one of {_ACCEPTED_AUDIO_MIME_TYPES}."
)
# Remove the file from the manager after creating the UploadedFile object.
# Only MemoryUploadedFileManager implements remove_file (not part of the
# UploadedFileManager Protocol). This explicit type check ensures we only
# use this cleanup logic with manager types we've explicitly approved.
if audio_file_info and isinstance(ctx.uploaded_file_mgr, MemoryUploadedFileManager):
ctx.uploaded_file_mgr.remove_file(
session_id=ctx.session_id,
file_id=audio_file_info.file_id,
)
return uploaded_file
@dataclass
| PresetNames |
python | facebook__pyre-check | client/tests/dataclasses_merge_test.py | {
"start": 889,
"end": 1181
} | class ____:
x: Optional[int] = field(
default=None, metadata={"merge_policy": Policy.RAISE_WHEN_OVERWRITTEN}
)
def _always_prefer_base(base: Optional[int], override: Optional[int]) -> Optional[int]:
return base
@dataclass_merge
@dataclass(frozen=True)
| RaiseWhenOverwritten |
python | sqlalchemy__sqlalchemy | examples/performance/single_inserts.py | {
"start": 661,
"end": 4850
} | class ____(Base):
__tablename__ = "customer"
id = Column(Integer, Identity(), primary_key=True)
name = Column(String(255))
description = Column(String(255))
Profiler.init("single_inserts", num=10000)
@Profiler.setup
def setup_database(dburl, echo, num):
global engine
engine = create_engine(dburl, echo=echo)
if engine.dialect.name == "sqlite":
engine.pool = pool.StaticPool(creator=engine.pool._creator)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
@Profiler.profile
def test_orm_commit(n):
"""Individual INSERT/COMMIT pairs via the ORM"""
for i in range(n):
session = Session(bind=engine)
session.add(
Customer(
name="customer name %d" % i,
description="customer description %d" % i,
)
)
session.commit()
@Profiler.profile
def test_bulk_save(n):
"""Individual INSERT/COMMIT pairs using the "bulk" API"""
for i in range(n):
session = Session(bind=engine)
session.bulk_save_objects(
[
Customer(
name="customer name %d" % i,
description="customer description %d" % i,
)
]
)
session.commit()
@Profiler.profile
def test_bulk_insert_dictionaries(n):
"""Individual INSERT/COMMIT pairs using the "bulk" API with dictionaries"""
for i in range(n):
session = Session(bind=engine)
session.bulk_insert_mappings(
Customer,
[
dict(
name="customer name %d" % i,
description="customer description %d" % i,
)
],
)
session.commit()
@Profiler.profile
def test_core(n):
"""Individual INSERT/COMMIT pairs using Core."""
for i in range(n):
with engine.begin() as conn:
conn.execute(
Customer.__table__.insert(),
dict(
name="customer name %d" % i,
description="customer description %d" % i,
),
)
@Profiler.profile
def test_core_query_caching(n):
"""Individual INSERT/COMMIT pairs using Core with query caching"""
cache = {}
ins = Customer.__table__.insert()
for i in range(n):
with engine.begin() as conn:
conn.execution_options(compiled_cache=cache).execute(
ins,
dict(
name="customer name %d" % i,
description="customer description %d" % i,
),
)
@Profiler.profile
def test_dbapi_raw_w_connect(n):
"""Individual INSERT/COMMIT pairs w/ DBAPI + connection each time"""
_test_dbapi_raw(n, True)
@Profiler.profile
def test_dbapi_raw_w_pool(n):
"""Individual INSERT/COMMIT pairs w/ DBAPI + connection pool"""
_test_dbapi_raw(n, False)
def _test_dbapi_raw(n, connect):
compiled = (
Customer.__table__.insert()
.values(name=bindparam("name"), description=bindparam("description"))
.compile(dialect=engine.dialect)
)
if compiled.positional:
args = (
("customer name %d" % i, "customer description %d" % i)
for i in range(n)
)
else:
args = (
dict(
name="customer name %d" % i,
description="customer description %d" % i,
)
for i in range(n)
)
sql = str(compiled)
if connect:
for arg in args:
# there's no connection pool, so if these were distinct
# calls, we'd be connecting each time
conn = engine.pool._creator()
cursor = conn.cursor()
cursor.execute(sql, arg)
cursor.lastrowid
conn.commit()
conn.close()
else:
for arg in args:
conn = engine.raw_connection()
cursor = conn.cursor()
cursor.execute(sql, arg)
cursor.lastrowid
conn.commit()
conn.close()
if __name__ == "__main__":
Profiler.main()
| Customer |
python | django__django | tests/serializers/models/data.py | {
"start": 2247,
"end": 2532
} | class ____(models.Model):
"""A tag on an item."""
data = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["data"]
| Tag |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 73974,
"end": 76319
} | class ____(ClapPreTrainedModel):
config: ClapTextConfig
input_modalities = ("text",)
def __init__(self, config: ClapTextConfig):
super().__init__(config)
self.text_model = ClapTextModel(config)
self.text_projection = ClapProjectionLayer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.text_model.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.text_model.embeddings.word_embeddings = value
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ClapTextModelOutput]:
r"""
Examples:
```python
>>> from transformers import AutoTokenizer, ClapTextModelWithProjection
>>> model = ClapTextModelWithProjection.from_pretrained("laion/clap-htsat-unfused")
>>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")
>>> inputs = tokenizer(["a sound of a cat", "a sound of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> text_embeds = outputs.text_embeds
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output
text_embeds = self.text_projection(pooled_output)
return ClapTextModelOutput(
text_embeds=text_embeds,
last_hidden_state=text_outputs.last_hidden_state,
hidden_states=text_outputs.hidden_states,
attentions=text_outputs.attentions,
)
@auto_docstring
| ClapTextModelWithProjection |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/connectors/test_callback_connector.py | {
"start": 3789,
"end": 3890
} | class ____(Callback):
def state_dict(self):
return {"content0": 0}
| StatefulCallbackContent0 |
python | ray-project__ray | python/ray/train/_internal/data_config.py | {
"start": 401,
"end": 7002
} | class ____:
"""Class responsible for configuring Train dataset preprocessing.
For advanced use cases, this class can be subclassed and the `configure()` method
overriden for custom data preprocessing.
"""
def __init__(
self,
datasets_to_split: Union[Literal["all"], List[str]] = "all",
execution_options: Optional[
Union[ExecutionOptions, Dict[str, ExecutionOptions]]
] = None,
enable_shard_locality: bool = True,
):
"""Construct a DataConfig.
Args:
datasets_to_split: Specifies which datasets should be split among workers.
Can be set to "all" or a list of dataset names. Defaults to "all",
i.e. split all datasets.
execution_options: The execution options to pass to Ray Data. Can be either:
1. A single ExecutionOptions object that is applied to all datasets.
2. A dict mapping dataset names to ExecutionOptions. If a dataset name
is not in the dict, it defaults to ``DataConfig.default_ingest_options()``.
By default, the options are optimized for data ingest. When overriding,
base your options off ``DataConfig.default_ingest_options()``.
enable_shard_locality: If true, dataset sharding across Train workers will
consider locality to minimize cross-node data transfer. Enabled by default.
"""
if isinstance(datasets_to_split, list) or datasets_to_split == "all":
self._datasets_to_split = datasets_to_split
else:
raise TypeError(
"`datasets_to_split` should be a 'all' or a list of strings of "
"dataset names. Received "
f"{type(datasets_to_split).__name__} with value {datasets_to_split}."
)
default_execution_options = DataConfig.default_ingest_options()
if isinstance(execution_options, ExecutionOptions):
default_execution_options = execution_options
# If None, all datasets will use the default ingest options.
self._execution_options: Dict[str, ExecutionOptions] = defaultdict(
lambda: copy.deepcopy(default_execution_options)
)
if isinstance(execution_options, dict):
self._execution_options.update(execution_options)
self._enable_shard_locality = enable_shard_locality
self._num_train_cpus = 0.0
self._num_train_gpus = 0.0
def set_train_total_resources(self, num_train_cpus: float, num_train_gpus: float):
"""Set the total number of CPUs and GPUs used by training.
If CPU or GPU resource limits are not set, they will be set to the
total cluster resources minus the resources used by training.
"""
# TODO: We may also include other resources besides CPU and GPU.
self._num_train_cpus = num_train_cpus
self._num_train_gpus = num_train_gpus
def _get_execution_options(self, dataset_name: str) -> ExecutionOptions:
"""Return a copy of the configured execution options for a given dataset name."""
return copy.deepcopy(self._execution_options[dataset_name])
@DeveloperAPI
def configure(
self,
datasets: Dict[str, Dataset],
world_size: int,
worker_handles: Optional[List[ActorHandle]],
worker_node_ids: Optional[List[NodeIdStr]],
**kwargs,
) -> List[Dict[str, DataIterator]]:
"""Configure how Train datasets should be assigned to workers.
Args:
datasets: The datasets dict passed to Train by the user.
world_size: The number of Train workers in total.
worker_handles: The actor handles of the Train workers.
worker_node_ids: The node ids of the Train workers.
kwargs: Forwards compatibility placeholder.
Returns:
A list of dataset splits for each worker. The size of the list must be
equal to `world_size`. Each element of the list contains the assigned
`DataIterator` instances by name for the worker.
"""
output = [{} for _ in range(world_size)]
for dataset_name, dataset in datasets.items():
if dataset.name is None:
dataset.set_name(dataset_name)
if self._datasets_to_split == "all":
datasets_to_split = set(datasets.keys())
else:
datasets_to_split = set(self._datasets_to_split)
locality_hints = worker_node_ids if self._enable_shard_locality else None
for name, ds in datasets.items():
execution_options = self._get_execution_options(name)
if execution_options.is_resource_limits_default():
# If "resource_limits" is not overriden by the user,
# add training-reserved resources to Data's exclude_resources.
execution_options.exclude_resources = (
execution_options.exclude_resources.add(
ExecutionResources(
cpu=self._num_train_cpus, gpu=self._num_train_gpus
)
)
)
ds = ds.copy(ds)
ds.context.execution_options = execution_options
if name in datasets_to_split:
for i, split in enumerate(
ds.streaming_split(
world_size, equal=True, locality_hints=locality_hints
)
):
output[i][name] = split
else:
for i in range(world_size):
output[i][name] = ds.iterator()
return output
@staticmethod
def default_ingest_options() -> ExecutionOptions:
"""The default Ray Data options used for data ingest.
By default, configurations are carried over from what is already set
in DataContext.
"""
ctx = ray.data.DataContext.get_current()
return ExecutionOptions(
# TODO(hchen): Re-enable `locality_with_output` by default after fixing
# https://github.com/ray-project/ray/issues/40607
locality_with_output=ctx.execution_options.locality_with_output,
resource_limits=ctx.execution_options.resource_limits,
exclude_resources=ctx.execution_options.exclude_resources,
preserve_order=ctx.execution_options.preserve_order,
verbose_progress=ctx.execution_options.verbose_progress,
)
| DataConfig |
python | wandb__wandb | wandb/automations/actions.py | {
"start": 4276,
"end": 4416
} | class ____(GQLBase):
action_type: Annotated[ActionType, Field(frozen=True)]
"""The kind of action to be triggered."""
| _BaseActionInput |
python | PrefectHQ__prefect | tests/server/utilities/test_text_search_parser.py | {
"start": 21446,
"end": 24191
} | class ____:
"""Test parsing with international characters and languages"""
def test_japanese_terms(self):
# Japanese: error, flow, test
result = parse_text_search_query("エラー フロー -テスト")
assert result == TextSearchQuery(
include=["エラー", "フロー"], exclude=["テスト"], required=[]
)
def test_chinese_simplified_terms(self):
# Chinese: error, connection, debug
result = parse_text_search_query("错误 连接 -调试")
assert result == TextSearchQuery(
include=["错误", "连接"], exclude=["调试"], required=[]
)
def test_chinese_traditional_terms(self):
# Traditional Chinese: database, timeout
result = parse_text_search_query("資料庫 +超時")
assert result == TextSearchQuery(
include=["資料庫"], exclude=[], required=["超時"]
)
def test_cyrillic_terms(self):
# Russian: error, flow, test
result = parse_text_search_query("ошибка поток -тест")
assert result == TextSearchQuery(
include=["ошибка", "поток"], exclude=["тест"], required=[]
)
def test_french_with_accents(self):
# French: error, connection, test environment
result = parse_text_search_query('erreur connexión -"environment de tést"')
assert result == TextSearchQuery(
include=["erreur", "connexión"],
exclude=["environment de tést"],
required=[],
)
def test_german_compound_words(self):
# German compound words
result = parse_text_search_query(
"Verbindungsfehler Datenbankzugriff -Testumgebung"
)
assert result == TextSearchQuery(
include=["Verbindungsfehler", "Datenbankzugriff"],
exclude=["Testumgebung"],
required=[],
)
def test_arabic_terms(self):
# Arabic: error, connection (right-to-left text)
result = parse_text_search_query("خطأ اتصال")
assert result == TextSearchQuery(
include=["خطأ", "اتصال"], exclude=[], required=[]
)
def test_mixed_languages_in_query(self):
# Mixed language query
result = parse_text_search_query(
'error エラー -debug -調試 +"connection établie"'
)
assert result == TextSearchQuery(
include=["error", "エラー"],
exclude=["debug", "調試"],
required=["connection établie"],
)
def test_emoji_in_search_terms(self):
# Modern usage might include emoji
result = parse_text_search_query("🚫 error ✅ success -🐛 -bug")
assert result == TextSearchQuery(
include=["🚫", "error", "✅", "success"], exclude=["🐛", "bug"], required=[]
)
| TestMultilingualSupport |
python | encode__django-rest-framework | rest_framework/request.py | {
"start": 1087,
"end": 2002
} | class ____:
"""
A context manager that temporarily overrides the method on a request,
additionally setting the `view.request` attribute.
Usage:
with override_method(view, request, 'POST') as request:
... # Do stuff with `view` and `request`
"""
def __init__(self, view, request, method):
self.view = view
self.request = request
self.method = method
self.action = getattr(view, 'action', None)
def __enter__(self):
self.view.request = clone_request(self.request, self.method)
# For viewsets we also set the `.action` attribute.
action_map = getattr(self.view, 'action_map', {})
self.view.action = action_map.get(self.method.lower())
return self.view.request
def __exit__(self, *args, **kwarg):
self.view.request = self.request
self.view.action = self.action
| override_method |
python | huggingface__transformers | src/transformers/models/ijepa/modular_ijepa.py | {
"start": 474,
"end": 3670
} | class ____(ViTEmbeddings):
def __init__(self, config: IJepaConfig, use_mask_token: bool = False) -> None:
super().__init__(config, use_mask_token)
# Remove cls_token from IJepaEmbeddings, as it is not used in the model
del self.cls_token
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches, config.hidden_size))
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1]
num_positions = self.position_embeddings.shape[1]
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
patch_pos_embed = self.position_embeddings
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return patch_pos_embed
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
interpolate_pos_encoding: bool = False,
) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
if bool_masked_pos is not None:
seq_length = embeddings.shape[1]
mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
# replace the masked visual tokens by mask_tokens
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
# add positional encoding to each token
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
@auto_docstring
| IJepaEmbeddings |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 21089,
"end": 21210
} | class ____(BaseModel):
count: int = Field(..., description="Number of points which satisfy the conditions")
| CountResult |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_combined11.py | {
"start": 315,
"end": 2234
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_combined11.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:dispBlanksAs"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart_doughnut = workbook.add_chart({"type": "doughnut"})
chart_pie = workbook.add_chart({"type": "pie"})
worksheet.write_column("H2", ["Donut", 25, 50, 25, 100])
worksheet.write_column("I2", ["Pie", 75, 1, 124])
chart_doughnut.add_series(
{
"name": "=Sheet1!$H$2",
"values": "=Sheet1!$H$3:$H$6",
"points": [
{"fill": {"color": "#FF0000"}},
{"fill": {"color": "#FFC000"}},
{"fill": {"color": "#00B050"}},
{"fill": {"none": True}},
],
}
)
chart_doughnut.set_rotation(270)
chart_doughnut.set_legend({"none": True})
chart_doughnut.set_chartarea(
{
"border": {"none": True},
"fill": {"none": True},
}
)
chart_pie.add_series(
{
"name": "=Sheet1!$I$2",
"values": "=Sheet1!$I$3:$I$6",
"points": [
{"fill": {"none": True}},
{"fill": {"color": "#FF0000"}},
{"fill": {"none": True}},
],
}
)
chart_pie.set_rotation(270)
chart_doughnut.combine(chart_pie)
worksheet.insert_chart("A1", chart_doughnut)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | providers/standard/src/airflow/providers/standard/operators/python.py | {
"start": 3535,
"end": 4196
} | class ____(NamedTuple):
"""Provide the same interface as ``sys.version_info``."""
major: int
minor: int
micro: int
releaselevel: str
serial: int
@classmethod
def from_executable(cls, executable: str) -> _PythonVersionInfo:
"""Parse python version info from an executable."""
cmd = [executable, "-c", 'import sys; print(".".join(map(str, sys.version_info)))']
try:
result = subprocess.check_output(cmd, text=True)
except Exception as e:
raise ValueError(f"Error while executing command {cmd}: {e}")
return cls(*_parse_version_info(result.strip()))
| _PythonVersionInfo |
python | joke2k__faker | faker/providers/currency/ng_NG/__init__.py | {
"start": 46,
"end": 284
} | class ____(CurrencyProvider):
price_formats = ["#.##", "%#.##", "%##.##", "%,###.##"]
def pricetag(self) -> str:
return "\N{NAIRA SIGN}" + "\N{NO-BREAK SPACE}" + self.numerify(self.random_element(self.price_formats))
| Provider |
python | psf__black | tests/data/cases/no_blank_line_before_docstring.py | {
"start": 60,
"end": 118
} | class ____:
"""Please move me up"""
| LineBeforeDocstring |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 37395,
"end": 37754
} | class ____(TestCase):
def test_overflow_error_float_field(self):
field = serializers.FloatField()
with pytest.raises(serializers.ValidationError) as exec_info:
field.to_internal_value(data=math.factorial(171))
assert "Integer value too large to convert to float" in str(exec_info.value.detail)
| TestFloatFieldOverFlowError |
python | getsentry__sentry | src/sentry/preprod/api/models/project_preprod_build_details_models.py | {
"start": 435,
"end": 512
} | class ____(BaseModel):
has_missing_dsym_binaries: bool = False
| AppleAppInfo |
python | squidfunk__mkdocs-material | material/plugins/search/config.py | {
"start": 1887,
"end": 2524
} | class ____(Config):
enabled = Type(bool, default = True)
# Settings for search
lang = Optional(LangOption())
separator = Optional(Type(str))
pipeline = Optional(ListOfItems(Choice(pipeline)))
fields = Type(dict, default = {})
# Settings for text segmentation (Chinese)
jieba_dict = Optional(Type(str))
jieba_dict_user = Optional(Type(str))
# Unsupported settings, originally implemented in MkDocs
indexing = Deprecated(message = "Unsupported option")
prebuild_index = Deprecated(message = "Unsupported option")
min_search_length = Deprecated(message = "Unsupported option")
| SearchConfig |
python | TheAlgorithms__Python | data_structures/hashing/hash_map.py | {
"start": 408,
"end": 459
} | class ____[KEY, VAL]:
key: KEY
val: VAL
| _Item |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/schedule.py | {
"start": 129,
"end": 255
} | class ____(str, Enum):
"""Schedule execution status."""
RUNNING = "RUNNING"
STOPPED = "STOPPED"
| DgApiScheduleStatus |
python | skorch-dev__skorch | skorch/callbacks/training.py | {
"start": 17808,
"end": 22553
} | class ____(Callback):
"""Map arbitrary functions over module parameters filtered by pattern
matching.
In the simplest case the function is only applied once at
the beginning of a given epoch (at ``on_epoch_begin``) but more complex
execution schemes (e.g. periodic application) are possible using
``at`` and ``scheduler``.
Notes
-----
When starting the training process after saving and loading a model,
``ParamMapper`` might re-initialize parts of your model when the
history is not saved along with the model. To avoid this, in case
you use ``ParamMapper`` (or subclasses, e.g. :class:`.Initializer`)
and want to save your model make sure to either (a) use pickle,
(b) save and load the history or (c) remove the parameter mapper
callbacks before continuing training.
Examples
--------
Initialize a layer on first epoch before the first training step:
>>> init = partial(torch.nn.init.uniform_, a=0, b=1)
>>> cb = ParamMapper('linear*.weight', at=1, fn=init)
>>> net = Net(myModule, callbacks=[cb])
Reset layer initialization if train loss reaches a certain value
(e.g. re-initialize on overfit):
>>> at = lambda net: net.history[-1, 'train_loss'] < 0.1
>>> init = partial(torch.nn.init.uniform_, a=0, b=1)
>>> cb = ParamMapper('linear0.weight', at=at, fn=init)
>>> net = Net(myModule, callbacks=[cb])
Periodically freeze and unfreeze all embedding layers:
>>> def my_sched(net):
... if len(net.history) % 2 == 0:
... return skorch.utils.freeze_parameter
... else:
... return skorch.utils.unfreeze_parameter
>>> cb = ParamMapper('embedding*.weight', schedule=my_sched)
>>> net = Net(myModule, callbacks=[cb])
Parameters
----------
patterns : str or callable or list
The pattern(s) to match parameter names against. Patterns are
UNIX globbing patterns as understood by :func:`~fnmatch.fnmatch`.
Patterns can also be callables which will get called with the
parameter name and are regarded as a match when the callable
returns a truthy value.
This parameter also supports lists of str or callables so that
one ``ParamMapper`` can match a group of parameters.
Example: ``'linear*.weight'`` or ``['linear0.*', 'linear1.bias']``
or ``lambda name: name.startswith('linear')``.
fn : function
The function to apply to each parameter separately.
at : int or callable
In case you specify an integer it represents the epoch number the
function ``fn`` is applied to the parameters, in case ``at`` is
a function it will receive ``net`` as parameter and the function
is applied to the parameter once ``at`` returns ``True``.
schedule : callable or None
If specified this callable supersedes the static ``at``/``fn``
combination by dynamically returning the function that is applied
on the matched parameters. This way you can, for example, create a
schedule that periodically freezes and unfreezes layers.
The callable's signature is ``schedule(net: NeuralNet) -> callable``.
"""
def __init__(self, patterns, fn=noop, at=1, schedule=None):
self.at = at
self.fn = fn
self.schedule = schedule
self.patterns = patterns
def initialize(self):
if not self.schedule:
self.schedule = self._default_schedule
if not isinstance(self.patterns, (list, tuple)):
self.patterns = [self.patterns]
if isinstance(self.at, int):
if self.at <= 0:
raise ValueError(
'Invalid value for `at` (at={}). The first possible '
'epoch number is 1.'.format(self.at))
self.at = partial(self._epoch_at, epoch=self.at)
return self
def named_parameters(self, net):
return net.get_all_learnable_params()
def filter_parameters(self, patterns, params):
pattern_fns = (
pattern if callable(pattern) else partial(fnmatch, pat=pattern)
for pattern in patterns
)
for pattern_fn, (name, param) in product(pattern_fns, params):
if pattern_fn(name):
yield name, param
def _default_schedule(self, net):
if self.at(net):
return self.fn
return noop
def _epoch_at(self, net, epoch=1):
return len(net.history) == epoch
def on_epoch_begin(self, net, **kwargs):
params = self.named_parameters(net)
params = self.filter_parameters(self.patterns, params)
map_fn = self.schedule(net)
for _, p in params:
map_fn(p)
| ParamMapper |
python | coleifer__peewee | peewee.py | {
"start": 42047,
"end": 42371
} | class ____(WrappedNode):
def __init__(self, node, cast):
super(Cast, self).__init__(node)
self._cast = cast
self._coerce = False
def __sql__(self, ctx):
return (ctx
.literal('CAST(')
.sql(self.node)
.literal(' AS %s)' % self._cast))
| Cast |
python | spyder-ide__spyder | spyder/widgets/collectionseditor.py | {
"start": 21961,
"end": 24939
} | class ____(ReadOnlyCollectionsModel):
"""Collections Table Model"""
def set_value(self, index, value):
"""Set value"""
self._data[self.keys[index.row()]] = value
self.showndata[self.keys[index.row()]] = value
self.sizes[index.row()] = get_size(value)
self.types[index.row()] = get_human_readable_type(value)
self.sig_setting_data.emit()
def type_to_color(self, python_type, numpy_type):
"""Get the color that corresponds to a Python type."""
# Color for unknown types
color = SpyderPalette.GROUP_12
if numpy_type != 'Unknown':
if numpy_type == 'Array':
color = SpyderPalette.GROUP_9
elif numpy_type == 'Scalar':
color = SpyderPalette.GROUP_2
elif python_type == 'bool':
color = SpyderPalette.GROUP_1
elif python_type in ['int', 'float', 'complex']:
color = SpyderPalette.GROUP_2
elif python_type in ['str', 'unicode']:
color = SpyderPalette.GROUP_3
elif 'datetime' in python_type:
color = SpyderPalette.GROUP_4
elif python_type == 'list':
color = SpyderPalette.GROUP_5
elif python_type in ['set', 'frozenset']:
color = SpyderPalette.GROUP_6
elif python_type == 'tuple':
color = SpyderPalette.GROUP_7
elif python_type == 'dict':
color = SpyderPalette.GROUP_8
elif python_type in ['MaskedArray', 'Matrix', 'NDArray']:
color = SpyderPalette.GROUP_9
elif (python_type in ['DataFrame', 'Series'] or
'Index' in python_type):
color = SpyderPalette.GROUP_10
elif python_type == 'PIL.Image.Image':
color = SpyderPalette.GROUP_11
else:
color = SpyderPalette.GROUP_12
return color
def get_bgcolor(self, index):
"""Background color depending on value."""
value = self.get_value(index)
if index.column() < 3:
color = ReadOnlyCollectionsModel.get_bgcolor(self, index)
else:
if self.remote:
python_type = value['python_type']
numpy_type = value['numpy_type']
else:
python_type = get_type_string(value)
numpy_type = get_numpy_type_string(value)
color_name = self.type_to_color(python_type, numpy_type)
color = QColor(color_name)
color.setAlphaF(0.5)
return color
def setData(self, index, value, role=Qt.EditRole):
"""Cell content change"""
if not index.isValid():
return False
if index.column() < 3:
return False
value = display_to_value(value, self.get_value(index),
ignore_errors=True)
self.set_value(index, value)
self.dataChanged.emit(index, index)
return True
| CollectionsModel |
python | walkccc__LeetCode | solutions/713. Subarray Product Less Than K/713.py | {
"start": 0,
"end": 310
} | class ____:
def numSubarrayProductLessThanK(self, nums: list[int], k: int) -> int:
if k <= 1:
return 0
ans = 0
prod = 1
j = 0
for i, num in enumerate(nums):
prod *= num
while prod >= k:
prod /= nums[j]
j += 1
ans += i - j + 1
return ans
| Solution |
python | apache__airflow | providers/tableau/tests/unit/tableau/sensors/test_tableau.py | {
"start": 1037,
"end": 2569
} | class ____:
"""
Test Class for JobStatusSensor
"""
def setup_method(self):
self.kwargs = {"job_id": "job_2", "site_id": "test_site", "task_id": "task", "dag": None}
@patch("airflow.providers.tableau.sensors.tableau.TableauHook")
def test_poke(self, mock_tableau_hook):
"""
Test poke
"""
mock_tableau_hook.return_value.__enter__ = Mock(return_value=mock_tableau_hook)
mock_tableau_hook.get_job_status.return_value = TableauJobFinishCode.SUCCESS
sensor = TableauJobStatusSensor(**self.kwargs)
job_finished = sensor.poke(context={})
assert job_finished
mock_tableau_hook.get_job_status.assert_called_once_with(job_id=sensor.job_id)
@pytest.mark.parametrize(
"finish_code",
[
pytest.param(TableauJobFinishCode.ERROR, id="ERROR"),
pytest.param(TableauJobFinishCode.CANCELED, id="CANCELED"),
],
)
@patch("airflow.providers.tableau.sensors.tableau.TableauHook")
def test_poke_failed(self, mock_tableau_hook, finish_code):
"""
Test poke failed
"""
mock_tableau_hook.return_value.__enter__ = Mock(return_value=mock_tableau_hook)
mock_tableau_hook.get_job_status.return_value = finish_code
sensor = TableauJobStatusSensor(**self.kwargs)
with pytest.raises(AirflowException):
sensor.poke({})
mock_tableau_hook.get_job_status.assert_called_once_with(job_id=sensor.job_id)
| TestTableauJobStatusSensor |
python | Textualize__textual | docs/examples/app/widgets01.py | {
"start": 81,
"end": 296
} | class ____(App):
def compose(self) -> ComposeResult:
yield Welcome()
def on_button_pressed(self) -> None:
self.exit()
if __name__ == "__main__":
app = WelcomeApp()
app.run()
| WelcomeApp |
python | pyca__cryptography | src/cryptography/hazmat/primitives/_modes.py | {
"start": 1307,
"end": 1513
} | class ____(Mode, metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def nonce(self) -> utils.Buffer:
"""
The value of the nonce for this mode as bytes.
"""
| ModeWithNonce |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_batch_waiters.py | {
"start": 2058,
"end": 8597
} | class ____:
@pytest.fixture(autouse=True)
def setup_tests(self, patch_hook):
self.job_id = "8ba9d676-4108-4474-9dca-8bbac1da9b19"
self.region_name = AWS_REGION
self.batch_waiters = BatchWaitersHook(region_name=self.region_name)
assert self.batch_waiters.aws_conn_id == "aws_default"
assert self.batch_waiters.region_name == self.region_name
# don't pause in these unit tests
self.mock_delay = mock.Mock(return_value=None)
self.batch_waiters.delay = self.mock_delay
self.mock_exponential_delay = mock.Mock(return_value=0)
self.batch_waiters.exponential_delay = self.mock_exponential_delay
def test_default_config(self):
# the default config is used when no custom config is provided
config = self.batch_waiters.default_config
assert config == self.batch_waiters.waiter_config
assert isinstance(config, dict)
assert config["version"] == 2
assert isinstance(config["waiters"], dict)
waiters = list(sorted(config["waiters"].keys()))
assert waiters == ["JobComplete", "JobExists", "JobRunning"]
def test_list_waiters(self):
# the default config is used when no custom config is provided
config = self.batch_waiters.waiter_config
assert isinstance(config["waiters"], dict)
waiters = list(sorted(config["waiters"].keys()))
assert waiters == ["JobComplete", "JobExists", "JobRunning"]
assert waiters == self.batch_waiters.list_waiters()
def test_waiter_model(self):
model = self.batch_waiters.waiter_model
assert isinstance(model, WaiterModel)
# test some of the default config
assert model.version == 2
waiters = sorted(model.waiter_names)
assert waiters == ["JobComplete", "JobExists", "JobRunning"]
# test errors when requesting a waiter with the wrong name
with pytest.raises(ValueError, match="Waiter does not exist: JobExist"):
model.get_waiter("JobExist")
# test some default waiter properties
waiter = model.get_waiter("JobExists")
assert isinstance(waiter, SingleWaiterConfig)
assert waiter.max_attempts == 100
waiter.max_attempts = 200
assert waiter.max_attempts == 200
assert waiter.delay == 2
waiter.delay = 10
assert waiter.delay == 10
assert waiter.operation == "DescribeJobs"
def test_wait_for_job(self):
import sys
# mock delay for speedy test
mock_jitter = mock.Mock(return_value=0)
self.batch_waiters.add_jitter = mock_jitter
with mock.patch.object(self.batch_waiters, "get_waiter") as get_waiter:
self.batch_waiters.wait_for_job(self.job_id)
assert get_waiter.call_args_list == [
mock.call("JobExists"),
mock.call("JobRunning"),
mock.call("JobComplete"),
]
mock_waiter = get_waiter.return_value
mock_waiter.wait.assert_called_with(jobs=[self.job_id])
assert mock_waiter.wait.call_count == 3
mock_config = mock_waiter.config
assert mock_config.delay == 0
assert mock_config.max_attempts == sys.maxsize
def test_wait_for_job_with_cloudwatch_logs(self):
# mock delay for speedy test
mock_jitter = mock.Mock(return_value=0)
self.batch_waiters.add_jitter = mock_jitter
batch_log_fetcher = mock.Mock(spec=AwsTaskLogFetcher)
mock_get_batch_log_fetcher = mock.Mock(return_value=batch_log_fetcher)
thread_start = mock.Mock(side_effect=lambda: time.sleep(2))
thread_stop = mock.Mock(side_effect=lambda: time.sleep(2))
thread_join = mock.Mock(side_effect=lambda: time.sleep(2))
with (
mock.patch.object(self.batch_waiters, "get_waiter") as mock_get_waiter,
mock.patch.object(batch_log_fetcher, "start", thread_start) as mock_fetcher_start,
mock.patch.object(batch_log_fetcher, "stop", thread_stop) as mock_fetcher_stop,
mock.patch.object(batch_log_fetcher, "join", thread_join) as mock_fetcher_join,
):
# Run the wait_for_job method
self.batch_waiters.wait_for_job(self.job_id, get_batch_log_fetcher=mock_get_batch_log_fetcher)
# Assertions
assert mock_get_waiter.call_args_list == [
mock.call("JobExists"),
mock.call("JobRunning"),
mock.call("JobComplete"),
]
mock_get_waiter.return_value.wait.assert_called_with(jobs=[self.job_id])
mock_get_batch_log_fetcher.assert_called_with(self.job_id)
mock_fetcher_start.assert_called_once()
mock_fetcher_stop.assert_called_once()
mock_fetcher_join.assert_called_once()
def test_wait_for_job_raises_for_client_error(self):
# mock delay for speedy test
mock_jitter = mock.Mock(return_value=0)
self.batch_waiters.add_jitter = mock_jitter
with mock.patch.object(self.batch_waiters, "get_waiter") as get_waiter:
mock_waiter = get_waiter.return_value
mock_waiter.wait.side_effect = ClientError(
error_response={"Error": {"Code": "TooManyRequestsException"}},
operation_name="get job description",
)
with pytest.raises(AirflowException):
self.batch_waiters.wait_for_job(self.job_id)
assert get_waiter.call_args_list == [mock.call("JobExists")]
mock_waiter.wait.assert_called_with(jobs=[self.job_id])
assert mock_waiter.wait.call_count == 1
def test_wait_for_job_raises_for_waiter_error(self):
# mock delay for speedy test
mock_jitter = mock.Mock(return_value=0)
self.batch_waiters.add_jitter = mock_jitter
with mock.patch.object(self.batch_waiters, "get_waiter") as get_waiter:
mock_waiter = get_waiter.return_value
mock_waiter.wait.side_effect = WaiterError(
name="JobExists", reason="unit test error", last_response={}
)
with pytest.raises(AirflowException):
self.batch_waiters.wait_for_job(self.job_id)
assert get_waiter.call_args_list == [mock.call("JobExists")]
mock_waiter.wait.assert_called_with(jobs=[self.job_id])
assert mock_waiter.wait.call_count == 1
@mock_aws
| TestBatchWaiters |
python | numba__numba | numba/tests/test_numbers.py | {
"start": 278,
"end": 3415
} | class ____(TestCase):
""" This tests the 'view' method on NumPy scalars. """
def do_testing(self, inputs, dtypes):
for value, initial_type, expected in inputs:
for target_type, result in zip(dtypes, expected):
view = njit(gen_view(initial_type, target_type))
if not np.isnan(result):
# check against predefined value
self.assertEqual(view(value), target_type(result))
# check against numpy
self.assertEqual(view(value),
view.py_func(value))
else:
# check that our implementation results in nan
self.assertTrue(np.isnan(view(value)))
# check that numpy results in nan
self.assertTrue(np.isnan(view.py_func(value)))
def test_8_bits(self):
dtypes = (np.uint8, np.int8)
# Value Initial Type Expected answers using dtypes
inputs = ((1, np.uint8, (1, 1)),
(-1, np.int8, (255, -1)))
self.do_testing(inputs, dtypes)
def test_32_bits(self):
dtypes = (np.uint32, np.int32, np.float32)
# Value Initial Type Expected answers using dtypes
inputs = ((1, np.uint32, (1, 1, 1.401298464324817e-45)),
(-1, np.int32, (4294967295, -1, np.nan)),
(1.0, np.float32, (1065353216, 1065353216, 1.0)))
self.do_testing(inputs, dtypes)
def test_64_bits(self):
dtypes = (np.uint64, np.int64, np.float64)
# Value Initial Type Expected answers using dtypes
inputs = ((1, np.uint64, (1, 1, 5e-324)),
(-1, np.int64, (18446744073709551615, -1, np.nan)),
(1.0, np.float64, (4607182418800017408,
4607182418800017408,
1.0))
)
self.do_testing(inputs, dtypes)
def test_python_scalar_exception(self):
intty = getattr(np, 'int{}'.format(types.intp.bitwidth))
@njit
def myview():
a = 1
a.view(intty)
with self.assertRaises(TypingError) as e:
myview()
self.assertIn("'view' can only be called on NumPy dtypes, "
"try wrapping the variable 'a' with 'np.<dtype>()'",
str(e.exception))
def do_testing_exceptions(self, pair):
with self.assertRaises(TypingError) as e:
view = njit(gen_view(pair[0], pair[1]))
view(1)
self.assertIn("Changing the dtype of a 0d array is only supported "
"if the itemsize is unchanged",
str(e.exception))
def test_exceptions32(self):
for pair in ((np.int32, np.int8), (np.int8, np.int32)):
self.do_testing_exceptions(pair)
def test_exceptions64(self):
for pair in ((np.int32, np.int64), (np.int64, np.int32)):
self.do_testing_exceptions(pair)
| TestViewIntFloat |
python | pdm-project__pdm | src/pdm/cli/commands/python.py | {
"start": 2031,
"end": 3568
} | class ____(BaseCommand):
"""Remove a Python interpreter installed with PDM"""
arguments = (verbose_option,)
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("version", help="The Python version to remove. E.g. cpython@3.10.3")
def handle(self, project: Project, options: Namespace) -> None:
ui = project.core.ui
root = Path(project.config["python.install_root"]).expanduser()
if not root.exists():
ui.error(f"No Python interpreter found for {options.version!r}")
sys.exit(1)
version = str(options.version)
if root.joinpath(version).exists():
version_dir = root.joinpath(version)
else:
version = options.version.lower()
if "@" not in version: # pragma: no cover
version = f"cpython@{version}"
version_dir = root.joinpath(version)
if not version_dir.exists():
ui.error(f"No Python interpreter found for {options.version!r}")
ui.echo("Installed Pythons:", err=True)
for child in root.iterdir():
if not child.name.startswith("."):
ui.echo(f" {child.name}", err=True)
sys.exit(1)
if version_dir.is_symlink():
version_dir.unlink()
else:
shutil.rmtree(version_dir, ignore_errors=True)
ui.echo(f"[success]Removed installed[/] {options.version}", verbosity=Verbosity.NORMAL)
| RemoveCommand |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_searchstrategy.py | {
"start": 4373,
"end": 4685
} | class ____:
x: str
def to_json(self):
return "surprise!"
def test_jsonable_override():
obj = HasCustomJsonFormat("expected")
assert to_jsonable(obj, avoid_realization=False) == "surprise!"
assert to_jsonable(obj, avoid_realization=True) == "<symbolic>"
@dataclass
| HasCustomJsonFormat |
python | openai__openai-python | src/openai/types/conversations/conversation_item.py | {
"start": 4496,
"end": 4908
} | class ____(BaseModel):
id: str
"""The unique ID of the approval request."""
arguments: str
"""A JSON string of arguments for the tool."""
name: str
"""The name of the tool to run."""
server_label: str
"""The label of the MCP server making the request."""
type: Literal["mcp_approval_request"]
"""The type of the item. Always `mcp_approval_request`."""
| McpApprovalRequest |
python | PyCQA__pylint | tests/functional/u/undefined/undefined_variable.py | {
"start": 3189,
"end": 3312
} | class ____:
""" No error should be raised here. """
def test(self):
""" empty """
return Self1
| Self1 |
python | pytorch__pytorch | torch/distributed/fsdp/_fully_shard/_fsdp_api.py | {
"start": 4608,
"end": 5442
} | class ____(OffloadPolicy):
"""
This offload policy offloads parameters, gradients, and optimizer states to
CPU. Sharded parameters are copied host-to-device before all-gather. The
all-gathered parameters are freed according to ``reshard_after_forward``.
Sharded gradients are copied device-to-host in backward, and the optimizer
step runs on CPU with CPU optimizer states.
Attributes:
pin_memory (bool): Whether to pin sharded parameter and gradient
memory. Pinning memory allows both more efficient H2D/D2H copies
and for the copies to overlap with compute. However, the pinned
memory cannot be used by other processes. Set this to ``False`` if
you have insufficient CPU memory. (Default: ``True``)
"""
pin_memory: bool = True
| CPUOffloadPolicy |
python | keras-team__keras | keras/src/layers/convolutional/depthwise_conv_test.py | {
"start": 10920,
"end": 14753
} | class ____(testing.TestCase):
@parameterized.parameters(
{
"depth_multiplier": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
},
{
"depth_multiplier": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
)
def test_depthwise_conv1d(
self,
depth_multiplier,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
):
layer = layers.DepthwiseConv1D(
depth_multiplier=depth_multiplier,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(depth_multiplier * 4,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_depthwise_conv1d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
{
"depth_multiplier": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
},
{
"depth_multiplier": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
},
{
"depth_multiplier": 6,
"kernel_size": (2, 2),
"strides": (2, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
},
)
def test_depthwise_conv2d(
self,
depth_multiplier,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
):
layer = layers.DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
inputs = np.random.normal(size=[2, 8, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(depth_multiplier * 4,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_depthwise_conv2d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
)
self.assertAllClose(outputs.shape, expected.shape)
self.assertAllClose(outputs, expected, atol=1e-5)
| DepthwiseConvCorrectnessTest |
python | kamyu104__LeetCode-Solutions | Python/subrectangle-queries.py | {
"start": 1044,
"end": 1764
} | class ____(object):
def __init__(self, rectangle):
"""
:type rectangle: List[List[int]]
"""
self.__rectangle = rectangle
def updateSubrectangle(self, row1, col1, row2, col2, newValue):
"""
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:type newValue: int
:rtype: None
"""
for r in xrange(row1, row2+1):
for c in xrange(col1, col2+1):
self.__rectangle[r][c] = newValue
def getValue(self, row, col):
"""
:type row: int
:type col: int
:rtype: int
"""
return self.__rectangle[row][col]
| SubrectangleQueries2 |
python | doocs__leetcode | solution/1000-1099/1051.Height Checker/Solution2.py | {
"start": 0,
"end": 359
} | class ____:
def heightChecker(self, heights: List[int]) -> int:
cnt = [0] * 101
for h in heights:
cnt[h] += 1
ans = i = 0
for j in range(1, 101):
while cnt[j]:
cnt[j] -= 1
if heights[i] != j:
ans += 1
i += 1
return ans
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_drypython_returns.py | {
"start": 954,
"end": 1041
} | class ____(Generic[_LawType]):
"""This type defines law-related operations."""
| Lawful |
python | run-llama__llama_index | llama-index-core/llama_index/core/node_parser/interface.py | {
"start": 8280,
"end": 10107
} | class ____(TextSplitter):
@abstractmethod
def split_text_metadata_aware(self, text: str, metadata_str: str) -> List[str]: ...
def split_texts_metadata_aware(
self, texts: List[str], metadata_strs: List[str]
) -> List[str]:
if len(texts) != len(metadata_strs):
raise ValueError("Texts and metadata_strs must have the same length")
nested_texts = [
self.split_text_metadata_aware(text, metadata)
for text, metadata in zip(texts, metadata_strs)
]
return [item for sublist in nested_texts for item in sublist]
def _get_metadata_str(self, node: BaseNode) -> str:
"""Helper function to get the proper metadata str for splitting."""
embed_metadata_str = node.get_metadata_str(mode=MetadataMode.EMBED)
llm_metadata_str = node.get_metadata_str(mode=MetadataMode.LLM)
# use the longest metadata str for splitting
if len(embed_metadata_str) > len(llm_metadata_str):
metadata_str = embed_metadata_str
else:
metadata_str = llm_metadata_str
return metadata_str
def _parse_nodes(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
metadata_str = self._get_metadata_str(node)
splits = self.split_text_metadata_aware(
node.get_content(metadata_mode=MetadataMode.NONE),
metadata_str=metadata_str,
)
all_nodes.extend(
build_nodes_from_splits(splits, node, id_func=self.id_func)
)
return all_nodes
| MetadataAwareTextSplitter |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/component/button.py | {
"start": 1196,
"end": 1344
} | class ____(DiscordMessageComponentDict):
style: int
url: str
label: NotRequired[str]
disabled: NotRequired[bool]
| DiscordLinkButtonDict |
python | automl__auto-sklearn | autosklearn/pipeline/regression.py | {
"start": 729,
"end": 12452
} | class ____(RegressorMixin, BasePipeline):
"""This class implements the regression task.
It implements a pipeline, which includes one preprocessing step and one
regression algorithm. It can render a search space including all known
regression and preprocessing algorithms.
Contrary to the sklearn API it is not possible to enumerate the
possible parameters in the __init__ function because we only know the
available regressors at runtime. For this reason the user must
specifiy the parameters by passing an instance of
ConfigSpace.configuration_space.Configuration.
Parameters
----------
config : ConfigSpace.configuration_space.Configuration
The configuration to evaluate.
random_state : Optional[int | RandomState]
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance
used by `np.random`.
Attributes
----------
_estimator : The underlying scikit-learn regression model. This
variable is assigned after a call to the
:meth:`autosklearn.pipeline.regression.SimpleRegressionPipeline.fit`
method.
_preprocessor : The underlying scikit-learn preprocessing algorithm. This
variable is only assigned if a preprocessor is specified and
after a call to the
:meth:`autosklearn.pipeline.regression.SimpleRegressionPipeline.fit`
method.
See also
--------
References
----------
Examples
--------
"""
def __init__(
self,
config: Optional[Configuration] = None,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
steps=None,
dataset_properties=None,
include=None,
exclude=None,
random_state: Optional[Union[int, np.random.RandomState]] = None,
init_params=None,
):
self._output_dtype = np.float32
if dataset_properties is None:
dataset_properties = dict()
if "target_type" not in dataset_properties:
dataset_properties["target_type"] = "regression"
super().__init__(
feat_type=feat_type,
config=config,
steps=steps,
dataset_properties=dataset_properties,
include=include,
exclude=exclude,
random_state=random_state,
init_params=init_params,
)
def fit_estimator(self, X, y, **fit_params):
self.y_max_ = np.nanmax(y)
self.y_min_ = np.nanmin(y)
return super(SimpleRegressionPipeline, self).fit_estimator(X, y, **fit_params)
def iterative_fit(self, X, y, n_iter=1, **fit_params):
self.y_max_ = np.nanmax(y)
self.y_min_ = np.nanmin(y)
return super(SimpleRegressionPipeline, self).iterative_fit(
X, y, n_iter=n_iter, **fit_params
)
def predict(self, X, batch_size=None):
"""Predict the classes using the selected model.
Predicted values are capped to approximately the maximum and minimum labels
seen during training.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
batch_size: int or None, defaults to None
batch_size controls whether the pipeline will be
called on small chunks of the data. Useful when calling the
predict method on the whole array X results in a MemoryError.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Returns the predicted values"""
y = super().predict(X, batch_size=batch_size)
if self.y_max_ > 0:
y[y > (2 * self.y_max_)] = 2 * self.y_max_
elif self.y_max_ < 0:
y[y > (0.5 * self.y_max_)] = 0.5 * self.y_max_
if self.y_min_ < 0:
y[y < (2 * self.y_min_)] = 2 * self.y_min_
elif self.y_min_ > 0:
y[y < (0.5 * self.y_min_)] = 0.5 * self.y_min_
return y
def _get_hyperparameter_search_space(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
include=None,
exclude=None,
dataset_properties=None,
):
"""Return the configuration space for the CASH problem.
Parameters
----------
include : dict
If include is given, only the modules specified for nodes
are used. Specify them by their module name; e.g., to include
only the SVM use :python:`include={'regressor':['svr']}`.
exclude : dict
If exclude is given, only the components specified for nodes
are used. Specify them by their module name; e.g., to include
all regressors except the SVM use
:python:`exclude=['regressor': 'svr']`.
Returns
-------
cs : ConfigSpace.configuration_space.Configuration
The configuration space describing the SimpleRegressionClassifier.
"""
cs = ConfigurationSpace()
if dataset_properties is None or not isinstance(dataset_properties, dict):
dataset_properties = dict()
if "target_type" not in dataset_properties:
dataset_properties["target_type"] = "regression"
if dataset_properties["target_type"] != "regression":
dataset_properties["target_type"] = "regression"
if "sparse" not in dataset_properties:
# This dataset is probably dense
dataset_properties["sparse"] = False
cs = self._get_base_search_space(
cs=cs,
feat_type=feat_type,
dataset_properties=dataset_properties,
exclude=exclude,
include=include,
pipeline=self.steps,
)
regressors = cs.get_hyperparameter("regressor:__choice__").choices
preprocessors = cs.get_hyperparameter("feature_preprocessor:__choice__").choices
available_regressors = self._final_estimator.get_available_components(
dataset_properties
)
possible_default_regressor = copy.copy(list(available_regressors.keys()))
default = cs.get_hyperparameter("regressor:__choice__").default_value
del possible_default_regressor[possible_default_regressor.index(default)]
# A regressor which can handle sparse data after the densifier is
# forbidden for memory issues
for key in regressors:
if (
SPARSE
in available_regressors[key].get_properties(dataset_properties=None)[
"input"
]
):
if "densifier" in preprocessors:
while True:
try:
forb_reg = ForbiddenEqualsClause(
cs.get_hyperparameter("regressor:__choice__"), key
)
forb_fpp = ForbiddenEqualsClause(
cs.get_hyperparameter(
"feature_preprocessor:__choice__"
),
"densifier",
)
cs.add_forbidden_clause(
ForbiddenAndConjunction(forb_reg, forb_fpp)
)
# Success
break
except ValueError:
# Change the default and try again
try:
default = possible_default_regressor.pop()
except IndexError:
raise ValueError(
"Cannot find a legal default configuration."
)
cs.get_hyperparameter(
"regressor:__choice__"
).default_value = default
# which would take too long
# Combinations of tree-based models with feature learning:
regressors_ = [
"adaboost",
"ard_regression",
"decision_tree",
"extra_trees",
"gaussian_process",
"gradient_boosting",
"k_nearest_neighbors",
"libsvm_svr",
"mlp",
"random_forest",
]
feature_learning_ = ["kitchen_sinks", "kernel_pca", "nystroem_sampler"]
for r, f in product(regressors_, feature_learning_):
if r not in regressors:
continue
if f not in preprocessors:
continue
while True:
try:
cs.add_forbidden_clause(
ForbiddenAndConjunction(
ForbiddenEqualsClause(
cs.get_hyperparameter("regressor:__choice__"), r
),
ForbiddenEqualsClause(
cs.get_hyperparameter(
"feature_preprocessor:__choice__"
),
f,
),
)
)
break
except KeyError:
break
except ValueError:
# Change the default and try again
try:
default = possible_default_regressor.pop()
except IndexError:
raise ValueError("Cannot find a legal default configuration.")
cs.get_hyperparameter(
"regressor:__choice__"
).default_value = default
self.configuration_space = cs
self.dataset_properties = dataset_properties
return cs
def _get_estimator_components(self):
return regression_components._regressors
def _get_pipeline_steps(
self, dataset_properties, feat_type: Optional[FEAT_TYPE_TYPE] = None
):
steps = []
default_dataset_properties = {"target_type": "regression"}
if dataset_properties is not None and isinstance(dataset_properties, dict):
default_dataset_properties.update(dataset_properties)
steps.extend(
[
[
"data_preprocessor",
DataPreprocessorChoice(
feat_type=feat_type,
dataset_properties=default_dataset_properties,
random_state=self.random_state,
),
],
[
"feature_preprocessor",
feature_preprocessing_components.FeaturePreprocessorChoice(
feat_type=feat_type,
dataset_properties=default_dataset_properties,
random_state=self.random_state,
),
],
[
"regressor",
regression_components.RegressorChoice(
feat_type=feat_type,
dataset_properties=default_dataset_properties,
random_state=self.random_state,
),
],
]
)
return steps
def _get_estimator_hyperparameter_name(self):
return "regressor"
| SimpleRegressionPipeline |
python | zostera__django-bootstrap4 | example/app/forms.py | {
"start": 2772,
"end": 3146
} | class ____(BaseFormSet):
def add_fields(self, form, index):
super().add_fields(form, index)
def clean(self):
super().clean()
raise forms.ValidationError("This error was added to show the non form errors styling")
ContactFormSet = formset_factory(TestForm, formset=ContactBaseFormSet, extra=2, max_num=4, validate_max=True)
| ContactBaseFormSet |
python | doocs__leetcode | solution/1100-1199/1187.Make Array Strictly Increasing/Solution.py | {
"start": 0,
"end": 708
} | class ____:
def makeArrayIncreasing(self, arr1: List[int], arr2: List[int]) -> int:
arr2.sort()
m = 0
for x in arr2:
if m == 0 or x != arr2[m - 1]:
arr2[m] = x
m += 1
arr2 = arr2[:m]
arr = [-inf] + arr1 + [inf]
n = len(arr)
f = [inf] * n
f[0] = 0
for i in range(1, n):
if arr[i - 1] < arr[i]:
f[i] = f[i - 1]
j = bisect_left(arr2, arr[i])
for k in range(1, min(i - 1, j) + 1):
if arr[i - k - 1] < arr2[j - k]:
f[i] = min(f[i], f[i - k - 1] + k)
return -1 if f[n - 1] >= inf else f[n - 1]
| Solution |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 211466,
"end": 212384
} | class ____(object):
# https://github.com/argoproj/argo-events/blob/master/api/sensor.md#argoproj.io/v1alpha1.Template
def __init__(self):
tree = lambda: defaultdict(tree)
self.payload = tree()
def service_account_name(self, service_account_name):
self.payload["serviceAccountName"] = service_account_name
return self
def metadata(self, object_meta):
self.payload["metadata"] = object_meta.to_json()
return self
def container(self, container):
# Luckily this can simply be V1Container and we are spared from writing more
# boilerplate - https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Container.md.
self.payload["container"] = container
return self
def to_json(self):
return self.payload
def __str__(self):
return json.dumps(self.to_json(), indent=4)
| SensorTemplate |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/object_identity.py | {
"start": 4092,
"end": 4626
} | class ____(ObjectIdentityDictionary):
"""Like weakref.WeakKeyDictionary, but compares objects with "is"."""
__slots__ = ["__weakref__"]
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len(list(self._storage))
def __iter__(self):
keys = self._storage.keys()
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
del self[key]
else:
yield unwrapped
| ObjectIdentityWeakKeyDictionary |
python | ipython__ipython | tests/test_interactiveshell.py | {
"start": 21077,
"end": 21826
} | class ____(unittest.TestCase):
@onlyif_unicode_paths
def setUp(self):
self.BASETESTDIR = tempfile.mkdtemp()
self.TESTDIR = join(self.BASETESTDIR, "åäö")
os.mkdir(self.TESTDIR)
with open(
join(self.TESTDIR, "åäötestscript.py"), "w", encoding="utf-8"
) as sfile:
sfile.write("pass\n")
self.oldpath = os.getcwd()
os.chdir(self.TESTDIR)
self.fname = "åäötestscript.py"
def tearDown(self):
os.chdir(self.oldpath)
shutil.rmtree(self.BASETESTDIR)
@onlyif_unicode_paths
def test_1(self):
"""Test safe_execfile with non-ascii path"""
ip.safe_execfile(self.fname, {}, raise_exceptions=True)
| TestSafeExecfileNonAsciiPath |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-firestore/destination_firestore/destination.py | {
"start": 341,
"end": 3357
} | class ____(Destination):
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
"""
TODO
Reads the input stream of messages, config, and catalog to write data to the destination.
This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received
in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been
successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing,
then the source is given the last state message output from this method as the starting point of the next sync.
:param config: dict of JSON configuration matching the configuration declared in spec.json
:param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the
destination
:param input_messages: The stream of input messages received from the source
:return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs
"""
writer = FirestoreWriter(**config)
for configured_stream in configured_catalog.streams:
if configured_stream.destination_sync_mode == DestinationSyncMode.overwrite:
writer.purge(configured_stream.stream.name)
for message in input_messages:
if message.type == Type.STATE:
yield message
elif message.type == Type.RECORD:
record = message.record
writer.write(record.stream, record.data)
else:
# ignore other message types for now
continue
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the destination with the needed permissions
e.g: if a provided API token or password can be used to connect and write to the destination.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this destination, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
writer = FirestoreWriter(**config)
writer.check()
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
| DestinationFirestore |
python | openai__openai-python | src/openai/types/evals/run_cancel_response.py | {
"start": 12365,
"end": 12623
} | class ____(BaseModel):
failed: int
"""Number of tests failed for this criteria."""
passed: int
"""Number of tests passed for this criteria."""
testing_criteria: str
"""A description of the testing criteria."""
| PerTestingCriteriaResult |
python | huggingface__transformers | src/transformers/models/xlm/modeling_xlm.py | {
"start": 47720,
"end": 52966
} | class ____(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.transformer = XLMModel(config)
self.sequence_summary = XLMSequenceSummary(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[dict[str, torch.Tensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
logits = self.sequence_summary(output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring(
custom_intro="""
XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
"""
)
| XLMForSequenceClassification |
python | django__django | tests/model_fields/test_autofield.py | {
"start": 243,
"end": 352
} | class ____(IntegerFieldTests):
model = AutoModel
rel_db_type_class = models.IntegerField
| AutoFieldTests |
python | pytorch__pytorch | torch/testing/_internal/quantization_torch_package_models.py | {
"start": 477,
"end": 951
} | class ____(nn.Module):
def __init__(self, N):
super().__init__()
self.child = LinearReluFunctionalChild(N)
self.w1 = nn.Parameter(torch.empty(N, N))
self.b1 = nn.Parameter(torch.zeros(N))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = self.child(x)
x = torch.nn.functional.linear(x, self.w1, self.b1)
x = torch.nn.functional.relu(x)
return x
| LinearReluFunctional |
python | h5py__h5py | h5py/tests/test_file.py | {
"start": 23249,
"end": 24369
} | class ____(TestCase):
"""
Feature: A File object can be retrieved from any child object,
via the .file property
"""
def test_property(self):
""" File object can be retrieved from subgroup """
fname = self.mktemp()
hfile = File(fname, 'w')
try:
hfile2 = hfile['/'].file
self.assertEqual(hfile, hfile2)
finally:
hfile.close()
def test_close(self):
""" All retrieved File objects are closed at the same time """
fname = self.mktemp()
hfile = File(fname, 'w')
grp = hfile.create_group('foo')
hfile2 = grp.file
hfile3 = hfile['/'].file
hfile2.close()
self.assertFalse(hfile)
self.assertFalse(hfile2)
self.assertFalse(hfile3)
def test_mode(self):
""" Retrieved File objects have a meaningful mode attribute """
hfile = File(self.mktemp(), 'w')
try:
grp = hfile.create_group('foo')
self.assertEqual(grp.file.mode, hfile.mode)
finally:
hfile.close()
| TestFileProperty |
python | langchain-ai__langchain | libs/langchain/langchain_classic/retrievers/document_compressors/listwise_rerank.py | {
"start": 1437,
"end": 5224
} | class ____(BaseDocumentCompressor):
"""Document compressor that uses `Zero-Shot Listwise Document Reranking`.
Adapted from: https://arxiv.org/pdf/2305.02156.pdf
`LLMListwiseRerank` uses a language model to rerank a list of documents based on
their relevance to a query.
!!! note
Requires that underlying model implement `with_structured_output`.
Example usage:
```python
from langchain_classic.retrievers.document_compressors.listwise_rerank import (
LLMListwiseRerank,
)
from langchain_core.documents import Document
from langchain_openai import ChatOpenAI
documents = [
Document("Sally is my friend from school"),
Document("Steve is my friend from home"),
Document("I didn't always like yogurt"),
Document("I wonder why it's called football"),
Document("Where's waldo"),
]
reranker = LLMListwiseRerank.from_llm(
llm=ChatOpenAI(model="gpt-3.5-turbo"), top_n=3
)
compressed_docs = reranker.compress_documents(documents, "Who is steve")
assert len(compressed_docs) == 3
assert "Steve" in compressed_docs[0].page_content
```
"""
reranker: Runnable[dict, list[Document]]
"""LLM-based reranker to use for filtering documents. Expected to take in a dict
with 'documents: Sequence[Document]' and 'query: str' keys and output a
List[Document]."""
top_n: int = 3
"""Number of documents to return."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Callbacks | None = None,
) -> Sequence[Document]:
"""Filter down documents based on their relevance to the query."""
results = self.reranker.invoke(
{"documents": documents, "query": query},
config={"callbacks": callbacks},
)
return results[: self.top_n]
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: BasePromptTemplate | None = None,
**kwargs: Any,
) -> "LLMListwiseRerank":
"""Create a LLMListwiseRerank document compressor from a language model.
Args:
llm: The language model to use for filtering. **Must implement
BaseLanguageModel.with_structured_output().**
prompt: The prompt to use for the filter.
kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMListwiseRerank document compressor that uses the given language model.
"""
if type(llm).with_structured_output == BaseLanguageModel.with_structured_output:
msg = (
f"llm of type {type(llm)} does not implement `with_structured_output`."
)
raise ValueError(msg)
class RankDocuments(BaseModel):
"""Rank the documents by their relevance to the user question.
Rank from most to least relevant.
"""
ranked_document_ids: list[int] = Field(
...,
description=(
"The integer IDs of the documents, sorted from most to least "
"relevant to the user question."
),
)
_prompt = prompt if prompt is not None else _DEFAULT_PROMPT
reranker = RunnablePassthrough.assign(
ranking=RunnableLambda(_get_prompt_input)
| _prompt
| llm.with_structured_output(RankDocuments),
) | RunnableLambda(_parse_ranking)
return cls(reranker=reranker, **kwargs)
| LLMListwiseRerank |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 7408,
"end": 7764
} | class ____(IncrementalShopifySubstream):
parent_stream_class = Orders
slice_key = "order_id"
data_field = "transactions"
cursor_field = "created_at"
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
order_id = stream_slice["order_id"]
return f"orders/{order_id}/{self.data_field}.json"
| Transactions |
python | getsentry__sentry-python | tests/integrations/litellm/test_litellm.py | {
"start": 2827,
"end": 23123
} | class ____:
def __init__(self, model="text-embedding-ada-002", data=None, usage=None):
self.model = model
self.data = data or [MockEmbeddingData()]
self.usage = usage or MockUsage(
prompt_tokens=5, completion_tokens=0, total_tokens=5
)
self.object = "list"
def model_dump(self):
return {
"model": self.model,
"data": [
{"embedding": d.embedding, "index": d.index, "object": d.object}
for d in self.data
],
"usage": {
"prompt_tokens": self.usage.prompt_tokens,
"completion_tokens": self.usage.completion_tokens,
"total_tokens": self.usage.total_tokens,
},
"object": self.object,
}
@pytest.mark.parametrize(
"send_default_pii, include_prompts",
[
(True, True),
(True, False),
(False, True),
(False, False),
],
)
def test_nonstreaming_chat_completion(
sentry_init, capture_events, send_default_pii, include_prompts
):
sentry_init(
integrations=[LiteLLMIntegration(include_prompts=include_prompts)],
traces_sample_rate=1.0,
send_default_pii=send_default_pii,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
mock_response = MockCompletionResponse()
with start_transaction(name="litellm test"):
# Simulate what litellm does: call input callback, then success callback
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
assert len(events) == 1
(event,) = events
assert event["type"] == "transaction"
assert event["transaction"] == "litellm test"
assert len(event["spans"]) == 1
(span,) = event["spans"]
assert span["op"] == OP.GEN_AI_CHAT
assert span["description"] == "chat gpt-3.5-turbo"
assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo"
assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo"
assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai"
assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat"
if send_default_pii and include_prompts:
assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"]
assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"]
else:
assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"]
assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10
assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20
assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30
@pytest.mark.parametrize(
"send_default_pii, include_prompts",
[
(True, True),
(True, False),
(False, True),
(False, False),
],
)
def test_streaming_chat_completion(
sentry_init, capture_events, send_default_pii, include_prompts
):
sentry_init(
integrations=[LiteLLMIntegration(include_prompts=include_prompts)],
traces_sample_rate=1.0,
send_default_pii=send_default_pii,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
mock_response = MockCompletionResponse()
with start_transaction(name="litellm test"):
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
"stream": True,
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
assert len(events) == 1
(event,) = events
assert event["type"] == "transaction"
assert len(event["spans"]) == 1
(span,) = event["spans"]
assert span["op"] == OP.GEN_AI_CHAT
assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
def test_embeddings_create(sentry_init, capture_events, clear_litellm_cache):
"""
Test that litellm.embedding() calls are properly instrumented.
This test calls the actual litellm.embedding() function (not just callbacks)
to ensure proper integration testing.
"""
sentry_init(
integrations=[LiteLLMIntegration(include_prompts=True)],
traces_sample_rate=1.0,
send_default_pii=True,
)
events = capture_events()
mock_response = MockEmbeddingResponse()
# Mock within the test to ensure proper ordering with cache clearing
with mock.patch(
"litellm.openai_chat_completions.make_sync_openai_embedding_request"
) as mock_http:
# The function returns (headers, response)
mock_http.return_value = ({}, mock_response)
with start_transaction(name="litellm test"):
response = litellm.embedding(
model="text-embedding-ada-002",
input="Hello, world!",
api_key="test-key", # Provide a fake API key to avoid authentication errors
)
# Allow time for callbacks to complete (they may run in separate threads)
time.sleep(0.1)
# Response is processed by litellm, so just check it exists
assert response is not None
assert len(events) == 1
(event,) = events
assert event["type"] == "transaction"
assert len(event["spans"]) == 1
(span,) = event["spans"]
assert span["op"] == OP.GEN_AI_EMBEDDINGS
assert span["description"] == "embeddings text-embedding-ada-002"
assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings"
assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5
assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-ada-002"
# Check that embeddings input is captured (it's JSON serialized)
embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]
assert json.loads(embeddings_input) == ["Hello, world!"]
def test_embeddings_create_with_list_input(
sentry_init, capture_events, clear_litellm_cache
):
"""Test embedding with list input."""
sentry_init(
integrations=[LiteLLMIntegration(include_prompts=True)],
traces_sample_rate=1.0,
send_default_pii=True,
)
events = capture_events()
mock_response = MockEmbeddingResponse()
# Mock within the test to ensure proper ordering with cache clearing
with mock.patch(
"litellm.openai_chat_completions.make_sync_openai_embedding_request"
) as mock_http:
# The function returns (headers, response)
mock_http.return_value = ({}, mock_response)
with start_transaction(name="litellm test"):
response = litellm.embedding(
model="text-embedding-ada-002",
input=["First text", "Second text", "Third text"],
api_key="test-key", # Provide a fake API key to avoid authentication errors
)
# Allow time for callbacks to complete (they may run in separate threads)
time.sleep(0.1)
# Response is processed by litellm, so just check it exists
assert response is not None
assert len(events) == 1
(event,) = events
assert event["type"] == "transaction"
assert len(event["spans"]) == 1
(span,) = event["spans"]
assert span["op"] == OP.GEN_AI_EMBEDDINGS
assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings"
# Check that list of embeddings input is captured (it's JSON serialized)
embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]
assert json.loads(embeddings_input) == [
"First text",
"Second text",
"Third text",
]
def test_embeddings_no_pii(sentry_init, capture_events, clear_litellm_cache):
"""Test that PII is not captured when disabled."""
sentry_init(
integrations=[LiteLLMIntegration(include_prompts=True)],
traces_sample_rate=1.0,
send_default_pii=False, # PII disabled
)
events = capture_events()
mock_response = MockEmbeddingResponse()
# Mock within the test to ensure proper ordering with cache clearing
with mock.patch(
"litellm.openai_chat_completions.make_sync_openai_embedding_request"
) as mock_http:
# The function returns (headers, response)
mock_http.return_value = ({}, mock_response)
with start_transaction(name="litellm test"):
response = litellm.embedding(
model="text-embedding-ada-002",
input="Hello, world!",
api_key="test-key", # Provide a fake API key to avoid authentication errors
)
# Allow time for callbacks to complete (they may run in separate threads)
time.sleep(0.1)
# Response is processed by litellm, so just check it exists
assert response is not None
assert len(events) == 1
(event,) = events
assert event["type"] == "transaction"
assert len(event["spans"]) == 1
(span,) = event["spans"]
assert span["op"] == OP.GEN_AI_EMBEDDINGS
# Check that embeddings input is NOT captured when PII is disabled
assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["data"]
def test_exception_handling(sentry_init, capture_events):
sentry_init(
integrations=[LiteLLMIntegration()],
traces_sample_rate=1.0,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
with start_transaction(name="litellm test"):
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
}
_input_callback(kwargs)
_failure_callback(
kwargs,
Exception("API rate limit reached"),
datetime.now(),
datetime.now(),
)
# Should have error event and transaction
assert len(events) >= 1
# Find the error event
error_events = [e for e in events if e.get("level") == "error"]
assert len(error_events) == 1
def test_span_origin(sentry_init, capture_events):
sentry_init(
integrations=[LiteLLMIntegration()],
traces_sample_rate=1.0,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
mock_response = MockCompletionResponse()
with start_transaction(name="litellm test"):
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
(event,) = events
assert event["contexts"]["trace"]["origin"] == "manual"
assert event["spans"][0]["origin"] == "auto.ai.litellm"
def test_multiple_providers(sentry_init, capture_events):
"""Test that the integration correctly identifies different providers."""
sentry_init(
integrations=[LiteLLMIntegration()],
traces_sample_rate=1.0,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
# Test with different model prefixes
test_cases = [
("gpt-3.5-turbo", "openai"),
("claude-3-opus-20240229", "anthropic"),
("gemini/gemini-pro", "gemini"),
]
for model, _ in test_cases:
mock_response = MockCompletionResponse(model=model)
with start_transaction(name=f"test {model}"):
kwargs = {
"model": model,
"messages": messages,
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
assert len(events) == len(test_cases)
for i in range(len(test_cases)):
span = events[i]["spans"][0]
# The provider should be detected by litellm.get_llm_provider
assert SPANDATA.GEN_AI_SYSTEM in span["data"]
def test_additional_parameters(sentry_init, capture_events):
"""Test that additional parameters are captured."""
sentry_init(
integrations=[LiteLLMIntegration()],
traces_sample_rate=1.0,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
mock_response = MockCompletionResponse()
with start_transaction(name="litellm test"):
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
"temperature": 0.7,
"max_tokens": 100,
"top_p": 0.9,
"frequency_penalty": 0.5,
"presence_penalty": 0.5,
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
(event,) = events
(span,) = event["spans"]
assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7
assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100
assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9
assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5
assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5
def test_litellm_specific_parameters(sentry_init, capture_events):
"""Test that LiteLLM-specific parameters are captured."""
sentry_init(
integrations=[LiteLLMIntegration()],
traces_sample_rate=1.0,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
mock_response = MockCompletionResponse()
with start_transaction(name="litellm test"):
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
"api_base": "https://custom-api.example.com",
"api_version": "2023-01-01",
"custom_llm_provider": "custom_provider",
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
(event,) = events
(span,) = event["spans"]
assert span["data"]["gen_ai.litellm.api_base"] == "https://custom-api.example.com"
assert span["data"]["gen_ai.litellm.api_version"] == "2023-01-01"
assert span["data"]["gen_ai.litellm.custom_llm_provider"] == "custom_provider"
def test_no_integration(sentry_init, capture_events):
"""Test that when integration is not enabled, callbacks don't break."""
sentry_init(
traces_sample_rate=1.0,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
mock_response = MockCompletionResponse()
with start_transaction(name="litellm test"):
# When the integration isn't enabled, the callbacks should exit early
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
}
# These should not crash, just do nothing
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
(event,) = events
# Should still have the transaction, but no child spans since integration is off
assert event["type"] == "transaction"
assert len(event.get("spans", [])) == 0
def test_response_without_usage(sentry_init, capture_events):
"""Test handling of responses without usage information."""
sentry_init(
integrations=[LiteLLMIntegration()],
traces_sample_rate=1.0,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
# Create a mock response without usage
mock_response = type(
"obj",
(object,),
{
"model": "gpt-3.5-turbo",
"choices": [MockChoice()],
},
)()
with start_transaction(name="litellm test"):
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
(event,) = events
(span,) = event["spans"]
# Span should still be created even without usage info
assert span["op"] == OP.GEN_AI_CHAT
assert span["description"] == "chat gpt-3.5-turbo"
def test_integration_setup(sentry_init):
"""Test that the integration sets up the callbacks correctly."""
sentry_init(
integrations=[LiteLLMIntegration()],
traces_sample_rate=1.0,
)
# Check that callbacks are registered
assert _input_callback in (litellm.input_callback or [])
assert _success_callback in (litellm.success_callback or [])
assert _failure_callback in (litellm.failure_callback or [])
def test_message_dict_extraction(sentry_init, capture_events):
"""Test that response messages are properly extracted with dict() fallback."""
sentry_init(
integrations=[LiteLLMIntegration(include_prompts=True)],
traces_sample_rate=1.0,
send_default_pii=True,
)
events = capture_events()
messages = [{"role": "user", "content": "Hello!"}]
# Create a message that has dict() method instead of model_dump()
class DictMessage:
def __init__(self):
self.role = "assistant"
self.content = "Response"
self.tool_calls = None
def dict(self):
return {"role": self.role, "content": self.content}
mock_response = MockCompletionResponse(choices=[MockChoice(message=DictMessage())])
with start_transaction(name="litellm test"):
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
(event,) = events
(span,) = event["spans"]
# Should have extracted the response message
assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"]
def test_litellm_message_truncation(sentry_init, capture_events):
"""Test that large messages are truncated properly in LiteLLM integration."""
sentry_init(
integrations=[LiteLLMIntegration(include_prompts=True)],
traces_sample_rate=1.0,
send_default_pii=True,
)
events = capture_events()
large_content = (
"This is a very long message that will exceed our size limits. " * 1000
)
messages = [
{"role": "user", "content": "small message 1"},
{"role": "assistant", "content": large_content},
{"role": "user", "content": large_content},
{"role": "assistant", "content": "small message 4"},
{"role": "user", "content": "small message 5"},
]
mock_response = MockCompletionResponse()
with start_transaction(name="litellm test"):
kwargs = {
"model": "gpt-3.5-turbo",
"messages": messages,
}
_input_callback(kwargs)
_success_callback(
kwargs,
mock_response,
datetime.now(),
datetime.now(),
)
assert len(events) > 0
tx = events[0]
assert tx["type"] == "transaction"
chat_spans = [
span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_CHAT
]
assert len(chat_spans) > 0
chat_span = chat_spans[0]
assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["data"]
messages_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
assert isinstance(messages_data, str)
parsed_messages = json.loads(messages_data)
assert isinstance(parsed_messages, list)
assert len(parsed_messages) == 2
assert "small message 4" in str(parsed_messages[0])
assert "small message 5" in str(parsed_messages[1])
assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5
| MockEmbeddingResponse |
python | Pylons__pyramid | tests/test_router.py | {
"start": 127,
"end": 60806
} | class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.registry = self.config.registry
def tearDown(self):
testing.tearDown()
def _registerRouteRequest(self, name):
from pyramid.interfaces import IRouteRequest
from pyramid.request import route_request_iface
iface = route_request_iface(name)
self.registry.registerUtility(iface, IRouteRequest, name=name)
return iface
def _connectRoute(self, name, path, factory=None):
from pyramid.interfaces import IRoutesMapper
from pyramid.urldispatch import RoutesMapper
mapper = self.registry.queryUtility(IRoutesMapper)
if mapper is None:
mapper = RoutesMapper()
self.registry.registerUtility(mapper, IRoutesMapper)
return mapper.connect(name, path, factory)
def _registerLogger(self):
from pyramid.interfaces import IDebugLogger
logger = DummyLogger()
self.registry.registerUtility(logger, IDebugLogger)
return logger
def _registerSettings(self, **kw):
settings = {
'debug_authorization': False,
'debug_notfound': False,
'debug_routematch': False,
}
settings.update(kw)
self.registry.settings = settings
def _registerTraverserFactory(
self,
context,
view_name='',
subpath=None,
traversed=None,
virtual_root=None,
virtual_root_path=None,
raise_error=None,
**kw,
):
from pyramid.interfaces import ITraverser
if virtual_root is None:
virtual_root = context
if subpath is None:
subpath = []
if traversed is None:
traversed = []
if virtual_root_path is None:
virtual_root_path = []
class DummyTraverserFactory:
def __init__(self, root):
self.root = root
def __call__(self, request):
if raise_error:
raise raise_error
values = {
'root': self.root,
'context': context,
'view_name': view_name,
'subpath': subpath,
'traversed': traversed,
'virtual_root': virtual_root,
'virtual_root_path': virtual_root_path,
}
kw.update(values)
return kw
self.registry.registerAdapter(
DummyTraverserFactory, (None,), ITraverser, name=''
)
def _registerView(self, app, name, classifier, req_iface, ctx_iface):
from pyramid.interfaces import IView
self.registry.registerAdapter(
app, (classifier, req_iface, ctx_iface), IView, name
)
def _registerEventListener(self, iface):
L = []
def listener(event):
L.append(event)
self.registry.registerHandler(listener, (iface,))
return L
def _registerRootFactory(self, val):
rootfactory = DummyRootFactory(val)
from pyramid.interfaces import IRootFactory
self.registry.registerUtility(rootfactory, IRootFactory)
return rootfactory
def _getTargetClass(self):
from pyramid.router import Router
return Router
def _makeOne(self):
klass = self._getTargetClass()
return klass(self.registry)
def _mockFinishRequest(self, router):
"""
Mock :meth:`pyramid.router.Router.finish_request` to be a no-op. This
prevents :prop:`pyramid.request.Request.context` from being removed, so
we can write assertions against it.
"""
def mock_finish_request(request):
pass
router.finish_request = mock_finish_request
def _makeEnviron(self, **extras):
environ = {
'wsgi.url_scheme': 'http',
'SERVER_NAME': 'localhost',
'SERVER_PORT': '8080',
'REQUEST_METHOD': 'GET',
'PATH_INFO': '/',
}
environ.update(extras)
return environ
def test_ctor_registry_has_no_settings(self):
self.registry.settings = None
router = self._makeOne()
self.assertEqual(router.debug_notfound, False)
self.assertEqual(router.debug_routematch, False)
self.assertFalse('debug_notfound' in router.__dict__)
self.assertFalse('debug_routematch' in router.__dict__)
def test_root_policy(self):
context = DummyContext()
self._registerTraverserFactory(context)
rootfactory = self._registerRootFactory('abc')
router = self._makeOne()
self.assertEqual(router.root_policy, rootfactory)
def test_request_factory(self):
from pyramid.interfaces import IRequestFactory
class DummyRequestFactory:
pass
self.registry.registerUtility(DummyRequestFactory, IRequestFactory)
router = self._makeOne()
self.assertEqual(router.request_factory, DummyRequestFactory)
def test_tween_factories(self):
from pyramid.config.tweens import Tweens
from pyramid.interfaces import IResponse, ITweens, IViewClassifier
from pyramid.response import Response
tweens = Tweens()
self.registry.registerUtility(tweens, ITweens)
L = []
def tween_factory1(handler, registry):
L.append((handler, registry))
def wrapper(request):
request.environ['handled'].append('one')
return handler(request)
wrapper.name = 'one'
wrapper.child = handler
return wrapper
def tween_factory2(handler, registry):
L.append((handler, registry))
def wrapper(request):
request.environ['handled'] = ['two']
return handler(request)
wrapper.name = 'two'
wrapper.child = handler
return wrapper
tweens.add_implicit('one', tween_factory1)
tweens.add_implicit('two', tween_factory2)
router = self._makeOne()
self.assertEqual(router.handle_request.name, 'two')
self.assertEqual(router.handle_request.child.name, 'one')
self.assertEqual(
router.handle_request.child.child.__name__, 'handle_request'
)
context = DummyContext()
self._registerTraverserFactory(context)
environ = self._makeEnviron()
view = DummyView('abc')
self._registerView(
self.config.derive_view(view), '', IViewClassifier, None, None
)
start_response = DummyStartResponse()
def make_response(s):
return Response(s)
router.registry.registerAdapter(make_response, (str,), IResponse)
app_iter = router(environ, start_response)
self.assertEqual(app_iter, [b'abc'])
self.assertEqual(start_response.status, '200 OK')
self.assertEqual(environ['handled'], ['two', 'one'])
def test_call_traverser_default(self):
from pyramid.httpexceptions import HTTPNotFound
environ = self._makeEnviron()
logger = self._registerLogger()
router = self._makeOne()
start_response = DummyStartResponse()
why = exc_raised(HTTPNotFound, router, environ, start_response)
self.assertTrue('/' in why.args[0], why)
self.assertFalse('debug_notfound' in why.args[0])
self.assertEqual(len(logger.messages), 0)
def test_traverser_raises_notfound_class(self):
from pyramid.httpexceptions import HTTPNotFound
environ = self._makeEnviron()
context = DummyContext()
self._registerTraverserFactory(context, raise_error=HTTPNotFound)
router = self._makeOne()
start_response = DummyStartResponse()
self.assertRaises(HTTPNotFound, router, environ, start_response)
def test_traverser_raises_notfound_instance(self):
from pyramid.httpexceptions import HTTPNotFound
environ = self._makeEnviron()
context = DummyContext()
self._registerTraverserFactory(
context, raise_error=HTTPNotFound('foo')
)
router = self._makeOne()
start_response = DummyStartResponse()
why = exc_raised(HTTPNotFound, router, environ, start_response)
self.assertTrue('foo' in why.args[0], why)
def test_traverser_raises_forbidden_class(self):
from pyramid.httpexceptions import HTTPForbidden
environ = self._makeEnviron()
context = DummyContext()
self._registerTraverserFactory(context, raise_error=HTTPForbidden)
router = self._makeOne()
start_response = DummyStartResponse()
self.assertRaises(HTTPForbidden, router, environ, start_response)
def test_traverser_raises_forbidden_instance(self):
from pyramid.httpexceptions import HTTPForbidden
environ = self._makeEnviron()
context = DummyContext()
self._registerTraverserFactory(
context, raise_error=HTTPForbidden('foo')
)
router = self._makeOne()
start_response = DummyStartResponse()
why = exc_raised(HTTPForbidden, router, environ, start_response)
self.assertTrue('foo' in why.args[0], why)
def test_call_no_view_registered_no_isettings(self):
from pyramid.httpexceptions import HTTPNotFound
environ = self._makeEnviron()
context = DummyContext()
self._registerTraverserFactory(context)
logger = self._registerLogger()
router = self._makeOne()
start_response = DummyStartResponse()
why = exc_raised(HTTPNotFound, router, environ, start_response)
self.assertTrue('/' in why.args[0], why)
self.assertFalse('debug_notfound' in why.args[0])
self.assertEqual(len(logger.messages), 0)
def test_call_no_view_registered_debug_notfound_false(self):
from pyramid.httpexceptions import HTTPNotFound
environ = self._makeEnviron()
context = DummyContext()
self._registerTraverserFactory(context)
logger = self._registerLogger()
self._registerSettings(debug_notfound=False)
router = self._makeOne()
start_response = DummyStartResponse()
why = exc_raised(HTTPNotFound, router, environ, start_response)
self.assertTrue('/' in why.args[0], why)
self.assertFalse('debug_notfound' in why.args[0])
self.assertEqual(len(logger.messages), 0)
def test_call_no_view_registered_debug_notfound_true(self):
from pyramid.httpexceptions import HTTPNotFound
environ = self._makeEnviron()
context = DummyContext()
self._registerTraverserFactory(context)
self._registerSettings(debug_notfound=True)
logger = self._registerLogger()
router = self._makeOne()
start_response = DummyStartResponse()
why = exc_raised(HTTPNotFound, router, environ, start_response)
self.assertTrue(
"debug_notfound of url http://localhost:8080/; " in why.args[0]
)
self.assertTrue("view_name: '', subpath: []" in why.args[0])
self.assertTrue('http://localhost:8080' in why.args[0], why)
self.assertEqual(len(logger.messages), 1)
message = logger.messages[0]
self.assertTrue('of url http://localhost:8080' in message)
self.assertTrue("path_info: " in message)
self.assertTrue('DummyContext' in message)
self.assertTrue("view_name: ''" in message)
self.assertTrue("subpath: []" in message)
def test_call_view_returns_non_iresponse(self):
from pyramid.interfaces import IViewClassifier
context = DummyContext()
self._registerTraverserFactory(context)
environ = self._makeEnviron()
view = DummyView('abc')
self._registerView(
self.config.derive_view(view), '', IViewClassifier, None, None
)
router = self._makeOne()
start_response = DummyStartResponse()
self.assertRaises(ValueError, router, environ, start_response)
def test_call_view_returns_adapted_response(self):
    """A str return value is adapted to a Response via a registered
    (str,) -> IResponse adapter and served normally."""
    from pyramid.interfaces import IResponse, IViewClassifier
    from pyramid.response import Response

    context = DummyContext()
    self._registerTraverserFactory(context)
    environ = self._makeEnviron()
    view = DummyView('abc')  # returns a bare str
    self._registerView(
        self.config.derive_view(view), '', IViewClassifier, None, None
    )
    router = self._makeOne()
    start_response = DummyStartResponse()

    def make_response(s):
        # adapter turning the str into a real Response
        return Response(s)

    router.registry.registerAdapter(make_response, (str,), IResponse)
    app_iter = router(environ, start_response)
    self.assertEqual(app_iter, [b'abc'])
    self.assertEqual(start_response.status, '200 OK')
def test_call_with_request_extensions(self):
    """Registered IRequestExtensions descriptors are applied to the
    request created by the request factory (request.foo becomes 'bar')."""
    from pyramid.interfaces import (
        IRequest,
        IRequestExtensions,
        IViewClassifier,
    )
    from pyramid.request import Request
    from pyramid.util import InstancePropertyHelper

    context = DummyContext()
    self._registerTraverserFactory(context)

    class Extensions:
        # minimal stand-in for the IRequestExtensions structure
        def __init__(self):
            self.methods = {}
            self.descriptors = {}

    extensions = Extensions()
    ext_method = lambda r: 'bar'
    # build a property descriptor named 'foo' backed by ext_method
    name, fn = InstancePropertyHelper.make_property(ext_method, name='foo')
    extensions.descriptors[name] = fn
    request = Request.blank('/')
    request.request_iface = IRequest
    request.registry = self.registry

    def request_factory(environ):
        # always hand back the pre-built request
        return request

    self.registry.registerUtility(extensions, IRequestExtensions)
    environ = self._makeEnviron()
    response = DummyResponse()
    response.app_iter = ['Hello world']
    view = DummyView(response)
    self._registerView(
        self.config.derive_view(view), '', IViewClassifier, None, None
    )
    router = self._makeOne()
    router.request_factory = request_factory
    start_response = DummyStartResponse()
    router(environ, start_response)
    # the extension property is visible on the request seen by the view
    self.assertEqual(view.request.foo, 'bar')
def test_call_view_registered_nonspecific_default_path(self):
    """A view registered for no particular context/request iface serves
    the default path; request gets view_name '', empty subpath, context
    and root set to the traversal context."""
    from pyramid.interfaces import IViewClassifier

    context = DummyContext()
    self._registerTraverserFactory(context)
    response = DummyResponse()
    response.app_iter = ['Hello world']
    view = DummyView(response)
    environ = self._makeEnviron()
    self._registerView(
        self.config.derive_view(view), '', IViewClassifier, None, None
    )
    self._registerRootFactory(context)
    router = self._makeOne()
    self._mockFinishRequest(router)
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ['Hello world'])
    self.assertEqual(start_response.headers, ())
    self.assertEqual(start_response.status, '200 OK')
    request = view.request
    self.assertEqual(request.view_name, '')
    self.assertEqual(request.subpath, [])
    self.assertEqual(request.context, context)
    self.assertEqual(request.root, context)
def test_call_view_registered_nonspecific_nondefault_path_and_subpath(
    self,
):
    """Traversal results (view_name 'foo', subpath ['bar']) are exposed
    on the request and the matching named view is invoked."""
    from pyramid.interfaces import IViewClassifier

    context = DummyContext()
    self._registerTraverserFactory(
        context, view_name='foo', subpath=['bar'], traversed=['context']
    )
    self._registerRootFactory(context)
    response = DummyResponse()
    response.app_iter = ['Hello world']
    view = DummyView(response)
    environ = self._makeEnviron()
    self._registerView(view, 'foo', IViewClassifier, None, None)
    router = self._makeOne()
    self._mockFinishRequest(router)
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ['Hello world'])
    self.assertEqual(start_response.headers, ())
    self.assertEqual(start_response.status, '200 OK')
    request = view.request
    self.assertEqual(request.view_name, 'foo')
    self.assertEqual(request.subpath, ['bar'])
    self.assertEqual(request.context, context)
    self.assertEqual(request.root, context)
def test_call_view_registered_specific_success(self):
    """A view registered for (IRequest, IContext) is found when the
    traversal context directly provides IContext."""
    from zope.interface import Interface, directlyProvides

    class IContext(Interface):
        pass

    from pyramid.interfaces import IRequest, IViewClassifier

    context = DummyContext()
    directlyProvides(context, IContext)  # make context match the view spec
    self._registerTraverserFactory(context)
    self._registerRootFactory(context)
    response = DummyResponse()
    response.app_iter = ['Hello world']
    view = DummyView(response)
    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    router = self._makeOne()
    self._mockFinishRequest(router)
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ['Hello world'])
    self.assertEqual(start_response.headers, ())
    self.assertEqual(start_response.status, '200 OK')
    request = view.request
    self.assertEqual(request.view_name, '')
    self.assertEqual(request.subpath, [])
    self.assertEqual(request.context, context)
    self.assertEqual(request.root, context)
def test_call_view_registered_specific_fail(self):
    """A view registered for IContext is NOT found when the context
    provides a different interface; router raises HTTPNotFound."""
    from zope.interface import Interface, directlyProvides

    from pyramid.httpexceptions import HTTPNotFound
    from pyramid.interfaces import IViewClassifier

    class IContext(Interface):
        pass

    class INotContext(Interface):
        pass

    from pyramid.interfaces import IRequest

    context = DummyContext()
    directlyProvides(context, INotContext)  # deliberately mismatched
    self._registerTraverserFactory(context, subpath=[''])
    response = DummyResponse()
    view = DummyView(response)
    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    router = self._makeOne()
    start_response = DummyStartResponse()
    self.assertRaises(HTTPNotFound, router, environ, start_response)
def test_call_view_raises_forbidden(self):
    """HTTPForbidden raised by a view propagates with its original
    message intact."""
    from zope.interface import Interface, directlyProvides

    from pyramid.httpexceptions import HTTPForbidden

    class IContext(Interface):
        pass

    from pyramid.interfaces import IRequest, IViewClassifier

    context = DummyContext()
    directlyProvides(context, IContext)
    self._registerTraverserFactory(context, subpath=[''])
    response = DummyResponse()
    view = DummyView(
        response, raise_exception=HTTPForbidden("unauthorized")
    )
    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    router = self._makeOne()
    start_response = DummyStartResponse()
    why = exc_raised(HTTPForbidden, router, environ, start_response)
    self.assertEqual(why.args[0], 'unauthorized')
def test_call_view_raises_notfound(self):
    """HTTPNotFound raised by a view propagates with its original
    message intact."""
    from zope.interface import Interface, directlyProvides

    class IContext(Interface):
        pass

    from pyramid.httpexceptions import HTTPNotFound
    from pyramid.interfaces import IRequest, IViewClassifier

    context = DummyContext()
    directlyProvides(context, IContext)
    self._registerTraverserFactory(context, subpath=[''])
    response = DummyResponse()
    view = DummyView(response, raise_exception=HTTPNotFound("notfound"))
    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    router = self._makeOne()
    start_response = DummyStartResponse()
    why = exc_raised(HTTPNotFound, router, environ, start_response)
    self.assertEqual(why.args[0], 'notfound')
def test_call_view_raises_response_cleared(self):
    """When a view mutates request.response then raises, the exception
    view sees a fresh response (the attribute set before the raise is
    gone)."""
    from zope.interface import Interface, directlyProvides

    from pyramid.interfaces import IExceptionViewClassifier

    class IContext(Interface):
        pass

    from pyramid.interfaces import IRequest, IViewClassifier

    context = DummyContext()
    directlyProvides(context, IContext)
    self._registerTraverserFactory(context, subpath=[''])

    def view(context, request):
        # taint the lazily-created response, then fail
        request.response.a = 1
        raise KeyError

    def exc_view(context, request):
        # the tainted response must have been discarded
        self.assertFalse(hasattr(request.response, 'a'))
        request.response.body = b'OK'
        return request.response

    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    self._registerView(
        exc_view, '', IExceptionViewClassifier, IRequest, KeyError
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    itera = router(environ, start_response)
    self.assertEqual(itera, [b'OK'])
def test_call_request_has_response_callbacks(self):
    """Callbacks added via request.add_response_callback run against the
    response before it is returned."""
    from zope.interface import Interface, directlyProvides

    class IContext(Interface):
        pass

    from pyramid.interfaces import IRequest, IViewClassifier

    context = DummyContext()
    directlyProvides(context, IContext)
    self._registerTraverserFactory(context, subpath=[''])
    response = DummyResponse('200 OK')

    def view(context, request):
        def callback(request, response):
            # marker proving the callback fired
            response.called_back = True

        request.add_response_callback(callback)
        return response

    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    router = self._makeOne()
    start_response = DummyStartResponse()
    router(environ, start_response)
    self.assertEqual(response.called_back, True)
def test_finish_request_when_view_succeeds(self):
    """On success, finished callbacks run and request attributes set
    during traversal (e.g. context) are cleaned off the request."""
    from zope.interface import Interface, directlyProvides

    class IContext(Interface):
        pass

    from pyramid.interfaces import IRequest, IViewClassifier

    context = DummyContext()
    directlyProvides(context, IContext)
    self._registerTraverserFactory(context, subpath=[''])
    response = DummyResponse('200 OK')

    def view(context, request):
        def callback(request):
            request.environ['called_back'] = True

        request.add_finished_callback(callback)
        # stash the request so we can inspect it after the call
        request.environ['request'] = request
        return response

    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    router = self._makeOne()
    start_response = DummyStartResponse()
    router(environ, start_response)
    self.assertEqual(environ['called_back'], True)
    # 'context' was removed from the request during finish
    self.assertFalse(hasattr(environ['request'], 'context'))
def test_finish_request_when_view_raises(self):
    """Even when the view raises, finished callbacks run and the request
    is cleaned up (no lingering 'context' attribute)."""
    from zope.interface import Interface, directlyProvides

    class IContext(Interface):
        pass

    from pyramid.interfaces import IRequest, IViewClassifier

    context = DummyContext()
    directlyProvides(context, IContext)
    self._registerTraverserFactory(context, subpath=[''])

    def view(context, request):
        def callback(request):
            request.environ['called_back'] = True

        request.add_finished_callback(callback)
        # stash the request so we can inspect it after the call
        request.environ['request'] = request
        raise NotImplementedError

    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    router = self._makeOne()
    start_response = DummyStartResponse()
    exc_raised(NotImplementedError, router, environ, start_response)
    self.assertEqual(environ['called_back'], True)
    self.assertFalse(hasattr(environ['request'], 'context'))
def test_call_request_factory_raises(self):
    """The router's cleanup path must not blow up when the request
    factory itself fails before a request exists."""

    def broken_factory(environ):
        raise NotImplementedError

    wsgi_environ = self._makeEnviron()
    router = self._makeOne()
    router.request_factory = broken_factory
    exc_raised(
        NotImplementedError, router, wsgi_environ, DummyStartResponse()
    )
def test_call_eventsends(self):
    """One NewRequest, BeforeTraversal, ContextFound and NewResponse
    event each is sent per request, in that lifecycle, carrying the
    request/response/context."""
    from pyramid.interfaces import (
        IBeforeTraversal,
        IContextFound,
        INewRequest,
        INewResponse,
        IViewClassifier,
    )

    context = DummyContext()
    self._registerTraverserFactory(context)
    response = DummyResponse()
    response.app_iter = ['Hello world']
    view = DummyView(response)
    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, None, None)
    # capture each lifecycle event type separately
    request_events = self._registerEventListener(INewRequest)
    beforetraversal_events = self._registerEventListener(IBeforeTraversal)
    context_found_events = self._registerEventListener(IContextFound)
    response_events = self._registerEventListener(INewResponse)
    router = self._makeOne()
    self._mockFinishRequest(router)
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(len(request_events), 1)
    self.assertEqual(request_events[0].request.environ, environ)
    self.assertEqual(len(beforetraversal_events), 1)
    self.assertEqual(beforetraversal_events[0].request.environ, environ)
    self.assertEqual(len(context_found_events), 1)
    self.assertEqual(context_found_events[0].request.environ, environ)
    self.assertEqual(context_found_events[0].request.context, context)
    self.assertEqual(len(response_events), 1)
    self.assertEqual(response_events[0].response, response)
    self.assertEqual(response_events[0].request.context, context)
    self.assertEqual(result, response.app_iter)
def test_call_newrequest_evllist_exc_can_be_caught_by_exceptionview(self):
    """An exception raised by an INewRequest subscriber is handled by a
    registered exception view, whose response is served."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        INewRequest,
        IRequest,
    )

    context = DummyContext()
    self._registerTraverserFactory(context)

    def listener(event):
        # fail as soon as the request is created
        raise KeyError

    self.registry.registerHandler(listener, (INewRequest,))
    exception_response = DummyResponse()
    exception_response.app_iter = ["Hello, world"]
    exception_view = DummyView(exception_response)
    # NOTE: the original created `environ` twice; the duplicate dead
    # assignment is removed here.
    environ = self._makeEnviron()
    self._registerView(
        exception_view, '', IExceptionViewClassifier, IRequest, KeyError
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, exception_response.app_iter)
def test_call_route_matches_and_has_factory(self):
    """A matched route uses its root factory, populates matchdict and
    matched_route, and (debug_routematch=True) logs one diagnostic line
    including the route predicates."""
    from pyramid.interfaces import IViewClassifier

    logger = self._registerLogger()
    self._registerSettings(debug_routematch=True)
    self._registerRouteRequest('foo')
    root = object()

    def factory(request):
        # route-specific root factory
        return root

    route = self._connectRoute('foo', 'archives/:action/:article', factory)
    route.predicates = [DummyPredicate()]
    context = DummyContext()
    self._registerTraverserFactory(context)
    response = DummyResponse()
    response.app_iter = ['Hello world']
    view = DummyView(response)
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    self._registerView(view, '', IViewClassifier, None, None)
    self._registerRootFactory(context)
    router = self._makeOne()
    self._mockFinishRequest(router)
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ['Hello world'])
    self.assertEqual(start_response.headers, ())
    self.assertEqual(start_response.status, '200 OK')
    request = view.request
    self.assertEqual(request.view_name, '')
    self.assertEqual(request.subpath, [])
    self.assertEqual(request.context, context)
    # root comes from the route factory, not the default root factory
    self.assertEqual(request.root, root)
    matchdict = {'action': 'action1', 'article': 'article1'}
    self.assertEqual(request.matchdict, matchdict)
    self.assertEqual(request.matched_route.name, 'foo')
    self.assertEqual(len(logger.messages), 1)
    self.assertTrue(
        logger.messages[0].startswith(
            "route matched for url http://localhost:8080"
            "/archives/action1/article1; "
            "route_name: 'foo', "
            "path_info: "
        )
    )
    self.assertTrue("predicates: 'predicate'" in logger.messages[0])
def test_call_route_match_miss_debug_routematch(self):
    """With debug_routematch=True a non-matching URL logs exactly one
    'no route matched' message and raises HTTPNotFound."""
    from pyramid.httpexceptions import HTTPNotFound

    logger = self._registerLogger()
    self._registerSettings(debug_routematch=True)
    self._registerRouteRequest('foo')
    self._connectRoute('foo', 'archives/:action/:article')
    context = DummyContext()
    self._registerTraverserFactory(context)
    environ = self._makeEnviron(PATH_INFO='/wontmatch')
    self._registerRootFactory(context)
    router = self._makeOne()
    start_response = DummyStartResponse()
    self.assertRaises(HTTPNotFound, router, environ, start_response)
    self.assertEqual(len(logger.messages), 1)
    self.assertEqual(
        logger.messages[0],
        'no route matched for url http://localhost:8080/wontmatch',
    )
def test_call_route_matches_doesnt_overwrite_subscriber_iface(self):
    """An interface added to the request by an INewRequest subscriber
    survives route matching (route iface is added, not substituted)."""
    from zope.interface import Interface, alsoProvides

    from pyramid.interfaces import INewRequest, IViewClassifier

    self._registerRouteRequest('foo')

    class IFoo(Interface):
        pass

    def listener(event):
        # decorate the request before route matching happens
        alsoProvides(event.request, IFoo)

    self.registry.registerHandler(listener, (INewRequest,))
    root = object()

    def factory(request):
        return root

    self._connectRoute('foo', 'archives/:action/:article', factory)
    context = DummyContext()
    self._registerTraverserFactory(context)
    response = DummyResponse()
    response.app_iter = ['Hello world']
    view = DummyView(response)
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    self._registerView(view, '', IViewClassifier, None, None)
    self._registerRootFactory(context)
    router = self._makeOne()
    self._mockFinishRequest(router)
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ['Hello world'])
    self.assertEqual(start_response.headers, ())
    self.assertEqual(start_response.status, '200 OK')
    request = view.request
    self.assertEqual(request.view_name, '')
    self.assertEqual(request.subpath, [])
    self.assertEqual(request.context, context)
    self.assertEqual(request.root, root)
    matchdict = {'action': 'action1', 'article': 'article1'}
    self.assertEqual(request.matchdict, matchdict)
    self.assertEqual(request.matched_route.name, 'foo')
    # the subscriber-provided interface is still present
    self.assertTrue(IFoo.providedBy(request))
def test_root_factory_raises_notfound(self):
    """HTTPNotFound raised by the root factory propagates with its
    message intact."""
    from zope.interface import Interface, directlyProvides

    from pyramid.httpexceptions import HTTPNotFound
    from pyramid.interfaces import IRootFactory

    def rootfactory(request):
        raise HTTPNotFound('from root factory')

    self.registry.registerUtility(rootfactory, IRootFactory)

    class IContext(Interface):
        pass

    context = DummyContext()
    directlyProvides(context, IContext)
    environ = self._makeEnviron()
    router = self._makeOne()
    start_response = DummyStartResponse()
    why = exc_raised(HTTPNotFound, router, environ, start_response)
    self.assertTrue('from root factory' in why.args[0])
def test_root_factory_raises_forbidden(self):
    """HTTPForbidden raised by the root factory propagates with its
    message intact."""
    from zope.interface import Interface, directlyProvides

    from pyramid.httpexceptions import HTTPForbidden
    from pyramid.interfaces import IRootFactory

    def rootfactory(request):
        raise HTTPForbidden('from root factory')

    self.registry.registerUtility(rootfactory, IRootFactory)

    class IContext(Interface):
        pass

    context = DummyContext()
    directlyProvides(context, IContext)
    environ = self._makeEnviron()
    router = self._makeOne()
    start_response = DummyStartResponse()
    why = exc_raised(HTTPForbidden, router, environ, start_response)
    self.assertTrue('from root factory' in why.args[0])
def test_root_factory_exception_propagating(self):
    """A non-HTTP exception from the root factory propagates out of the
    router unchanged (no exception view registered)."""
    from zope.interface import Interface, directlyProvides

    from pyramid.interfaces import IRootFactory

    def rootfactory(request):
        raise RuntimeError()

    self.registry.registerUtility(rootfactory, IRootFactory)

    class IContext(Interface):
        pass

    context = DummyContext()
    directlyProvides(context, IContext)
    environ = self._makeEnviron()
    router = self._makeOne()
    start_response = DummyStartResponse()
    self.assertRaises(RuntimeError, router, environ, start_response)
def test_traverser_exception_propagating(self):
    """An exception raised during traversal propagates out of the
    router unchanged (no exception view registered)."""
    ctx = DummyContext()
    self._registerTraverserFactory(ctx, raise_error=RuntimeError())
    wsgi_environ = self._makeEnviron()
    app = self._makeOne()
    self.assertRaises(
        RuntimeError, app, wsgi_environ, DummyStartResponse()
    )
def test_call_view_exception_propagating(self):
    """An exception view handles a view-raised RuntimeError; exc_info
    and exception remain on the request after the excview tween runs
    (regression test for Pylons/pyramid#1223)."""
    from zope.interface import Interface, directlyProvides

    class IContext(Interface):
        pass

    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IRequestFactory,
        IViewClassifier,
    )

    def rfactory(environ):
        # hand back the pre-built request below (late binding)
        return request

    self.registry.registerUtility(rfactory, IRequestFactory)
    from pyramid.request import Request

    request = Request.blank('/')
    context = DummyContext()
    directlyProvides(context, IContext)
    self._registerTraverserFactory(context, subpath=[''])
    response = DummyResponse()
    response.app_iter = ['OK']
    error = RuntimeError()
    view = DummyView(response, raise_exception=error)
    environ = self._makeEnviron()

    def exception_view(context, request):
        # exc_info is populated while the exception view runs
        self.assertEqual(request.exc_info[0], RuntimeError)
        return response

    self._registerView(view, '', IViewClassifier, IRequest, IContext)
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        RuntimeError,
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ['OK'])
    # exc_info and exception should still be around on the request after
    # the excview tween has run (see
    # https://github.com/Pylons/pyramid/issues/1223)
    self.assertEqual(request.exception, error)
    self.assertEqual(request.exc_info[:2], (RuntimeError, error))
def test_call_view_raises_exception_view(self):
    """A RuntimeError from the view is routed to the matching exception
    view, which sees request.exception and supplies the response."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    response = DummyResponse()
    exception_response = DummyResponse()
    exception_response.app_iter = ["Hello, world"]
    view = DummyView(response, raise_exception=RuntimeError)

    def exception_view(context, request):
        # request.exception carries the original error instance
        self.assertEqual(request.exception.__class__, RuntimeError)
        return exception_response

    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, None)
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        RuntimeError,
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ["Hello, world"])
def test_call_view_raises_super_exception_sub_exception_view(self):
    """An exception view registered for a SUBCLASS does not catch the
    SUPERCLASS exception; the error propagates."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    class SuperException(Exception):
        pass

    class SubException(SuperException):
        pass

    response = DummyResponse()
    exception_response = DummyResponse()
    exception_response.app_iter = ["Hello, world"]
    view = DummyView(response, raise_exception=SuperException)
    exception_view = DummyView(exception_response)
    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, None)
    # registered only for the subclass
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        SubException,
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    self.assertRaises(SuperException, router, environ, start_response)
def test_call_view_raises_sub_exception_super_exception_view(self):
    """An exception view registered for a SUPERCLASS does catch a raised
    SUBCLASS exception."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    class SuperException(Exception):
        pass

    class SubException(SuperException):
        pass

    response = DummyResponse()
    exception_response = DummyResponse()
    exception_response.app_iter = ["Hello, world"]
    view = DummyView(response, raise_exception=SubException)
    exception_view = DummyView(exception_response)
    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, None)
    # registered for the superclass; matches the subclass instance
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        SuperException,
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ["Hello, world"])
def test_call_view_raises_exception_another_exception_view(self):
    """An exception view for an unrelated exception type does not catch
    the raised exception; it propagates."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    class MyException(Exception):
        pass

    class AnotherException(Exception):
        pass

    response = DummyResponse()
    exception_response = DummyResponse()
    exception_response.app_iter = ["Hello, world"]
    view = DummyView(response, raise_exception=MyException)
    exception_view = DummyView(exception_response)
    environ = self._makeEnviron()
    self._registerView(view, '', IViewClassifier, IRequest, None)
    # registered for an unrelated exception type
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        AnotherException,
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    self.assertRaises(MyException, router, environ, start_response)
def test_root_factory_raises_exception_view(self):
    """A RuntimeError raised by the root factory is handled by a
    matching exception view."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IRootFactory,
    )

    def rootfactory(request):
        raise RuntimeError()

    self.registry.registerUtility(rootfactory, IRootFactory)
    exception_response = DummyResponse()
    exception_response.app_iter = ["Hello, world"]
    exception_view = DummyView(exception_response)
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        RuntimeError,
    )
    environ = self._makeEnviron()
    router = self._makeOne()
    start_response = DummyStartResponse()
    app_iter = router(environ, start_response)
    self.assertEqual(app_iter, ["Hello, world"])
def test_traverser_raises_exception_view(self):
    """A RuntimeError raised during traversal is handled by a matching
    exception view."""
    from pyramid.interfaces import IExceptionViewClassifier, IRequest

    environ = self._makeEnviron()
    context = DummyContext()
    self._registerTraverserFactory(context, raise_error=RuntimeError())
    exception_response = DummyResponse()
    exception_response.app_iter = ["Hello, world"]
    exception_view = DummyView(exception_response)
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        RuntimeError,
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    result = router(environ, start_response)
    self.assertEqual(result, ["Hello, world"])
def test_exception_view_returns_non_iresponse(self):
    """An exception view returning None (not an IResponse) makes the
    router raise ValueError."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    environ = self._makeEnviron()
    response = DummyResponse()
    view = DummyView(response, raise_exception=RuntimeError)
    self._registerView(
        self.config.derive_view(view), '', IViewClassifier, IRequest, None
    )
    exception_view = DummyView(None)  # returns None, not a response
    self._registerView(
        self.config.derive_view(exception_view),
        '',
        IExceptionViewClassifier,
        IRequest,
        RuntimeError,
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    self.assertRaises(ValueError, router, environ, start_response)
def test_call_route_raises_route_exception_view(self):
    """An exception view registered against the route's request iface
    handles an error raised by that route's view."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IViewClassifier,
    )

    req_iface = self._registerRouteRequest('foo')
    self._connectRoute('foo', 'archives/:action/:article', None)
    view = DummyView(DummyResponse(), raise_exception=RuntimeError)
    self._registerView(view, '', IViewClassifier, req_iface, None)
    response = DummyResponse()
    response.app_iter = ["Hello, world"]
    exception_view = DummyView(response)
    # exception view bound to the route-specific request interface
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        req_iface,
        RuntimeError,
    )
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    start_response = DummyStartResponse()
    router = self._makeOne()
    result = router(environ, start_response)
    self.assertEqual(result, ["Hello, world"])
def test_call_view_raises_exception_route_view(self):
    """A route-bound exception view does NOT handle an error from a
    non-route (traversal) request; the error propagates."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    req_iface = self._registerRouteRequest('foo')
    self._connectRoute('foo', 'archives/:action/:article', None)
    view = DummyView(DummyResponse(), raise_exception=RuntimeError)
    self._registerView(view, '', IViewClassifier, IRequest, None)
    response = DummyResponse()
    response.app_iter = ["Hello, world"]
    exception_view = DummyView(response)
    # bound to the route iface, but the request below won't match a route
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        req_iface,
        RuntimeError,
    )
    environ = self._makeEnviron()  # no route-matching path
    start_response = DummyStartResponse()
    router = self._makeOne()
    self.assertRaises(RuntimeError, router, environ, start_response)
def test_call_route_raises_exception_view(self):
    """A generic (IRequest-bound) exception view handles an error raised
    by a route-bound view."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    req_iface = self._registerRouteRequest('foo')
    self._connectRoute('foo', 'archives/:action/:article', None)
    view = DummyView(DummyResponse(), raise_exception=RuntimeError)
    self._registerView(view, '', IViewClassifier, req_iface, None)
    response = DummyResponse()
    response.app_iter = ["Hello, world"]
    exception_view = DummyView(response)
    # generic exception view (plain IRequest) still applies to routes
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        RuntimeError,
    )
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    start_response = DummyStartResponse()
    router = self._makeOne()
    result = router(environ, start_response)
    self.assertEqual(result, ["Hello, world"])
def test_call_route_raises_super_exception_sub_exception_view(self):
    """For route requests too, an exception view registered for a
    SUBCLASS does not catch the SUPERCLASS exception."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    class SuperException(Exception):
        pass

    class SubException(SuperException):
        pass

    req_iface = self._registerRouteRequest('foo')
    self._connectRoute('foo', 'archives/:action/:article', None)
    view = DummyView(DummyResponse(), raise_exception=SuperException)
    self._registerView(view, '', IViewClassifier, req_iface, None)
    response = DummyResponse()
    response.app_iter = ["Hello, world"]
    exception_view = DummyView(response)
    # registered only for the subclass
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        SubException,
    )
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    start_response = DummyStartResponse()
    router = self._makeOne()
    self.assertRaises(SuperException, router, environ, start_response)
def test_call_route_raises_sub_exception_super_exception_view(self):
    """For route requests too, an exception view registered for a
    SUPERCLASS catches a raised SUBCLASS exception."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    class SuperException(Exception):
        pass

    class SubException(SuperException):
        pass

    req_iface = self._registerRouteRequest('foo')
    self._connectRoute('foo', 'archives/:action/:article', None)
    view = DummyView(DummyResponse(), raise_exception=SubException)
    self._registerView(view, '', IViewClassifier, req_iface, None)
    response = DummyResponse()
    response.app_iter = ["Hello, world"]
    exception_view = DummyView(response)
    # registered for the superclass; matches the subclass instance
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        SuperException,
    )
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    start_response = DummyStartResponse()
    router = self._makeOne()
    result = router(environ, start_response)
    self.assertEqual(result, ["Hello, world"])
def test_call_route_raises_exception_another_exception_view(self):
    """For route requests too, an exception view for an unrelated
    exception type does not catch the raised exception."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    class MyException(Exception):
        pass

    class AnotherException(Exception):
        pass

    req_iface = self._registerRouteRequest('foo')
    self._connectRoute('foo', 'archives/:action/:article', None)
    view = DummyView(DummyResponse(), raise_exception=MyException)
    self._registerView(view, '', IViewClassifier, req_iface, None)
    response = DummyResponse()
    response.app_iter = ["Hello, world"]
    exception_view = DummyView(response)
    # registered for an unrelated exception type
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        AnotherException,
    )
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    start_response = DummyStartResponse()
    router = self._makeOne()
    self.assertRaises(MyException, router, environ, start_response)
def test_call_route_raises_exception_view_specializing(self):
    """When both a generic (IRequest) and a route-specific exception
    view are registered, the route-specific one wins."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    req_iface = self._registerRouteRequest('foo')
    self._connectRoute('foo', 'archives/:action/:article', None)
    view = DummyView(DummyResponse(), raise_exception=RuntimeError)
    self._registerView(view, '', IViewClassifier, req_iface, None)
    response = DummyResponse()
    response.app_iter = ["Hello, world"]
    exception_view = DummyView(response)
    # generic exception view
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        IRequest,
        RuntimeError,
    )
    response_spec = DummyResponse()
    response_spec.app_iter = ["Hello, special world"]
    exception_view_spec = DummyView(response_spec)
    # more specific exception view bound to the route request iface
    self._registerView(
        exception_view_spec,
        '',
        IExceptionViewClassifier,
        req_iface,
        RuntimeError,
    )
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    start_response = DummyStartResponse()
    router = self._makeOne()
    result = router(environ, start_response)
    self.assertEqual(result, ["Hello, special world"])
def test_call_route_raises_exception_view_another_route(self):
    """An exception view bound to a DIFFERENT route's request iface does
    not handle this route's error; it propagates."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IViewClassifier,
    )

    req_iface = self._registerRouteRequest('foo')
    another_req_iface = self._registerRouteRequest('bar')
    self._connectRoute('foo', 'archives/:action/:article', None)
    view = DummyView(DummyResponse(), raise_exception=RuntimeError)
    self._registerView(view, '', IViewClassifier, req_iface, None)
    response = DummyResponse()
    response.app_iter = ["Hello, world"]
    exception_view = DummyView(response)
    # bound to route 'bar', but the request matches route 'foo'
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        another_req_iface,
        RuntimeError,
    )
    environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
    start_response = DummyStartResponse()
    router = self._makeOne()
    self.assertRaises(RuntimeError, router, environ, start_response)
def test_call_view_raises_exception_view_route(self):
    """A route-bound exception view does not handle an error from a
    plain traversal request; the error propagates."""
    from pyramid.interfaces import (
        IExceptionViewClassifier,
        IRequest,
        IViewClassifier,
    )

    req_iface = self._registerRouteRequest('foo')
    response = DummyResponse()
    exception_response = DummyResponse()
    exception_response.app_iter = ["Hello, world"]
    view = DummyView(response, raise_exception=RuntimeError)
    exception_view = DummyView(exception_response)
    environ = self._makeEnviron()  # traversal request, no route match
    self._registerView(view, '', IViewClassifier, IRequest, None)
    # bound to the route request iface; won't apply to this request
    self._registerView(
        exception_view,
        '',
        IExceptionViewClassifier,
        req_iface,
        RuntimeError,
    )
    router = self._makeOne()
    start_response = DummyStartResponse()
    self.assertRaises(RuntimeError, router, environ, start_response)
def test_call_view_raises_predicate_mismatch(self):
    """PredicateMismatch from the only candidate view propagates out of
    the router when no other view can satisfy the request."""
    from pyramid.exceptions import PredicateMismatch
    from pyramid.interfaces import IRequest, IViewClassifier

    failing_view = DummyView(
        DummyResponse(), raise_exception=PredicateMismatch
    )
    self._registerView(failing_view, '', IViewClassifier, IRequest, None)
    app = self._makeOne()
    self.assertRaises(
        PredicateMismatch, app, self._makeEnviron(), DummyStartResponse()
    )
def test_call_view_predicate_mismatch_doesnt_hide_views(self):
from pyramid.exceptions import PredicateMismatch
from pyramid.interfaces import IRequest, IResponse, IViewClassifier
from pyramid.response import Response
class BaseContext:
pass
class DummyContext(BaseContext):
pass
context = DummyContext()
self._registerTraverserFactory(context)
view = DummyView(DummyResponse(), raise_exception=PredicateMismatch)
self._registerView(view, '', IViewClassifier, IRequest, DummyContext)
good_view = DummyView('abc')
self._registerView(
self.config.derive_view(good_view),
'',
IViewClassifier,
IRequest,
BaseContext,
)
router = self._makeOne()
def make_response(s):
return Response(s)
router.registry.registerAdapter(make_response, (str,), IResponse)
environ = self._makeEnviron()
start_response = DummyStartResponse()
app_iter = router(environ, start_response)
self.assertEqual(app_iter, [b'abc'])
def test_call_view_multiple_predicate_mismatches_dont_hide_views(self):
from zope.interface import Interface, implementer
from pyramid.exceptions import PredicateMismatch
from pyramid.interfaces import IRequest, IResponse, IViewClassifier
from pyramid.response import Response
class IBaseContext(Interface):
pass
class IContext(IBaseContext):
pass
@implementer(IContext)
class DummyContext:
pass
context = DummyContext()
self._registerTraverserFactory(context)
view1 = DummyView(DummyResponse(), raise_exception=PredicateMismatch)
self._registerView(view1, '', IViewClassifier, IRequest, DummyContext)
view2 = DummyView(DummyResponse(), raise_exception=PredicateMismatch)
self._registerView(view2, '', IViewClassifier, IRequest, IContext)
good_view = DummyView('abc')
self._registerView(
self.config.derive_view(good_view),
'',
IViewClassifier,
IRequest,
IBaseContext,
)
router = self._makeOne()
def make_response(s):
return Response(s)
router.registry.registerAdapter(make_response, (str,), IResponse)
environ = self._makeEnviron()
start_response = DummyStartResponse()
app_iter = router(environ, start_response)
self.assertEqual(app_iter, [b'abc'])
def test_call_view_predicate_mismatch_doesnt_find_unrelated_views(self):
from zope.interface import Interface, implementer
from pyramid.exceptions import PredicateMismatch
from pyramid.interfaces import IRequest, IViewClassifier
class IContext(Interface):
pass
class IOtherContext(Interface):
pass
@implementer(IContext)
class DummyContext:
pass
context = DummyContext()
self._registerTraverserFactory(context)
view = DummyView(DummyResponse(), raise_exception=PredicateMismatch)
self._registerView(view, '', IViewClassifier, IRequest, DummyContext)
please_dont_call_me_view = DummyView('abc')
self._registerView(
self.config.derive_view(please_dont_call_me_view),
'',
IViewClassifier,
IRequest,
IOtherContext,
)
router = self._makeOne()
environ = self._makeEnviron()
router = self._makeOne()
start_response = DummyStartResponse()
self.assertRaises(PredicateMismatch, router, environ, start_response)
def test_custom_execution_policy(self):
from pyramid.interfaces import IExecutionPolicy
from pyramid.request import Request
from pyramid.response import Response
registry = self.config.registry
def dummy_policy(environ, router):
return Response(status=200, body=b'foo')
registry.registerUtility(dummy_policy, IExecutionPolicy)
router = self._makeOne()
resp = Request.blank('/').get_response(router)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.body, b'foo')
def test_execution_policy_bubbles_exception(self):
from pyramid.interfaces import (
IExceptionViewClassifier,
IRequest,
IViewClassifier,
)
class Exception1(Exception):
pass
class Exception2(Exception):
pass
req_iface = self._registerRouteRequest('foo')
self._connectRoute('foo', 'archives/:action/:article', None)
view = DummyView(DummyResponse(), raise_exception=Exception1)
self._registerView(view, '', IViewClassifier, req_iface, None)
exception_view1 = DummyView(
DummyResponse(), raise_exception=Exception2
)
self._registerView(
exception_view1, '', IExceptionViewClassifier, IRequest, Exception1
)
response = DummyResponse()
response.app_iter = ["Hello, world"]
exception_view2 = DummyView(response)
self._registerView(
exception_view2, '', IExceptionViewClassifier, IRequest, Exception2
)
environ = self._makeEnviron(PATH_INFO='/archives/action1/article1')
start_response = DummyStartResponse()
router = self._makeOne()
self.assertRaises(Exception2, lambda: router(environ, start_response))
def test_request_context_with_statement(self):
from pyramid.interfaces import IExecutionPolicy
from pyramid.request import Request
from pyramid.response import Response
from pyramid.threadlocal import get_current_request
registry = self.config.registry
result = []
def dummy_policy(environ, router):
with router.request_context(environ):
result.append(get_current_request())
result.append(get_current_request())
return Response(status=200, body=b'foo')
registry.registerUtility(dummy_policy, IExecutionPolicy)
router = self._makeOne()
resp = Request.blank('/test_path').get_response(router)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.body, b'foo')
self.assertEqual(result[0].path_info, '/test_path')
self.assertEqual(result[1], None)
def test_request_context_manually(self):
from pyramid.interfaces import IExecutionPolicy
from pyramid.request import Request
from pyramid.response import Response
from pyramid.threadlocal import get_current_request
registry = self.config.registry
result = []
def dummy_policy(environ, router):
ctx = router.request_context(environ)
ctx.begin()
result.append(get_current_request())
ctx.end()
result.append(get_current_request())
return Response(status=200, body=b'foo')
registry.registerUtility(dummy_policy, IExecutionPolicy)
router = self._makeOne()
resp = Request.blank('/test_path').get_response(router)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.body, b'foo')
self.assertEqual(result[0].path_info, '/test_path')
self.assertEqual(result[1], None)
| TestRouter |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/utils/utils.py | {
"start": 351,
"end": 2259
} | class ____(PydanticBaseModel):
class Config:
"""Pydantic currently does not support nullable required fields. Here, we use a workaround to
allow this behavior.
See https://github.com/samuelcolvin/pydantic/issues/1270#issuecomment-729555558
"""
@staticmethod
def schema_extra(schema, model):
for prop, value in schema.get("properties", {}).items():
# retrieve right field from alias or name
field = next(x for x in model.model_fields.values() if x.alias == prop)
if field.allow_none:
# only one type e.g. {'type': 'integer'}
if "type" in value:
value["anyOf"] = [{"type": value.pop("type")}]
# only one $ref e.g. from other model
elif "$ref" in value:
if issubclass(field.type_, PydanticBaseModel):
# add 'title' in schema to have the exact same behaviour as the rest
value["title"] = field.type_.__config__.title or field.type_.__name__
value["anyOf"] = [{"$ref": value.pop("$ref")}]
value["anyOf"].append({"type": "null"})
def create_definition_ref(definition: str, version: str = SupportedKubernetes.V1_18.value) -> str:
return (
f"https://kubernetesjsonschema.dev/v{version}/_definitions.json#/definitions/{definition}"
)
def create_json_schema_conditionals(
enum_type_to_config_name_mapping: dict[Enum, str],
) -> list[Mapping[str, Any]]:
return [
{
"if": {
"properties": {"type": {"const": enum_type}},
},
"then": {"properties": {"config": {"required": [config_name]}}},
}
for (enum_type, config_name) in enum_type_to_config_name_mapping.items()
]
| BaseModel |
python | getsentry__sentry | tests/sentry/incidents/endpoints/serializers/test_query_subscription.py | {
"start": 2847,
"end": 4137
} | class ____(TestCase):
def test_serialize(self) -> None:
snuba_query = SnubaQuery.objects.create(
type=SnubaQuery.Type.ERROR.value,
dataset="events",
query="test query",
aggregate="count()",
time_window=60,
resolution=60,
)
SnubaQueryEventType.objects.create(
snuba_query=snuba_query, type=SnubaQueryEventType.EventType.ERROR.value
)
subscription = QuerySubscription.objects.create(
project=self.project,
status=QuerySubscription.Status.ACTIVE.value,
subscription_id="123",
snuba_query=snuba_query,
)
result = serialize(subscription)
assert result == {
"id": str(subscription.id),
"status": QuerySubscription.Status.ACTIVE.value,
"subscription": "123",
"snubaQuery": {
"id": str(snuba_query.id),
"dataset": "events",
"query": "test query",
"aggregate": "count()",
"timeWindow": 60,
"environment": None,
"eventTypes": ["error"],
"extrapolationMode": "unknown",
},
}
| TestQuerySubscriptionSerializer |
python | huggingface__transformers | src/transformers/models/musicgen_melody/modeling_musicgen_melody.py | {
"start": 8235,
"end": 13434
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: Optional[float] = 0.0,
is_decoder: Optional[bool] = False,
bias: Optional[bool] = True,
is_causal: Optional[bool] = False,
config: Optional[MusicgenMelodyConfig] = None,
layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_layer from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
| MusicgenMelodyAttention |
python | networkx__networkx | networkx/exception.py | {
"start": 2317,
"end": 2446
} | class ____(NetworkXException):
"""Exception raised by algorithms not implemented for a type of graph."""
| NetworkXNotImplemented |
python | walkccc__LeetCode | solutions/631. Design Excel Sum Formula/631.py | {
"start": 47,
"end": 129
} | class ____:
val: int = 0
posCount: dict[tuple[int, int], int] | None = None
| Cell |
python | has2k1__plotnine | plotnine/geoms/geom_raster.py | {
"start": 599,
"end": 6266
} | class ____(geom):
"""
Rasterized Rectangles specified using center points
{usage}
Parameters
----------
{common_parameters}
hjust : float, default=0.5
Horizontal justification for the rectangle at point `x`.
Default is 0.5, which centers the rectangle horizontally.
Must be in the range `[0, 1]`.
vjust : float, default=0.5
Vertical justification for the rectangle at point `y`
Default is 0.5, which centers the rectangle vertically.
Must be in the range `[0, 1]`.
interpolation : str, default=None
How to calculate values between the center points of
adjacent rectangles. The default is `None`{.py} not to
interpolate. Allowed values are:
```python
"antialiased"
"nearest"
"bilinear"
"bicubic"
"spline16"
"spline36"
"hanning"
"hamming"
"hermite"
"kaiser"
"quadric"
"catrom"
"gaussian"
"bessel"
"mitchell"
"sinc"
"lanczos"
"blackman"
```
filterrad : float, default=4.0
The filter radius for filters that have a radius parameter, i.e.
when interpolation is one of: `sinc`, `lanczos`, `blackman`.
Must be a number greater than zero.
See Also
--------
plotnine.geom_rect
plotnine.geom_tile
"""
DEFAULT_AES = {"alpha": 1, "fill": "#333333"}
REQUIRED_AES = {"x", "y"}
NON_MISSING_AES = {"fill", "xmin", "xmax", "ymin", "ymax"}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
"vjust": 0.5,
"hjust": 0.5,
"interpolation": None,
"filterrad": 4.0,
"raster": True,
}
draw_legend = staticmethod(geom_polygon.draw_legend)
def __init__(
self,
mapping: aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
# Silently accept:
# 1. interpolate
# 2. bool values for interpolation
if "interpolate" in kwargs:
kwargs["interpolation"] = kwargs.pop("interpolate")
if isinstance(kwargs.get("interpolation"), bool):
if kwargs["interpolation"] is True:
kwargs["interpolation"] = "bilinear"
else:
kwargs["interpolation"] = None
super().__init__(mapping, data, **kwargs)
def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
hjust = self.params["hjust"]
vjust = self.params["vjust"]
precision = np.sqrt(np.finfo(float).eps)
x_diff = np.diff(np.sort(data["x"].unique()))
if len(x_diff) == 0:
w = 1
elif np.any(np.abs(np.diff(x_diff)) > precision):
warn(
"Raster pixels are placed at uneven horizontal intervals "
"and will be shifted. Consider using geom_tile() instead.",
PlotnineWarning,
)
w = x_diff.min()
else:
w = x_diff[0]
y_diff = np.diff(np.sort(data["y"].unique()))
if len(y_diff) == 0:
h = 1
elif np.any(np.abs(np.diff(y_diff)) > precision):
warn(
"Raster pixels are placed at uneven vertical intervals "
"and will be shifted. Consider using geom_tile() instead.",
PlotnineWarning,
)
h = y_diff.min()
else:
h = y_diff[0]
data["xmin"] = data["x"] - w * (1 - hjust)
data["xmax"] = data["x"] + w * hjust
data["ymin"] = data["y"] - h * (1 - vjust)
data["ymax"] = data["y"] + h * vjust
return data
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
"""
Plot all groups
"""
from matplotlib.colors import to_rgba_array
from matplotlib.image import AxesImage
if not isinstance(coord, coord_cartesian):
raise PlotnineError(
"geom_raster only works with cartesian coordinates"
)
data = coord.transform(data, panel_params)
x = data["x"].to_numpy().astype(float)
y = data["y"].to_numpy().astype(float)
facecolor = to_rgba_array(data["fill"].to_numpy())
facecolor[:, 3] = data["alpha"].to_numpy()
# Convert vector of data to flat image,
# figure out dimensions of raster on plot, and the colored
# indices.
x_pos = ((x - x.min()) / resolution(x, False)).astype(int)
y_pos = ((y - y.min()) / resolution(y, False)).astype(int)
nrow = y_pos.max() + 1
ncol = x_pos.max() + 1
yidx, xidx = nrow - y_pos - 1, x_pos
# Create and "color" the matrix.
# Any gaps left whites (ones) colors plus zero alpha values
# allows makes it possible to have a "neutral" interpolation
# into the gaps when intervals are uneven.
X = np.ones((nrow, ncol, 4))
X[:, :, 3] = 0
X[yidx, xidx] = facecolor
im = AxesImage(
ax,
data=X,
interpolation=self.params["interpolation"],
origin="upper",
extent=(
data["xmin"].min(),
data["xmax"].max(),
data["ymin"].min(),
data["ymax"].max(),
),
rasterized=self.params["raster"],
filterrad=self.params["filterrad"],
zorder=self.params["zorder"],
)
ax.add_image(im)
| geom_raster |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/ast_util_test.py | {
"start": 1153,
"end": 8781
} | class ____(test.TestCase):
def assertAstMatches(self, actual_node, expected_node_src):
expected_node = gast.parse('({})'.format(expected_node_src)).body[0]
msg = 'AST did not match expected:\n{}\nActual:\n{}'.format(
pretty_printer.fmt(expected_node),
pretty_printer.fmt(actual_node))
self.assertTrue(ast_util.matches(actual_node, expected_node), msg)
def setUp(self):
super(AstUtilTest, self).setUp()
self._invocation_counts = collections.defaultdict(lambda: 0)
def test_rename_symbols_basic(self):
node = parser.parse('a + b')
node = qual_names.resolve(node)
node = ast_util.rename_symbols(
node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
source = parser.unparse(node, include_encoding_marker=False)
expected_node_src = 'renamed_a + b'
self.assertIsInstance(node.value.left.id, str)
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
def test_rename_symbols_attributes(self):
node = parser.parse('b.c = b.c.d')
node = qual_names.resolve(node)
node = ast_util.rename_symbols(
node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')
def test_rename_symbols_nonlocal(self):
node = parser.parse('nonlocal a, b, c')
node = qual_names.resolve(node)
node = ast_util.rename_symbols(
node, {qual_names.from_str('b'): qual_names.QN('renamed_b')})
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), 'nonlocal a, renamed_b, c')
def test_rename_symbols_global(self):
node = parser.parse('global a, b, c')
node = qual_names.resolve(node)
node = ast_util.rename_symbols(
node, {qual_names.from_str('b'): qual_names.QN('renamed_b')})
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), 'global a, renamed_b, c')
def test_rename_symbols_annotations(self):
node = parser.parse('a[i]')
node = qual_names.resolve(node)
anno.setanno(node, 'foo', 'bar')
orig_anno = anno.getanno(node, 'foo')
node = ast_util.rename_symbols(node,
{qual_names.QN('a'): qual_names.QN('b')})
self.assertIs(anno.getanno(node, 'foo'), orig_anno)
def test_rename_symbols_function(self):
node = parser.parse('def f():\n pass')
node = ast_util.rename_symbols(node,
{qual_names.QN('f'): qual_names.QN('f1')})
source = parser.unparse(node, include_encoding_marker=False)
self.assertEqual(source.strip(), 'def f1():\n pass')
def test_copy_clean(self):
node = parser.parse(
textwrap.dedent("""
def f(a):
return a + 1
"""))
setattr(node, '__foo', 'bar')
new_node = ast_util.copy_clean(node)
self.assertIsNot(new_node, node)
self.assertFalse(hasattr(new_node, '__foo'))
def test_copy_clean_preserves_annotations(self):
node = parser.parse(
textwrap.dedent("""
def f(a):
return a + 1
"""))
anno.setanno(node, 'foo', 'bar')
anno.setanno(node, 'baz', 1)
new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
self.assertEqual(anno.getanno(new_node, 'foo'), 'bar')
self.assertFalse(anno.hasanno(new_node, 'baz'))
def test_keywords_to_dict(self):
keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
d = ast_util.keywords_to_dict(keywords)
# Make sure we generate a usable dict node by attaching it to a variable and
# compiling everything.
node = parser.parse('def f(b): pass')
node.body.append(ast.Return(d))
result, _, _ = loader.load_ast(node)
self.assertDictEqual(result.f(3), {'a': 3, 'c': 1, 'd': 'e'})
def assertMatch(self, target_str, pattern_str):
node = parser.parse_expression(target_str)
pattern = parser.parse_expression(pattern_str)
self.assertTrue(ast_util.matches(node, pattern))
def assertNoMatch(self, target_str, pattern_str):
node = parser.parse_expression(target_str)
pattern = parser.parse_expression(pattern_str)
self.assertFalse(ast_util.matches(node, pattern))
def test_matches_symbols(self):
self.assertMatch('foo', '_')
self.assertNoMatch('foo()', '_')
self.assertMatch('foo + bar', 'foo + _')
self.assertNoMatch('bar + bar', 'foo + _')
self.assertNoMatch('foo - bar', 'foo + _')
def test_matches_function_args(self):
self.assertMatch('super(Foo, self).__init__(arg1, arg2)',
'super(_).__init__(_)')
self.assertMatch('super().__init__()', 'super(_).__init__(_)')
self.assertNoMatch('super(Foo, self).bar(arg1, arg2)',
'super(_).__init__(_)')
self.assertMatch('super(Foo, self).__init__()', 'super(Foo, _).__init__(_)')
self.assertNoMatch('super(Foo, self).__init__()',
'super(Bar, _).__init__(_)')
def _mock_apply_fn(self, target, source):
target = parser.unparse(target, include_encoding_marker=False)
source = parser.unparse(source, include_encoding_marker=False)
self._invocation_counts[(target.strip(), source.strip())] += 1
def test_apply_to_single_assignments_dynamic_unpack(self):
node = parser.parse('a, b, c = d')
ast_util.apply_to_single_assignments(node.targets, node.value,
self._mock_apply_fn)
self.assertDictEqual(self._invocation_counts, {
('a', 'd[0]'): 1,
('b', 'd[1]'): 1,
('c', 'd[2]'): 1,
})
def test_apply_to_single_assignments_static_unpack(self):
node = parser.parse('a, b, c = d, e, f')
ast_util.apply_to_single_assignments(node.targets, node.value,
self._mock_apply_fn)
self.assertDictEqual(self._invocation_counts, {
('a', 'd'): 1,
('b', 'e'): 1,
('c', 'f'): 1,
})
def test_parallel_walk(self):
src = """
def f(a):
return a + 1
"""
node = parser.parse(textwrap.dedent(src))
for child_a, child_b in ast_util.parallel_walk(node, node):
self.assertEqual(child_a, child_b)
def test_parallel_walk_string_leaves(self):
src = """
def f(a):
global g
"""
node = parser.parse(textwrap.dedent(src))
for child_a, child_b in ast_util.parallel_walk(node, node):
self.assertEqual(child_a, child_b)
def test_parallel_walk_inconsistent_trees(self):
node_1 = parser.parse(
textwrap.dedent("""
def f(a):
return a + 1
"""))
node_2 = parser.parse(
textwrap.dedent("""
def f(a):
return a + (a * 2)
"""))
node_3 = parser.parse(
textwrap.dedent("""
def f(a):
return a + 2
"""))
with self.assertRaises(ValueError):
for _ in ast_util.parallel_walk(node_1, node_2):
pass
# There is not particular reason to reject trees that differ only in the
# value of a constant.
# TODO(mdan): This should probably be allowed.
with self.assertRaises(ValueError):
for _ in ast_util.parallel_walk(node_1, node_3):
pass
def assertLambdaNodes(self, matching_nodes, expected_bodies):
self.assertEqual(len(matching_nodes), len(expected_bodies))
for node in matching_nodes:
self.assertIsInstance(node, gast.Lambda)
self.assertIn(
parser.unparse(node.body, include_encoding_marker=False).strip(),
expected_bodies)
if __name__ == '__main__':
test.main()
| AstUtilTest |
python | astral-sh__uv | crates/uv-python/fetch-download-metadata.py | {
"start": 3164,
"end": 3518
} | class ____(NamedTuple):
# The operating system, e.g. "linux", "macos", "windows".
platform: str
# The architecture, e.g. "x86_64", "aarch64".
arch: Arch
# The libc implementation, e.g. "gnu", "musl", "none".
libc: str
def key(self) -> PlatformTripleKey:
return (self.platform, self.arch.key(), self.libc)
| PlatformTriple |
python | buildout__buildout | zc.recipe.egg_/src/zc/recipe/egg/egg.py | {
"start": 807,
"end": 5513
} | class ____(object):
_WORKING_SET_CACHE_ATTR_NAME = '_zc_recipe_egg_working_set_cache'
def __init__(self, buildout, name, options):
self.buildout = buildout
self.name = name
self.options = options
b_options = buildout['buildout']
links = options.get('find-links', b_options['find-links'])
if links:
links = links.split()
options['find-links'] = '\n'.join(links)
else:
links = ()
self.links = links
index = options.get('index', b_options.get('index'))
if index is not None:
options['index'] = index
self.index = index
allow_hosts = b_options['allow-hosts']
allow_hosts = tuple([host.strip() for host in allow_hosts.split('\n')
if host.strip() != ''])
self.allow_hosts = allow_hosts
options['eggs-directory'] = b_options['eggs-directory']
options['_e'] = options['eggs-directory'] # backward compat.
options['develop-eggs-directory'] = b_options['develop-eggs-directory']
options['_d'] = options['develop-eggs-directory'] # backward compat.
def working_set(self, extra=()):
"""Separate method to just get the working set
This is intended for reuse by similar recipes.
"""
options = self.options
buildout_section = self.buildout['buildout']
# Backward compat. :(
options['executable'] = sys.executable
orig_distributions = [
r.strip()
for r in options.get('eggs', self.name).split('\n')
if r.strip()
]
ws = self._working_set(
distributions=orig_distributions + list(extra),
develop_eggs_dir=options['develop-eggs-directory'],
eggs_dir=options['eggs-directory'],
offline=(buildout_section.get('offline') == 'true'),
newest=(buildout_section.get('newest') == 'true'),
links=self.links,
index=self.index,
allow_hosts=self.allow_hosts,
allow_unknown_extras=bool_option(buildout_section, 'allow-unknown-extras')
)
return orig_distributions, ws
def install(self):
reqs, ws = self.working_set()
return ()
update = install
def _working_set(
self,
distributions,
eggs_dir,
develop_eggs_dir,
offline=False,
newest=True,
links=(),
index=None,
allow_hosts=('*',),
allow_unknown_extras=False,
):
"""Helper function to build a working set.
Return an instance of `pkg_resources.WorkingSet`.
Results are cached. The cache key is composed by all the arguments
passed to the function. See also `self._get_cache_storage()`.
"""
cache_storage = self._get_cache_storage()
cache_key = (
tuple(distributions),
eggs_dir,
develop_eggs_dir,
offline,
newest,
tuple(links),
index,
tuple(allow_hosts),
allow_unknown_extras,
)
if cache_key not in cache_storage:
if offline:
ws = zc.buildout.easy_install.working_set(
distributions,
[develop_eggs_dir, eggs_dir]
)
else:
ws = zc.buildout.easy_install.install(
distributions, eggs_dir,
links=links,
index=index,
path=[develop_eggs_dir],
newest=newest,
allow_hosts=allow_hosts,
allow_unknown_extras=allow_unknown_extras)
ws = zc.buildout.easy_install.sort_working_set(
ws, eggs_dir, develop_eggs_dir
)
cache_storage[cache_key] = ws
# `pkg_resources.WorkingSet` instances are mutable, so we need to return
# a copy.
return copy.deepcopy(cache_storage[cache_key])
def _get_cache_storage(self):
"""Return a mapping where to store generated working sets.
The cache storage is stored in an attribute of `self.buildout` with
name given by `self._WORKING_SET_CACHE_ATTR_NAME`.
"""
cache_storage = getattr(
self.buildout,
self._WORKING_SET_CACHE_ATTR_NAME,
None)
if cache_storage is None:
cache_storage = {}
setattr(
self.buildout,
self._WORKING_SET_CACHE_ATTR_NAME,
cache_storage)
return cache_storage
| Eggs |
python | django__django | django/contrib/gis/db/backends/postgis/introspection.py | {
"start": 128,
"end": 3185
} | class ____(DatabaseIntrospection):
postgis_oid_lookup = {} # Populated when introspection is performed.
ignored_tables = [
*DatabaseIntrospection.ignored_tables,
"geography_columns",
"geometry_columns",
"raster_columns",
"spatial_ref_sys",
"raster_overviews",
]
def get_field_type(self, data_type, description):
if not self.postgis_oid_lookup:
# Query PostgreSQL's pg_type table to determine the OID integers
# for the PostGIS data types used in reverse lookup (the integers
# may be different across versions). To prevent unnecessary
# requests upon connection initialization, the `data_types_reverse`
# dictionary isn't updated until introspection is performed here.
with self.connection.cursor() as cursor:
cursor.execute(
"SELECT oid, typname "
"FROM pg_type "
"WHERE typname IN ('geometry', 'geography')"
)
self.postgis_oid_lookup = dict(cursor.fetchall())
self.data_types_reverse.update(
(oid, "GeometryField") for oid in self.postgis_oid_lookup
)
return super().get_field_type(data_type, description)
def get_geometry_type(self, table_name, description):
"""
The geometry type OID used by PostGIS does not indicate the particular
type of field that a geometry column is (e.g., whether it's a
PointField or a PolygonField). Thus, this routine queries the PostGIS
metadata tables to determine the geometry type.
"""
with self.connection.cursor() as cursor:
cursor.execute(
"""
SELECT t.coord_dimension, t.srid, t.type FROM (
SELECT * FROM geometry_columns
UNION ALL
SELECT * FROM geography_columns
) AS t WHERE t.f_table_name = %s AND t.f_geometry_column = %s
""",
(table_name, description.name),
)
row = cursor.fetchone()
if not row:
raise Exception(
'Could not find a geometry or geography column for "%s"."%s"'
% (table_name, description.name)
)
dim, srid, field_type = row
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(field_type).django
# Getting any GeometryField keyword arguments that are not the
# default.
field_params = {}
if self.postgis_oid_lookup.get(description.type_code) == "geography":
field_params["geography"] = True
if srid != 4326:
field_params["srid"] = srid
if dim != 2:
field_params["dim"] = dim
return field_type, field_params
| PostGISIntrospection |
python | sqlalchemy__sqlalchemy | test/perf/orm2010.py | {
"start": 670,
"end": 883
} | class ____(Employee):
__tablename__ = "boss"
id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
golf_average = Column(Numeric)
__mapper_args__ = {"polymorphic_identity": "boss"}
| Boss |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 52035,
"end": 52531
} | class ____(themeable):
"""
Vertical spacing between the facet panels
Parameters
----------
theme_element : float
Size as a fraction of the figure width.
Notes
-----
It is deliberate to have the vertical spacing be a fraction of
the width. That means that when
[](`~plotnine.theme.themeables.panel_spacing_x`) is the
equal [](`~plotnine.theme.themeables.panel_spacing_x`),
the spaces in both directions will be equal.
"""
| panel_spacing_y |
python | numpy__numpy | numpy/f2py/tests/test_abstract_interface.py | {
"start": 188,
"end": 811
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")]
skip = ["add1", "add2"]
def test_abstract_interface(self):
assert self.module.ops_module.foo(3, 5) == (8, 13)
def test_parse_abstract_interface(self):
# Test gh18403
fpath = util.getpath("tests", "src", "abstract_interface",
"gh18403_mod.f90")
mod = crackfortran.crackfortran([str(fpath)])
assert len(mod) == 1
assert len(mod[0]["body"]) == 1
assert mod[0]["body"][0]["block"] == "abstract interface"
| TestAbstractInterface |
python | huggingface__transformers | src/transformers/models/dia/feature_extraction_dia.py | {
"start": 962,
"end": 8434
} | class ____(SequenceFeatureExtractor):
r"""
Constructs an Dia feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
Args:
feature_size (`int`, *optional*, defaults to 1):
The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio waveform should be digitalized, expressed in hertz (Hz).
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used for padding.
hop_length (`int`, *optional*, defaults to 512):
Overlap length between successive windows.
"""
model_input_names = ["input_values", "n_quantizers"]
def __init__(
self,
feature_size: int = 1,
sampling_rate: int = 16000,
padding_value: float = 0.0,
hop_length: int = 512,
**kwargs,
):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.hop_length = hop_length
def __call__(
self,
raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
padding: Optional[Union[bool, str, PaddingStrategy]] = None,
truncation: Optional[bool] = False,
max_length: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
sampling_rate: Optional[int] = None,
) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
`(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
(`feature_size = 2`).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, *optional*, defaults to `False`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
return_tensors (`str` or [`~utils.TensorType`], *optional*, default to 'pt'):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
"Failing to do so can result in silent errors that might be hard to debug."
)
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one.")
elif padding is None:
# by default let's pad the inputs
padding = True
is_batched = bool(
isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
)
if is_batched:
raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
elif not is_batched and not isinstance(raw_audio, np.ndarray):
raw_audio = np.asarray(raw_audio, dtype=np.float32)
elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
raw_audio = raw_audio.astype(np.float32)
# always return batch
if not is_batched:
raw_audio = [np.asarray(raw_audio).T]
# convert stereo to mono if necessary, unique to Dia
for idx, example in enumerate(raw_audio):
if self.feature_size == 2 and example.ndim == 2:
raw_audio[idx] = np.mean(example, -1)
# verify inputs are valid
for idx, example in enumerate(raw_audio):
if example.ndim > 2:
raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
if self.feature_size == 2 and example.ndim != 1: # note the conversion before
raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
input_values = BatchFeature({"input_values": raw_audio})
# temporarily treat it as if we were mono as we also convert stereo to mono
original_feature_size = self.feature_size
self.feature_size = 1
# normal padding on batch
padded_inputs = self.pad(
input_values,
max_length=max_length,
truncation=truncation,
padding=padding,
return_attention_mask=True,
pad_to_multiple_of=self.hop_length,
)
padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
input_values = []
for example in padded_inputs.pop("input_values"):
if self.feature_size == 1:
example = example[..., None]
input_values.append(example.T)
padded_inputs["input_values"] = input_values
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
# rewrite back to original feature size
self.feature_size = original_feature_size
return padded_inputs
__all__ = ["DiaFeatureExtractor"]
| DiaFeatureExtractor |
python | Textualize__textual | docs/examples/styles/margin_all.py | {
"start": 117,
"end": 951
} | class ____(App):
CSS_PATH = "margin_all.tcss"
def compose(self):
yield Grid(
Container(Placeholder("no margin", id="p1"), classes="bordered"),
Container(Placeholder("margin: 1", id="p2"), classes="bordered"),
Container(Placeholder("margin: 1 5", id="p3"), classes="bordered"),
Container(Placeholder("margin: 1 1 2 6", id="p4"), classes="bordered"),
Container(Placeholder("margin-top: 4", id="p5"), classes="bordered"),
Container(Placeholder("margin-right: 3", id="p6"), classes="bordered"),
Container(Placeholder("margin-bottom: 4", id="p7"), classes="bordered"),
Container(Placeholder("margin-left: 3", id="p8"), classes="bordered"),
)
if __name__ == "__main__":
app = MarginAllApp()
app.run()
| MarginAllApp |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 82075,
"end": 84161
} | class ____(test.TestCase):
def setUp(self):
self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
self._predictions_idx = [[3], [3]]
indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
class_labels = (3, 2)
# Sparse vs dense, and 1d vs 2d labels should all be handled the same.
self._labels = (
_binary_2d_label_to_1d_sparse_value(indicator_labels),
_binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
class_labels, dtype=np.int64), np.array(
[[class_id] for class_id in class_labels], dtype=np.int64))
self._test_precision_at_k = functools.partial(
_test_precision_at_k, test_case=self)
self._test_precision_at_top_k = functools.partial(
_test_precision_at_top_k, test_case=self)
self._test_average_precision_at_k = functools.partial(
_test_average_precision_at_k, test_case=self)
@test_util.run_deprecated_v1
def test_at_k1_nan(self):
for labels in self._labels:
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_precision_at_k(
self._predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_precision_at_top_k(
self._predictions_idx, labels, k=1, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_at_k1(self):
for labels in self._labels:
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_precision_at_top_k(
self._predictions_idx, labels, k=1, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2)
self._test_precision_at_top_k(
self._predictions_idx, labels, k=1, expected=1.0 / 2)
self._test_average_precision_at_k(
self._predictions, labels, k=1, expected=1.0 / 2)
| SingleLabelPrecisionAtKTest |
python | getsentry__sentry | tests/sentry/services/nodestore/django/test_backend.py | {
"start": 421,
"end": 5128
} | class ____:
def setup_method(self) -> None:
self.ns = DjangoNodeStorage()
@pytest.mark.parametrize(
"node_data",
[
compress(b'{"foo": "bar"}'),
compress(pickle.dumps({"foo": "bar"})),
# hardcoded pickle value from python 3.6
compress(b"\x80\x03}q\x00X\x03\x00\x00\x00fooq\x01X\x03\x00\x00\x00barq\x02s."),
# hardcoded pickle value from python 2.7
compress(b"(dp0\nS'foo'\np1\nS'bar'\np2\ns."),
],
)
def test_get(self, node_data: str) -> None:
node = Node.objects.create(id="d2502ebbd7df41ceba8d3275595cac33", data=node_data)
result = self.ns.get(node.id)
assert result == {"foo": "bar"}
def test_get_multi(self) -> None:
Node.objects.create(id="d2502ebbd7df41ceba8d3275595cac33", data=compress(b'{"foo": "bar"}'))
Node.objects.create(id="5394aa025b8e401ca6bc3ddee3130edc", data=compress(b'{"foo": "baz"}'))
result = self.ns.get_multi(
["d2502ebbd7df41ceba8d3275595cac33", "5394aa025b8e401ca6bc3ddee3130edc"]
)
assert result == {
"d2502ebbd7df41ceba8d3275595cac33": {"foo": "bar"},
"5394aa025b8e401ca6bc3ddee3130edc": {"foo": "baz"},
}
def test_set(self) -> None:
self.ns.set("d2502ebbd7df41ceba8d3275595cac33", {"foo": "bar"})
assert Node.objects.get(id="d2502ebbd7df41ceba8d3275595cac33").data == compress(
b'{"foo":"bar"}'
)
def test_delete(self) -> None:
node = Node.objects.create(id="d2502ebbd7df41ceba8d3275595cac33", data='{"foo": "bar"}')
self.ns.delete(node.id)
assert not Node.objects.filter(id=node.id).exists()
def test_delete_multi(self) -> None:
node = Node.objects.create(id="d2502ebbd7df41ceba8d3275595cac33", data='{"foo": "bar"}')
self.ns.delete_multi([node.id])
assert not Node.objects.filter(id=node.id).exists()
def test_cleanup(self) -> None:
now = timezone.now()
cutoff = now - timedelta(days=1)
node = Node.objects.create(
id="d2502ebbd7df41ceba8d3275595cac33", timestamp=now, data='{"foo": "bar"}'
)
node2 = Node.objects.create(
id="d2502ebbd7df41ceba8d3275595cac34", timestamp=cutoff, data='{"foo": "bar"}'
)
self.ns.cleanup(cutoff)
assert Node.objects.filter(id=node.id).exists()
assert not Node.objects.filter(id=node2.id).exists()
def test_cache(self) -> None:
node_1 = ("a" * 32, {"foo": "a"})
node_2 = ("b" * 32, {"foo": "b"})
node_3 = ("c" * 32, {"foo": "c"})
for node_id, data in [node_1, node_2, node_3]:
Node.objects.create(id=node_id, data=compress(json_dumps(data).encode("utf8")))
# Get / get multi populates cache
assert self.ns.get(node_1[0]) == node_1[1]
assert self.ns.get_multi([node_2[0], node_3[0]]) == {
node_2[0]: node_2[1],
node_3[0]: node_3[1],
}
with mock.patch.object(Node.objects, "get") as mock_get:
assert self.ns.get(node_1[0]) == node_1[1]
assert self.ns.get(node_2[0]) == node_2[1]
assert self.ns.get(node_3[0]) == node_3[1]
assert mock_get.call_count == 0
with mock.patch.object(Node.objects, "filter") as mock_filter:
assert self.ns.get_multi([node_1[0], node_2[0], node_3[0]])
assert mock_filter.call_count == 0
# Manually deleted item should still retrievable from cache
Node.objects.get(id=node_1[0]).delete()
assert self.ns.get(node_1[0]) == node_1[1]
assert self.ns.get_multi([node_1[0], node_2[0]]) == {
node_1[0]: node_1[1],
node_2[0]: node_2[1],
}
# Deletion clars cache
self.ns.delete(node_1[0])
assert self.ns.get_multi([node_1[0], node_2[0]]) == {node_2[0]: node_2[1]}
self.ns.delete_multi([node_1[0], node_2[0]])
assert self.ns.get_multi([node_1[0], node_2[0]]) == {}
# Setting the item updates cache
new_value = {"event_id": "d" * 32}
self.ns.set(node_1[0], new_value)
with mock.patch.object(Node.objects, "get") as mock_get:
assert self.ns.get(node_1[0]) == new_value
assert mock_get.call_count == 0
# Missing rows are never cached
assert self.ns.get("node_4") is None
with mock.patch.object(Node.objects, "get") as mock_get:
mock_get.side_effect = Node.DoesNotExist
self.ns.get("node_4")
self.ns.get("node_4")
assert mock_get.call_count == 2
| TestDjangoNodeStorage |
python | scikit-learn__scikit-learn | sklearn/feature_extraction/_hash.py | {
"start": 603,
"end": 7866
} | class ____(TransformerMixin, BaseEstimator):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
For an efficiency comparison of the different feature extractors, see
:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
Read more in the :ref:`User Guide <feature_hashing>`.
.. versionadded:: 0.13
Parameters
----------
n_features : int, default=2**20
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
input_type : str, default='dict'
Choose a string from {'dict', 'pair', 'string'}.
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
dtype : numpy dtype, default=np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
alternate_sign : bool, default=True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
.. versionchanged:: 0.19
``alternate_sign`` replaces the now deprecated ``non_negative``
parameter.
See Also
--------
DictVectorizer : Vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : Handles nominal/categorical features.
Notes
-----
This estimator is :term:`stateless` and does not need to be fitted.
However, we recommend to call :meth:`fit_transform` instead of
:meth:`transform`, as parameter validation is only performed in
:meth:`fit`.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
With `input_type="string"`, the input must be an iterable over iterables of
strings:
>>> h = FeatureHasher(n_features=8, input_type="string")
>>> raw_X = [["dog", "cat", "snake"], ["snake", "dog"], ["cat", "bird"]]
>>> f = h.transform(raw_X)
>>> f.toarray()
array([[ 0., 0., 0., -1., 0., -1., 0., 1.],
[ 0., 0., 0., -1., 0., -1., 0., 0.],
[ 0., -1., 0., 0., 0., 0., 0., 1.]])
"""
# raw_X should have been called X
__metadata_request__transform = {"raw_X": metadata_routing.UNUSED}
_parameter_constraints: dict = {
"n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="both")],
"input_type": [StrOptions({"dict", "pair", "string"})],
"dtype": "no_validation", # delegate to numpy
"alternate_sign": ["boolean"],
}
def __init__(
self,
n_features=(2**20),
*,
input_type="dict",
dtype=np.float64,
alternate_sign=True,
):
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.alternate_sign = alternate_sign
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X=None, y=None):
"""Only validates estimator's parameters.
This method allows to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : Ignored
Not used, present here for API consistency by convention.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
FeatureHasher class instance.
"""
return self
def transform(self, raw_X):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be iterable an (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
first_raw_X = next(raw_X)
if isinstance(first_raw_X, str):
raise ValueError(
"Samples can not be a single string. The input must be an iterable"
" over iterables of strings."
)
raw_X_ = chain([first_raw_X], raw_X)
raw_X = (((f, 1) for f in x) for x in raw_X_)
indices, indptr, values = _hashing_transform(
raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix(
(values, indices, indptr),
dtype=self.dtype,
shape=(n_samples, self.n_features),
)
X.sum_duplicates() # also sorts the indices
return X
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.two_d_array = False
if self.input_type == "string":
tags.input_tags.string = True
elif self.input_type == "dict":
tags.input_tags.dict = True
tags.requires_fit = False
return tags
| FeatureHasher |
python | google__pytype | pytype/pyi/parser.py | {
"start": 28981,
"end": 30869
} | class ____:
"""Pyi parsing options."""
python_version: tuple[int, int] = sys.version_info[:2]
platform: str = sys.platform
strict_primitive_comparisons: bool = True
@classmethod
def from_toplevel_options(cls, toplevel_options):
kwargs = {}
for k in _TOPLEVEL_PYI_OPTIONS:
kwargs[k] = getattr(toplevel_options, k)
return cls(**kwargs)
def parse_string(
src: str,
name: str | None = None,
filename: str | None = None,
options: PyiOptions | None = None,
):
return parse_pyi(src, filename=filename, module_name=name, options=options)
def parse_pyi(
src: str,
filename: str | None,
module_name: str,
options: PyiOptions | None = None,
debug_mode: bool = False,
) -> pytd.TypeDeclUnit:
"""Parse a pyi string."""
filename = filename or ""
options = options or PyiOptions()
feature_version = _feature_version(options.python_version)
root = _parse(src, feature_version, filename)
if debug_mode:
print(debug.dump(root, astlib, include_attributes=False))
root = _ConvertConstantsVisitor(filename).visit(root)
gen_pytd = _GeneratePytdVisitor(src, filename, module_name, options)
root = gen_pytd.visit(root)
if debug_mode:
print("---transformed parse tree--------------------")
print(root)
root = post_process_ast(root, src, module_name)
if debug_mode:
print("---post-processed---------------------")
print(root)
print("------------------------")
print(gen_pytd.defs.type_map)
print(gen_pytd.defs.module_path_map)
return root
def canonical_pyi(pyi, multiline_args=False, options=None):
"""Rewrite a pyi in canonical form."""
ast = parse_string(pyi, options=options)
ast = ast.Visit(visitors.ClassTypeToNamedType())
ast = ast.Visit(visitors.CanonicalOrderingVisitor())
ast.Visit(visitors.VerifyVisitor())
return pytd_utils.Print(ast, multiline_args)
| PyiOptions |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_bigquery.py | {
"start": 4771,
"end": 11474
} | class ____:
def test_serialization(self, insert_job_trigger):
"""
Asserts that the BigQueryInsertJobTrigger correctly serializes its arguments and classpath.
"""
classpath, kwargs = insert_job_trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.bigquery.BigQueryInsertJobTrigger"
assert kwargs == {
"cancel_on_kill": True,
"conn_id": TEST_CONN_ID,
"job_id": TEST_JOB_ID,
"project_id": TEST_GCP_PROJECT_ID,
"dataset_id": TEST_DATASET_ID,
"table_id": TEST_TABLE_ID,
"location": TEST_LOCATION,
"poll_interval": POLLING_PERIOD_SECONDS,
"impersonation_chain": TEST_IMPERSONATION_CHAIN,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_bigquery_insert_job_op_trigger_success(self, mock_job_status, insert_job_trigger):
"""
Tests the BigQueryInsertJobTrigger only fires once the query execution reaches a successful state.
"""
mock_job_status.return_value = {"status": "success", "message": "Job completed"}
generator = insert_job_trigger.run()
actual = await generator.asend(None)
assert (
TriggerEvent({"status": "success", "message": "Job completed", "job_id": TEST_JOB_ID}) == actual
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook._get_job")
async def test_bigquery_insert_job_trigger_running(self, mock_get_job, caplog, insert_job_trigger):
"""Test that BigQuery Triggers do not fire while a query is still running."""
mock_get_job.return_value = mock.MagicMock(state="RUNNING")
caplog.set_level(logging.INFO)
task = asyncio.create_task(insert_job_trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
assert "Bigquery job status is running. Sleeping for 4.0 seconds." in caplog.text
# Prevents error when task is destroyed while in "pending" state
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_bigquery_op_trigger_terminated(self, mock_job_status, caplog, insert_job_trigger):
"""Test that BigQuery Triggers fire the correct event in case of an error."""
mock_job_status.return_value = {
"status": "error",
"message": "The conn_id `bq_default` isn't defined",
}
generator = insert_job_trigger.run()
actual = await generator.asend(None)
assert (
TriggerEvent({"status": "error", "message": "The conn_id `bq_default` isn't defined"}) == actual
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_bigquery_op_trigger_exception(self, mock_job_status, caplog, insert_job_trigger):
"""Test that BigQuery Triggers fire the correct event in case of an error."""
mock_job_status.side_effect = Exception("Test exception")
generator = insert_job_trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "error", "message": "Test exception"}) == actual
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.cancel_job")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
@mock.patch("airflow.providers.google.cloud.triggers.bigquery.BigQueryInsertJobTrigger.safe_to_cancel")
async def test_bigquery_insert_job_trigger_cancellation(
self, mock_get_task_instance, mock_get_job_status, mock_cancel_job, caplog, insert_job_trigger
):
"""
Test that BigQueryInsertJobTrigger handles cancellation correctly, logs the appropriate message,
and conditionally cancels the job based on the `cancel_on_kill` attribute.
"""
mock_get_task_instance.return_value = True
insert_job_trigger.cancel_on_kill = True
insert_job_trigger.job_id = "1234"
mock_get_job_status.side_effect = [
{"status": "running", "message": "Job is still running"},
asyncio.CancelledError(),
]
mock_cancel_job.return_value = asyncio.Future()
mock_cancel_job.return_value.set_result(None)
caplog.set_level(logging.INFO)
try:
async for _ in insert_job_trigger.run():
pass
except asyncio.CancelledError:
pass
assert (
"Task was killed" in caplog.text
or "Bigquery job status is running. Sleeping for 4.0 seconds." in caplog.text
), "Expected messages about task status or cancellation not found in log."
mock_cancel_job.assert_awaited_once()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.cancel_job")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
@mock.patch("airflow.providers.google.cloud.triggers.bigquery.BigQueryInsertJobTrigger.safe_to_cancel")
async def test_bigquery_insert_job_trigger_cancellation_unsafe_cancellation(
self, mock_safe_to_cancel, mock_get_job_status, mock_cancel_job, caplog, insert_job_trigger
):
"""
Test that BigQueryInsertJobTrigger logs the appropriate message and does not cancel the job
if safe_to_cancel returns False even when the task is cancelled.
"""
mock_safe_to_cancel.return_value = False
insert_job_trigger.cancel_on_kill = True
insert_job_trigger.job_id = "1234"
# Simulate the initial job status as running
mock_get_job_status.side_effect = [
{"status": "running", "message": "Job is still running"},
asyncio.CancelledError(),
{"status": "running", "message": "Job is still running after cancellation"},
]
caplog.set_level(logging.INFO)
try:
async for _ in insert_job_trigger.run():
pass
except asyncio.CancelledError:
pass
assert "Skipping to cancel job" in caplog.text, (
"Expected message about skipping cancellation not found in log."
)
assert mock_get_job_status.call_count == 2, "Job status should be checked multiple times"
| TestBigQueryInsertJobTrigger |
python | sphinx-doc__sphinx | sphinx/util/parallel.py | {
"start": 1234,
"end": 5613
} | class ____:
"""Executes *nproc* tasks in parallel after forking."""
def __init__(self, nproc: int) -> None:
self.nproc = nproc
# (optional) function performed by each task on the result of main task
self._result_funcs: dict[int, Callable[[Any, Any], Any]] = {}
# task arguments
self._args: dict[int, list[Any] | None] = {}
# list of subprocesses (both started and waiting)
self._procs: dict[int, Any] = {}
# list of receiving pipe connections of running subprocesses
self._precvs: dict[int, Any] = {}
# list of receiving pipe connections of waiting subprocesses
self._precvs_waiting: dict[int, Any] = {}
# number of working subprocesses
self._pworking = 0
# task number of each subprocess
self._taskid = 0
def _process(
self, pipe: Any, func: Callable[[Any], Any] | Callable[[], Any], arg: Any
) -> None:
try:
collector = logging.LogCollector()
with collector.collect():
if arg is None:
ret = func() # type: ignore[call-arg]
else:
ret = func(arg) # type: ignore[call-arg]
failed = False
except BaseException as err:
failed = True
errmsg = traceback.format_exception_only(err.__class__, err)[0].strip()
ret = (errmsg, traceback.format_exc())
logging.convert_serializable(collector.logs)
pipe.send((failed, collector.logs, ret))
def add_task(
self,
task_func: Callable[[Any], Any] | Callable[[], Any],
arg: Any = None,
result_func: Callable[[Any, Any], Any] | None = None,
) -> None:
tid = self._taskid
self._taskid += 1
self._result_funcs[tid] = result_func or (lambda arg, result: None)
self._args[tid] = arg
precv, psend = multiprocessing.Pipe(False)
context: Any = multiprocessing.get_context('fork')
proc = context.Process(target=self._process, args=(psend, task_func, arg))
self._procs[tid] = proc
self._precvs_waiting[tid] = precv
try:
self._join_one()
except Exception:
# shutdown other child processes on failure
# (e.g. OSError: Failed to allocate memory)
self.terminate()
def join(self) -> None:
try:
while self._pworking:
if not self._join_one():
time.sleep(0.02)
finally:
# shutdown other child processes on failure
self.terminate()
def terminate(self) -> None:
for tid in list(self._precvs):
self._procs[tid].terminate()
self._result_funcs.pop(tid)
self._procs.pop(tid)
self._precvs.pop(tid)
self._pworking -= 1
def _join_one(self) -> bool:
joined_any = False
for tid, pipe in self._precvs.items():
if pipe.poll():
exc, logs, result = pipe.recv()
if exc:
raise SphinxParallelError(*result)
for log in logs:
logger.handle(log)
self._result_funcs.pop(tid)(self._args.pop(tid), result)
self._procs[tid].join()
self._precvs.pop(tid)
self._pworking -= 1
joined_any = True
break
while self._precvs_waiting and self._pworking < self.nproc:
newtid, newprecv = self._precvs_waiting.popitem()
self._precvs[newtid] = newprecv
self._procs[newtid].start()
self._pworking += 1
return joined_any
def make_chunks(arguments: Sequence[str], nproc: int, maxbatch: int = 10) -> list[Any]:
# determine how many documents to read in one go
nargs = len(arguments)
chunksize = nargs // nproc
if chunksize >= maxbatch:
# try to improve batch size vs. number of batches
chunksize = int(sqrt(nargs / nproc * maxbatch))
if chunksize == 0:
chunksize = 1
nchunks, rest = divmod(nargs, chunksize)
if rest:
nchunks += 1
# partition documents in "chunks" that will be written by one Process
return [arguments[i * chunksize : (i + 1) * chunksize] for i in range(nchunks)]
| ParallelTasks |
python | modin-project__modin | modin/config/envvars.py | {
"start": 37637,
"end": 37824
} | class ____(EnvironmentVariable, type=bool):
"""Set to true to test reading from SQL server."""
varname = "MODIN_TEST_READ_FROM_SQL_SERVER"
default = False
| TestReadFromSqlServer |
python | kubernetes-client__python | kubernetes/client/models/v1_subject_access_review_spec.py | {
"start": 383,
"end": 8354
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'extra': 'dict(str, list[str])',
'groups': 'list[str]',
'non_resource_attributes': 'V1NonResourceAttributes',
'resource_attributes': 'V1ResourceAttributes',
'uid': 'str',
'user': 'str'
}
attribute_map = {
'extra': 'extra',
'groups': 'groups',
'non_resource_attributes': 'nonResourceAttributes',
'resource_attributes': 'resourceAttributes',
'uid': 'uid',
'user': 'user'
}
def __init__(self, extra=None, groups=None, non_resource_attributes=None, resource_attributes=None, uid=None, user=None, local_vars_configuration=None): # noqa: E501
"""V1SubjectAccessReviewSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._extra = None
self._groups = None
self._non_resource_attributes = None
self._resource_attributes = None
self._uid = None
self._user = None
self.discriminator = None
if extra is not None:
self.extra = extra
if groups is not None:
self.groups = groups
if non_resource_attributes is not None:
self.non_resource_attributes = non_resource_attributes
if resource_attributes is not None:
self.resource_attributes = resource_attributes
if uid is not None:
self.uid = uid
if user is not None:
self.user = user
@property
def extra(self):
"""Gets the extra of this V1SubjectAccessReviewSpec. # noqa: E501
Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here. # noqa: E501
:return: The extra of this V1SubjectAccessReviewSpec. # noqa: E501
:rtype: dict(str, list[str])
"""
return self._extra
@extra.setter
def extra(self, extra):
"""Sets the extra of this V1SubjectAccessReviewSpec.
Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here. # noqa: E501
:param extra: The extra of this V1SubjectAccessReviewSpec. # noqa: E501
:type: dict(str, list[str])
"""
self._extra = extra
@property
def groups(self):
"""Gets the groups of this V1SubjectAccessReviewSpec. # noqa: E501
Groups is the groups you're testing for. # noqa: E501
:return: The groups of this V1SubjectAccessReviewSpec. # noqa: E501
:rtype: list[str]
"""
return self._groups
@groups.setter
def groups(self, groups):
"""Sets the groups of this V1SubjectAccessReviewSpec.
Groups is the groups you're testing for. # noqa: E501
:param groups: The groups of this V1SubjectAccessReviewSpec. # noqa: E501
:type: list[str]
"""
self._groups = groups
@property
def non_resource_attributes(self):
"""Gets the non_resource_attributes of this V1SubjectAccessReviewSpec. # noqa: E501
:return: The non_resource_attributes of this V1SubjectAccessReviewSpec. # noqa: E501
:rtype: V1NonResourceAttributes
"""
return self._non_resource_attributes
@non_resource_attributes.setter
def non_resource_attributes(self, non_resource_attributes):
"""Sets the non_resource_attributes of this V1SubjectAccessReviewSpec.
:param non_resource_attributes: The non_resource_attributes of this V1SubjectAccessReviewSpec. # noqa: E501
:type: V1NonResourceAttributes
"""
self._non_resource_attributes = non_resource_attributes
@property
def resource_attributes(self):
"""Gets the resource_attributes of this V1SubjectAccessReviewSpec. # noqa: E501
:return: The resource_attributes of this V1SubjectAccessReviewSpec. # noqa: E501
:rtype: V1ResourceAttributes
"""
return self._resource_attributes
@resource_attributes.setter
def resource_attributes(self, resource_attributes):
"""Sets the resource_attributes of this V1SubjectAccessReviewSpec.
:param resource_attributes: The resource_attributes of this V1SubjectAccessReviewSpec. # noqa: E501
:type: V1ResourceAttributes
"""
self._resource_attributes = resource_attributes
@property
def uid(self):
"""Gets the uid of this V1SubjectAccessReviewSpec. # noqa: E501
UID information about the requesting user. # noqa: E501
:return: The uid of this V1SubjectAccessReviewSpec. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1SubjectAccessReviewSpec.
UID information about the requesting user. # noqa: E501
:param uid: The uid of this V1SubjectAccessReviewSpec. # noqa: E501
:type: str
"""
self._uid = uid
@property
def user(self):
"""Gets the user of this V1SubjectAccessReviewSpec. # noqa: E501
User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups # noqa: E501
:return: The user of this V1SubjectAccessReviewSpec. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this V1SubjectAccessReviewSpec.
User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups # noqa: E501
:param user: The user of this V1SubjectAccessReviewSpec. # noqa: E501
:type: str
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SubjectAccessReviewSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SubjectAccessReviewSpec):
return True
return self.to_dict() != other.to_dict()
| V1SubjectAccessReviewSpec |
python | doocs__leetcode | lcof/面试题50. 第一个只出现一次的字符/Solution.py | {
"start": 0,
"end": 177
} | class ____:
def firstUniqChar(self, s: str) -> str:
cnt = Counter(s)
for c in s:
if cnt[c] == 1:
return c
return " "
| Solution |
python | wandb__wandb | wandb/vendor/pygments/lexers/configs.py | {
"start": 23071,
"end": 24298
} | class ____(RegexLexer):
"""
Lexer for termcap database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Termcap'
aliases = ['termcap']
filenames = ['termcap', 'termcap.src']
mimetypes = []
# NOTE:
# * multiline with trailing backslash
# * separator is ':'
# * to embed colon as data, we must use \072
# * space after separator is not allowed (mayve)
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#:|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r':', Punctuation, 'defs'),
(r'\|', Punctuation),
(r'[^:|]+', Name.Attribute),
],
'defs': [
(r'\\\n[ \t]*', Text),
(r'\n[ \t]*', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r':', Punctuation),
(r'[^\s:=#]+', Name.Class),
],
'data': [
(r'\\072', Literal),
(r':', Punctuation, '#pop'),
(r'[^:\\]+', Literal), # for performance
(r'.', Literal),
],
}
| TermcapLexer |
python | lazyprogrammer__machine_learning_examples | rl2/cartpole/q_learning_bins.py | {
"start": 880,
"end": 1832
} | class ____:
def __init__(self):
# Note: to make this better you could look at how often each bin was
# actually used while running the script.
# It's not clear from the high/low values nor sample() what values
# we really expect to get.
self.cart_position_bins = np.linspace(-2.4, 2.4, 9)
self.cart_velocity_bins = np.linspace(-2, 2, 9) # (-inf, inf) (I did not check that these were good values)
self.pole_angle_bins = np.linspace(-0.4, 0.4, 9)
self.pole_velocity_bins = np.linspace(-3.5, 3.5, 9) # (-inf, inf) (I did not check that these were good values)
def transform(self, observation):
# returns an int
cart_pos, cart_vel, pole_angle, pole_vel = observation
return build_state([
to_bin(cart_pos, self.cart_position_bins),
to_bin(cart_vel, self.cart_velocity_bins),
to_bin(pole_angle, self.pole_angle_bins),
to_bin(pole_vel, self.pole_velocity_bins),
])
| FeatureTransformer |
python | getsentry__sentry | tests/sentry/models/test_release.py | {
"start": 10778,
"end": 27288
} | class ____(TestCase):
@receivers_raise_on_send()
def test_simple(self) -> None:
org = self.create_organization(owner=Factories.create_user())
project = self.create_project(organization=org, name="foo")
group = self.create_group(project=project)
add_group_to_inbox(group, GroupInboxReason.MANUAL)
assert GroupInbox.objects.filter(group=group).exists()
repo = Repository.objects.create(organization_id=org.id, name="test/repo")
commit = Commit.objects.create(
organization_id=org.id,
repository_id=repo.id,
message="fixes %s" % (group.qualified_short_id),
key="alksdflskdfjsldkfajsflkslk",
)
commit2 = Commit.objects.create(
organization_id=org.id,
repository_id=repo.id,
message="i fixed something",
key="lskfslknsdkcsnlkdflksfdkls",
)
assert GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
release = Release.objects.create(version="abcdabc", organization=org)
release.add_project(project)
release.set_commits(
[
{"id": commit.key, "repository": repo.name},
{"id": commit2.key, "repository": repo.name},
{"id": "a" * 40, "repository": repo.name},
{"id": "b" * 40, "repository": repo.name, "message": "#skipsentry"},
]
)
assert ReleaseCommit.objects.filter(commit=commit, release=release).exists()
assert ReleaseCommit.objects.filter(commit=commit2, release=release).exists()
assert Group.objects.get(id=group.id).status == GroupStatus.RESOLVED
# test that backfilling works
assert Commit.objects.filter(key="a" * 40, repository_id=repo.id).exists()
assert not Commit.objects.filter(key="b" * 40, repository_id=repo.id).exists()
release = Release.objects.get(id=release.id)
assert release.commit_count == 3
assert release.authors == []
assert release.last_commit_id == commit.id
assert ReleaseHeadCommit.objects.filter(
release_id=release.id, commit_id=commit.id, repository_id=repo.id
).exists()
assert not GroupInbox.objects.filter(group=group).exists()
@receivers_raise_on_send()
def test_backfilling_commits(self) -> None:
org = self.create_organization(owner=Factories.create_user())
project = self.create_project(organization=org, name="foo")
group = self.create_group(project=project)
add_group_to_inbox(group, GroupInboxReason.MANUAL)
assert GroupInbox.objects.filter(group=group).exists()
repo = Repository.objects.create(organization_id=org.id, name="test/repo")
commit = Commit.objects.create(repository_id=repo.id, organization_id=org.id, key="b" * 40)
release = Release.objects.create(version="abcdabc", organization=org)
release.add_project(project)
release.set_commits(
[
{
"id": "a" * 40,
"repository": repo.name,
"author_email": "Foo@example.com", # throw in an upper case letter
"author_name": "foo bar baz",
"message": "i fixed a bug",
},
{
"id": "b" * 40,
"repository": repo.name,
"author_email": "foo@example.com",
"author_name": "foo bar baz",
"message": "i fixed another bug",
},
{
"id": "c" * 40,
"repository": repo.name,
"author_email": "foo@example.com",
"author_name": "foo bar baz",
"message": "fixes %s" % (group.qualified_short_id),
},
]
)
author = CommitAuthor.objects.get(
name="foo bar baz", email="foo@example.com", organization_id=org.id
)
commit_a = Commit.objects.get(repository_id=repo.id, organization_id=org.id, key="a" * 40)
assert commit_a
assert commit_a.message == "i fixed a bug"
assert commit_a.author_id == author.id
commit_c = Commit.objects.get(repository_id=repo.id, organization_id=org.id, key="c" * 40)
assert commit_c
assert commit_c.message is not None
assert "fixes" in commit_c.message
assert commit_c.author_id == author.id
# test that backfilling fills in missing message and author
commit = Commit.objects.get(id=commit.id)
assert commit.message == "i fixed another bug"
assert commit.author_id == author.id
assert ReleaseCommit.objects.filter(
commit__key="a" * 40, commit__repository_id=repo.id, release=release
).exists()
assert ReleaseCommit.objects.filter(
commit__key="b" * 40, commit__repository_id=repo.id, release=release
).exists()
assert ReleaseCommit.objects.filter(
commit__key="c" * 40, commit__repository_id=repo.id, release=release
).exists()
assert GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit_c.id
).exists()
assert GroupResolution.objects.filter(group=group, release=release).exists()
assert (
GroupResolution.objects.get(group=group, release=release).status
== GroupResolution.Status.resolved
)
assert Group.objects.get(id=group.id).status == GroupStatus.RESOLVED
latest_commit = Commit.objects.get(repository_id=repo.id, key="a" * 40)
release = Release.objects.get(id=release.id)
assert release.commit_count == 3
assert release.authors == [str(author.id)]
assert release.last_commit_id == latest_commit.id
assert not GroupInbox.objects.filter(group=group).exists()
@freeze_time()
@receivers_raise_on_send()
def test_using_saved_data(self) -> None:
org = self.create_organization(owner=Factories.create_user())
project = self.create_project(organization=org, name="foo")
repo = Repository.objects.create(organization_id=org.id, name="test/repo")
author = CommitAuthor.objects.create(
name="foo bar baz", email="foo@example.com", organization_id=org.id
)
author.preload_users()
Commit.objects.create(
repository_id=repo.id,
organization_id=org.id,
key="b" * 40,
author=author,
date_added="2019-03-01 12:00:00+00:00",
message="fixed a thing",
)
release = Release.objects.create(version="abcdabc", organization=org)
release.add_project(project)
release.set_commits(
[
{"id": "a" * 40, "repository": repo.name},
{"id": "b" * 40, "repository": repo.name},
{"id": "c" * 40, "repository": repo.name},
]
)
date_format = "%Y-%m-%d %H:%M:%S"
assert Commit.objects.filter(
repository_id=repo.id, organization_id=org.id, key="a" * 40
).exists()
commit_c = Commit.objects.get(repository_id=repo.id, organization_id=org.id, key="c" * 40)
assert commit_c.date_added.strftime(date_format) == timezone.now().strftime(date_format)
assert commit_c.message is None
# Using the id/repository payload should retain existing data.
commit_b = Commit.objects.get(repository_id=repo.id, organization_id=org.id, key="b" * 40)
assert commit_b.message == "fixed a thing"
assert commit_b.date_added.strftime(date_format) == "2019-03-01 12:00:00"
latest_commit = Commit.objects.get(repository_id=repo.id, key="a" * 40)
release = Release.objects.get(id=release.id)
assert release.commit_count == 3
assert release.authors == [str(author.id)]
assert release.last_commit_id == latest_commit.id
@patch("sentry.models.Commit.update")
@freeze_time()
@receivers_raise_on_send()
def test_multiple_releases_only_updates_once(self, mock_update: MagicMock) -> None:
org = self.create_organization(owner=Factories.create_user())
project = self.create_project(organization=org, name="foo")
repo = Repository.objects.create(organization_id=org.id, name="test/repo")
release = Release.objects.create(version="abcdabc", organization=org)
release.add_project(project)
release.set_commits([{"id": "b" * 40, "repository": repo.name, "message": "old message"}])
# Setting the exact same commits, shouldn't call update
release.set_commits([{"id": "b" * 40, "repository": repo.name, "message": "old message"}])
assert mock_update.call_count == 0
# Setting a different commit message, should call update
release.set_commits([{"id": "b" * 40, "repository": repo.name, "message": "new message"}])
assert mock_update.call_count == 1
@receivers_raise_on_send()
def test_resolution_support_full_featured(self) -> None:
org = self.create_organization(owner=self.user)
project = self.create_project(organization=org, name="foo")
group = self.create_group(project=project)
add_group_to_inbox(group, GroupInboxReason.MANUAL)
assert GroupInbox.objects.filter(group=group).exists()
repo = Repository.objects.create(organization_id=org.id, name="test/repo")
author = CommitAuthor.objects.create(
organization_id=org.id, name="Foo Bar", email=self.user.email
)
author.preload_users()
commit = Commit.objects.create(
organization_id=org.id,
repository_id=repo.id,
message="fixes %s" % (group.qualified_short_id),
key="alksdflskdfjsldkfajsflkslk",
author=author,
)
old_release = self.create_release(project=project, version="pre-1.0")
resolution = GroupResolution.objects.create(
group=group,
release=old_release,
type=GroupResolution.Type.in_next_release,
status=GroupResolution.Status.pending,
)
release = self.create_release(project=project, version="abcdabc")
release.set_commits([{"id": commit.key, "repository": repo.name}])
assert GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
# Pull the object from the DB again to test updated attributes
resolution = GroupResolution.objects.get(group=group)
assert resolution.status == GroupResolution.Status.resolved
assert resolution.release == release
assert resolution.type == GroupResolution.Type.in_release
assert resolution.actor_id == self.user.id
assert Group.objects.get(id=group.id).status == GroupStatus.RESOLVED
assert not GroupInbox.objects.filter(group=group).exists()
@receivers_raise_on_send()
def test_resolution_support_without_author(self) -> None:
org = self.create_organization(owner=Factories.create_user())
project = self.create_project(organization=org, name="foo")
group = self.create_group(project=project)
add_group_to_inbox(group, GroupInboxReason.MANUAL)
assert GroupInbox.objects.filter(group=group).exists()
repo = Repository.objects.create(organization_id=org.id, name="test/repo")
commit = Commit.objects.create(
organization_id=org.id,
repository_id=repo.id,
message="fixes %s" % (group.qualified_short_id),
key="alksdflskdfjsldkfajsflkslk",
)
release = self.create_release(project=project, version="abcdabc")
release.set_commits([{"id": commit.key, "repository": repo.name}])
assert GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
resolution = GroupResolution.objects.get(group=group)
assert resolution.status == GroupResolution.Status.resolved
assert resolution.release == release
assert resolution.type == GroupResolution.Type.in_release
assert resolution.actor_id is None
assert Group.objects.get(id=group.id).status == GroupStatus.RESOLVED
assert not GroupInbox.objects.filter(group=group).exists()
@patch("sentry.integrations.example.integration.ExampleIntegration.sync_status_outbound")
@receivers_raise_on_send()
def test_resolution_support_with_integration(
self, mock_sync_status_outbound: MagicMock
) -> None:
org = self.create_organization(owner=Factories.create_user())
integration = self.create_integration(
organization=org,
external_id="example:1",
provider="example",
name="Example",
oi_params={
"config": {
"sync_comments": True,
"sync_status_outbound": True,
"sync_status_inbound": True,
"sync_assignee_outbound": True,
"sync_assignee_inbound": True,
}
},
)
project = self.create_project(organization=org, name="foo")
group = self.create_group(project=project)
add_group_to_inbox(group, GroupInboxReason.MANUAL)
assert GroupInbox.objects.filter(group=group).exists()
external_issue = ExternalIssue.objects.get_or_create(
organization_id=org.id, integration_id=integration.id, key="APP-%s" % group.id
)[0]
GroupLink.objects.get_or_create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.issue,
linked_id=external_issue.id,
relationship=GroupLink.Relationship.references,
)[0]
repo = Repository.objects.create(organization_id=org.id, name="test/repo")
commit = Commit.objects.create(
organization_id=org.id,
repository_id=repo.id,
message="fixes %s" % (group.qualified_short_id),
key="alksdflskdfjsldkfajsflkslk",
)
release = self.create_release(project=project, version="abcdabc")
with self.tasks():
with self.feature({"organizations:integrations-issue-sync": True}):
release.set_commits([{"id": commit.key, "repository": repo.name}])
mock_sync_status_outbound.assert_called_once_with(external_issue, True, group.project_id)
assert GroupLink.objects.filter(
group_id=group.id, linked_type=GroupLink.LinkedType.commit, linked_id=commit.id
).exists()
resolution = GroupResolution.objects.get(group=group)
assert resolution.status == GroupResolution.Status.resolved
assert resolution.release == release
assert resolution.type == GroupResolution.Type.in_release
assert resolution.actor_id is None
assert Group.objects.get(id=group.id).status == GroupStatus.RESOLVED
assert not GroupInbox.objects.filter(group=group).exists()
@receivers_raise_on_send()
def test_long_email(self) -> None:
org = self.create_organization(owner=Factories.create_user())
project = self.create_project(organization=org, name="foo")
repo = Repository.objects.create(organization_id=org.id, name="test/repo")
release = Release.objects.create(version="abcdabc", organization=org)
release.add_project(project)
commit_email = "a" * 248 + "@a.com" # 254 chars long, max valid email.
release.set_commits(
[
{
"id": "a" * 40,
"repository": repo.name,
"author_name": "foo bar baz",
"author_email": commit_email,
"message": "i fixed a bug",
}
]
)
commit = Commit.objects.get(repository_id=repo.id, organization_id=org.id, key="a" * 40)
assert commit.author is not None
assert commit.author.email == truncatechars(commit_email, 75)
| SetCommitsTestCase |
python | walkccc__LeetCode | solutions/1906. Minimum Absolute Difference Queries/1906.py | {
"start": 0,
"end": 732
} | class ____:
def minDifference(
self,
nums: list[int],
queries: list[list[int]],
) -> list[int]:
numToIndices = [[] for _ in range(101)]
for i, num in enumerate(nums):
numToIndices[num].append(i)
if len(numToIndices[nums[0]]) == len(nums):
return [-1] * len(queries)
ans = []
for l, r in queries:
prevNum = -1
minDiff = 101
for num in range(1, 101):
indices = numToIndices[num]
i = bisect_left(indices, l)
if i == len(indices) or indices[i] > r:
continue
if prevNum != -1:
minDiff = min(minDiff, num - prevNum)
prevNum = num
ans.append(-1 if minDiff == 101 else minDiff)
return ans
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.