language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | aimacode__aima-python | text.py | {
"start": 7882,
"end": 8326
} | class ____(IRSystem):
"""A trivial IR system over a small collection of Unix man pages."""
def __init__(self):
IRSystem.__init__(self, stopwords="how do i the a of")
import os
aima_root = os.path.dirname(__file__)
mandir = os.path.join(aima_root, 'aima-data/MAN/')
man_files = [mandir + f for f in os.listdir(mandir) if f.endswith('.txt')]
self.index_collection(man_files)
| UnixConsultant |
python | fastai__fastai | fastai/data/transforms.py | {
"start": 13784,
"end": 14252
} | class ____(Categorize):
"Transform of one-hot encoded multi-category that decodes with `vocab`"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab):
super().__init__(vocab, sort=vocab==None)
self.c = len(vocab)
def encodes(self, o): return TensorMultiCategory(tensor(o).float())
def decodes(self, o): return MultiCategory (one_hot_decode(o, self.vocab))
# %% ../../nbs/05_data.transforms.ipynb 94
| EncodedMultiCategorize |
python | openai__openai-python | src/openai/types/beta/chatkit/chat_session_history.py | {
"start": 186,
"end": 467
} | class ____(BaseModel):
enabled: bool
"""Indicates if chat history is persisted for the session."""
recent_threads: Optional[int] = None
"""Number of prior threads surfaced in history views.
Defaults to null when all history is retained.
"""
| ChatSessionHistory |
python | numpy__numpy | numpy/lib/tests/test_arraypad.py | {
"start": 37287,
"end": 42967
} | class ____:
def test_check_simple(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'symmetric')
b = np.array(
[24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
)
assert_array_equal(a, b)
def test_check_odd_method(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd')
b = np.array(
[-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
-14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
-4, -3, -2, -1, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
)
assert_array_equal(a, b)
def test_check_large_pad(self):
a = [[4, 5, 6], [6, 7, 8]]
a = np.pad(a, (5, 7), 'symmetric')
b = np.array(
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
)
assert_array_equal(a, b)
def test_check_large_pad_odd(self):
a = [[4, 5, 6], [6, 7, 8]]
a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd')
b = np.array(
[[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
[-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
[-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
[ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
[ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
[ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
[ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
[ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
[ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
[ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
[ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
[ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
[ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
)
assert_array_equal(a, b)
def test_check_shape(self):
a = [[4, 5, 6]]
a = np.pad(a, (5, 7), 'symmetric')
b = np.array(
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
)
assert_array_equal(a, b)
def test_check_01(self):
a = np.pad([1, 2, 3], 2, 'symmetric')
b = np.array([2, 1, 1, 2, 3, 3, 2])
assert_array_equal(a, b)
def test_check_02(self):
a = np.pad([1, 2, 3], 3, 'symmetric')
b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
assert_array_equal(a, b)
def test_check_03(self):
a = np.pad([1, 2, 3], 6, 'symmetric')
b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
assert_array_equal(a, b)
| TestSymmetric |
python | kubernetes-client__python | kubernetes/base/dynamic/exceptions.py | {
"start": 3673,
"end": 3763
} | class ____(DynamicApiError):
""" 503: StatusServiceUnavailable """
| ServiceUnavailableError |
python | pytorch__pytorch | torch/_inductor/triton_bundler.py | {
"start": 1687,
"end": 1903
} | class ____:
"""
Collection of artifacts for a particular kernel.
"""
kernel_hash: str
device: int
artifacts: list[TritonKernelArtifact]
@dataclasses.dataclass(frozen=True)
| TritonKernelArtifacts |
python | networkx__networkx | networkx/classes/coreviews.py | {
"start": 2124,
"end": 2738
} | class ____(AdjacencyView):
"""An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps.
It is a View into a dict-of-dict-of-dict-of-dict data structure.
The inner level of dict is read-write. But the
outer levels are read-only.
See Also
========
AtlasView: View into dict-of-dict
AdjacencyView: View into dict-of-dict-of-dict
"""
__slots__ = () # Still uses AtlasView slots names _atlas
def __getitem__(self, name):
return AdjacencyView(self._atlas[name])
def copy(self):
return {n: self[n].copy() for n in self._atlas}
| MultiAdjacencyView |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-together/llama_index/llms/together/base.py | {
"start": 98,
"end": 1209
} | class ____(OpenAILike):
"""
Together LLM.
Examples:
`pip install llama-index-llms-together`
```python
from llama_index.llms.together import TogetherLLM
# set api key in env or in llm
# import os
# os.environ["TOGETHER_API_KEY"] = "your api key"
llm = TogetherLLM(
model="mistralai/Mixtral-8x7B-Instruct-v0.1", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = "https://api.together.xyz/v1",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("TOGETHER_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "TogetherLLM"
| TogetherLLM |
python | Lightning-AI__lightning | tests/tests_pytorch/tuner/test_scale_batch_size.py | {
"start": 1200,
"end": 1507
} | class ____(BoringDataModule):
def __init__(self, batch_size):
super().__init__()
if batch_size is not None:
self.batch_size = batch_size
def train_dataloader(self):
return DataLoader(self.random_train, batch_size=getattr(self, "batch_size", 1))
| BatchSizeDataModule |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_snippets.py | {
"start": 15565,
"end": 16075
} | class ____(util.MdCase):
"""Test snippet cases with path-like objects."""
extension = [
'pymdownx.snippets'
]
extension_configs = {
'pymdownx.snippets': {
'base_path': _PathLikeExampleObject()
}
}
def test_inline(self):
"""Test inline."""
self.check_markdown(
R'''
---8<--- "a.txt"
''',
R'''
<p>Snippet</p>
''',
True
)
| TestSnippetsPathLike |
python | eth-brownie__brownie | brownie/network/gas/strategies.py | {
"start": 6483,
"end": 8091
} | class ____(BlockGasStrategy):
"""
Block based scaling gas strategy using the GraphQL and the Geth mempool.
The yielded gas price is determined by sorting transactions in the mempool
according to gas price, and returning the price of the transaction at `position`.
This is the same technique used by the GasNow API.
A position of 500 should place a transaction within the 2nd block to be mined.
A position of 200 or less should place it within the next block.
"""
def __init__(
self,
position: int = 500,
graphql_endpoint: str = None,
block_duration: int = 2,
max_gas_price: Wei = None,
):
super().__init__(block_duration)
self.position = position
if graphql_endpoint is None:
graphql_endpoint = f"{web3.provider.endpoint_uri}/graphql"
self.graphql_endpoint = graphql_endpoint
self.max_gas_price = Wei(max_gas_price) or 2**256 - 1
def get_gas_price(self) -> Generator[int, None, None]:
query = "{ pending { transactions { gasPrice }}}"
while True:
response = requests.post(self.graphql_endpoint, json={"query": query})
response.raise_for_status()
if "error" in response.json():
raise RPCRequestError("could not fetch mempool, run geth with `--graphql` flag")
data = response.json()["data"]["pending"]["transactions"]
prices = sorted((int(x["gasPrice"], 16) for x in data), reverse=True)
yield min(prices[: self.position][-1], self.max_gas_price)
| GethMempoolStrategy |
python | fluentpython__example-code-2e | 24-class-metaprog/checked/metaclass/checkedlib.py | {
"start": 2070,
"end": 3095
} | class ____:
def __init__(self, name: str, constructor: Callable) -> None:
if not callable(constructor) or constructor is type(None):
raise TypeError(f'{name!r} type hint must be callable')
self.name = name
self.storage_name = '_' + name # <1>
self.constructor = constructor
def __get__(self, instance, owner=None):
if instance is None: # <2>
return self
return getattr(instance, self.storage_name) # <3>
def __set__(self, instance: Any, value: Any) -> None:
if value is ...:
value = self.constructor()
else:
try:
value = self.constructor(value)
except (TypeError, ValueError) as e:
type_name = self.constructor.__name__
msg = f'{value!r} is not compatible with {self.name}:{type_name}'
raise TypeError(msg) from e
setattr(instance, self.storage_name, value) # <4>
# end::CHECKED_FIELD[]
# tag::CHECKED_META[]
| Field |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 11855,
"end": 12298
} | class ____(WeaviateBaseError):
"""Is raised when a client method tries to use a new feature with an old Weaviate version."""
def __init__(self, feature: str, current: str, minimum: str) -> None:
msg = f"""{feature} is not supported by your connected server's Weaviate version. The current version is {current}, but the feature requires at least version {minimum}."""
super().__init__(msg)
| WeaviateUnsupportedFeatureError |
python | hynek__structlog | tests/test_dev.py | {
"start": 22507,
"end": 24521
} | class ____:
def test_default(self):
"""
If Rich is present, it's the default.
"""
assert dev.default_exception_formatter is dev.rich_traceback
def test_does_not_blow_up(self, sio):
"""
We trust Rich to do the right thing, so we just exercise the function
and check the first new line that we add manually is present.
"""
try:
0 / 0
except ZeroDivisionError:
dev.rich_traceback(sio, sys.exc_info())
assert sio.getvalue().startswith("\n")
def test_width_minus_one(self, sio):
"""
If width is -1, it raises a DeprecationWarning and is replaced by None to let `rich` handle it.
"""
rtf = dev.RichTracebackFormatter(width=-1)
with pytest.deprecated_call():
try:
0 / 0
except ZeroDivisionError:
rtf(sio, sys.exc_info())
assert rtf.width is None
@pytest.mark.parametrize("code_width_support", [True, False])
def test_code_width_support(self, sio, code_width_support):
"""
If rich does not support code_width, it should not fail
"""
from rich.traceback import Trace
tb = mock.Mock(
spec=[
attr
for attr in dir(dev.Traceback(Trace([])))
if (code_width_support or attr != "code_width")
]
)
tb.__rich_console__.return_value = "for Python 3.8 compatibility"
with mock.patch.object(
dev.Traceback, "from_exception", return_value=tb
) as factory:
try:
0 / 0
except ZeroDivisionError:
dev.rich_traceback(sio, sys.exc_info())
assert "code_width" not in factory.call_args.kwargs
if code_width_support:
assert tb.code_width == 88
@pytest.mark.skipif(
dev.better_exceptions is None, reason="Needs better-exceptions."
)
| TestRichTracebackFormatter |
python | huggingface__transformers | examples/modular-transformers/modeling_roberta.py | {
"start": 22484,
"end": 24474
} | class ____(PreTrainedModel):
config_class = RobertaConfig
base_model_prefix = "roberta"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": RobertaLayer,
"attentions": RobertaSelfAttention,
"cross_attentions": RobertaCrossAttention,
}
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.zero_()
elif isinstance(module, nn.Embedding):
module.weight.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.zero_()
module.weight.fill_(1.0)
elif isinstance(module, RobertaLMPredictionHead):
module.bias.zero_()
@auto_docstring(
custom_intro="""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
)
| RobertaPreTrainedModel |
python | numba__numba | numba/tests/test_entrypoints.py | {
"start": 347,
"end": 509
} | class ____(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return '_DummyClass(%f, %f)' % self.value
| _DummyClass |
python | pypa__warehouse | tests/unit/oidc/models/test_github.py | {
"start": 3206,
"end": 28619
} | class ____:
@pytest.mark.parametrize("environment", [None, "some_environment"])
def test_lookup_fails_invalid_workflow_ref(self, environment):
signed_claims = {
"repository": "foo/bar",
"job_workflow_ref": ("foo/bar/.github/workflows/.yml@refs/heads/main"),
"repository_owner_id": "1234",
}
if environment:
signed_claims["environment"] = environment
# The `job_workflow_ref` is malformed, so no queries are performed.
with pytest.raises(
errors.InvalidPublisherError,
match="Could not job extract workflow filename from OIDC claims",
):
github.GitHubPublisher.lookup_by_claims(pretend.stub(), signed_claims)
@pytest.mark.parametrize("environment", ["", "some_environment"])
@pytest.mark.parametrize(
("workflow_a", "workflow_b"),
[
("release-pypi.yml", "release_pypi.yml"),
("release%pypi.yml", "release-pypi.yml"),
],
)
def test_lookup_escapes(self, db_request, environment, workflow_a, workflow_b):
GitHubPublisherFactory(
id="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
repository_owner="foo",
repository_name="bar",
repository_owner_id="1234",
workflow_filename=workflow_a,
environment=environment,
)
GitHubPublisherFactory(
id="bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
repository_owner="foo",
repository_name="bar",
repository_owner_id="1234",
workflow_filename=workflow_b,
environment=environment,
)
for workflow in (workflow_a, workflow_b):
signed_claims = {
"repository": "foo/bar",
"job_workflow_ref": (
f"foo/bar/.github/workflows/{workflow}@refs/heads/main"
),
"repository_owner_id": "1234",
}
if environment:
signed_claims["environment"] = environment
assert (
github.GitHubPublisher.lookup_by_claims(
db_request.db, signed_claims
).workflow_filename
== workflow
)
def test_lookup_no_matching_publishers(self, db_request):
GitHubPublisherFactory(
id="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
repository_owner="foo",
repository_name="bar",
repository_owner_id="1234",
workflow_filename="release.yml",
environment="environment",
)
signed_claims = {
"repository": "foo/bar",
"job_workflow_ref": (
"foo/bar/.github/workflows/release.yml@refs/heads/main"
),
"repository_owner_id": "1234",
"environment": "another_environment",
}
with pytest.raises(errors.InvalidPublisherError) as e:
github.GitHubPublisher.lookup_by_claims(db_request.db, signed_claims)
assert str(e.value) == "Publisher with matching claims was not found"
def test_github_publisher_all_known_claims(self):
assert github.GitHubPublisher.all_known_claims() == {
# required verifiable claims
"sub",
"repository",
"repository_owner",
"repository_owner_id",
"job_workflow_ref",
# required unverifiable claims
"ref",
"sha",
# optional verifiable claims
"environment",
# preverified claims
"iss",
"iat",
"nbf",
"exp",
"aud",
"jti",
# unchecked claims
"actor",
"actor_id",
"run_id",
"run_number",
"run_attempt",
"head_ref",
"base_ref",
"event_name",
"ref_type",
"repository_id",
"workflow",
"repository_visibility",
"workflow_sha",
"job_workflow_sha",
"workflow_ref",
"runner_environment",
"environment_node_id",
"enterprise",
"enterprise_id",
"ref_protected",
"check_run_id",
}
def test_github_publisher_computed_properties(self):
publisher = github.GitHubPublisher(
repository_name="fakerepo",
repository_owner="fakeowner",
repository_owner_id="fakeid",
workflow_filename="fakeworkflow.yml",
environment="fakeenv",
)
for claim_name in publisher.__required_verifiable_claims__.keys():
assert getattr(publisher, claim_name) is not None
assert str(publisher) == "fakeworkflow.yml"
assert publisher.publisher_base_url == "https://github.com/fakeowner/fakerepo"
assert publisher.publisher_url() == "https://github.com/fakeowner/fakerepo"
assert (
publisher.publisher_url({"sha": "somesha"})
== "https://github.com/fakeowner/fakerepo/commit/somesha"
)
assert publisher.stored_claims({"sha": "somesha", "ref": "someref"}) == {
"sha": "somesha",
"ref": "someref",
}
def test_github_publisher_admin_details_with_environment(self):
publisher = github.GitHubPublisher(
repository_name="fakerepo",
repository_owner="fakeowner",
repository_owner_id="fakeid",
workflow_filename="fakeworkflow.yml",
environment="fakeenv",
)
assert publisher.admin_details == [
("Repository", "fakeowner/fakerepo"),
("Workflow", "fakeworkflow.yml"),
("Owner ID", "fakeid"),
("Environment", "fakeenv"),
]
def test_github_publisher_admin_details_without_environment(self):
publisher = github.GitHubPublisher(
repository_name="fakerepo",
repository_owner="fakeowner",
repository_owner_id="fakeid",
workflow_filename="fakeworkflow.yml",
environment="",
)
assert publisher.admin_details == [
("Repository", "fakeowner/fakerepo"),
("Workflow", "fakeworkflow.yml"),
("Owner ID", "fakeid"),
]
def test_github_publisher_unaccounted_claims(self, monkeypatch):
scope = pretend.stub()
sentry_sdk = pretend.stub(
capture_message=pretend.call_recorder(lambda s: None),
new_scope=pretend.call_recorder(
lambda: pretend.stub(
__enter__=lambda *a: scope, __exit__=lambda *a: None
)
),
)
monkeypatch.setattr(_core, "sentry_sdk", sentry_sdk)
# We don't care if these actually verify, only that they're present.
signed_claims = {
claim_name: "fake"
for claim_name in github.GitHubPublisher.all_known_claims()
}
signed_claims["fake-claim"] = "fake"
signed_claims["another-fake-claim"] = "also-fake"
github.GitHubPublisher.check_claims_existence(signed_claims)
assert sentry_sdk.capture_message.calls == [
pretend.call(
"JWT for GitHubPublisher has unaccounted claims: "
"['another-fake-claim', 'fake-claim']"
)
]
assert scope.fingerprint == ["another-fake-claim", "fake-claim"]
@pytest.mark.parametrize(
"missing",
github.GitHubPublisher.__required_verifiable_claims__.keys()
| github.GitHubPublisher.__required_unverifiable_claims__,
)
def test_github_publisher_missing_claims(self, monkeypatch, missing):
publisher = github.GitHubPublisher(
repository_name="fakerepo",
repository_owner="fakeowner",
repository_owner_id="fakeid",
workflow_filename="fakeworkflow.yml",
)
scope = pretend.stub()
sentry_sdk = pretend.stub(
capture_message=pretend.call_recorder(lambda s: None),
new_scope=pretend.call_recorder(
lambda: pretend.stub(
__enter__=lambda *a: scope, __exit__=lambda *a: None
)
),
)
monkeypatch.setattr(_core, "sentry_sdk", sentry_sdk)
signed_claims = {
claim_name: "fake"
for claim_name in github.GitHubPublisher.all_known_claims()
}
# Pop the missing claim, so that it's missing.
signed_claims.pop(missing)
assert missing not in signed_claims
assert publisher.__required_verifiable_claims__
with pytest.raises(errors.InvalidPublisherError) as e:
github.GitHubPublisher.check_claims_existence(signed_claims)
assert str(e.value) == f"Missing claim {missing!r}"
assert sentry_sdk.capture_message.calls == [
pretend.call(f"JWT for GitHubPublisher is missing claim: {missing}")
]
assert scope.fingerprint == [missing]
def test_github_publisher_missing_optional_claims(self, metrics, monkeypatch):
publisher = github.GitHubPublisher(
repository_name="fakerepo",
repository_owner="fakeowner",
repository_owner_id="fakeid",
workflow_filename="fakeworkflow.yml",
environment="some-environment", # The optional claim that should be present
)
sentry_sdk = pretend.stub(capture_message=pretend.call_recorder(lambda s: None))
monkeypatch.setattr(_core, "sentry_sdk", sentry_sdk)
service_ = pretend.stub(
jwt_identifier_exists=pretend.call_recorder(lambda s: False),
metrics=metrics,
)
signed_claims = {
claim_name: getattr(publisher, claim_name)
for claim_name in github.GitHubPublisher.__required_verifiable_claims__
}
signed_claims["ref"] = "ref"
signed_claims["sha"] = "sha"
signed_claims["job_workflow_ref"] = publisher.job_workflow_ref + "@ref"
assert publisher.__required_verifiable_claims__
with pytest.raises(errors.InvalidPublisherError) as e:
publisher.verify_claims(
signed_claims=signed_claims, publisher_service=service_
)
assert str(e.value) == "Check failed for optional claim 'environment'"
assert sentry_sdk.capture_message.calls == []
@pytest.mark.parametrize("environment", [None, "some-environment"])
@pytest.mark.parametrize(
"missing_claims",
[set(), github.GitHubPublisher.__optional_verifiable_claims__.keys()],
)
def test_github_publisher_verifies(self, monkeypatch, environment, missing_claims):
publisher = github.GitHubPublisher(
repository_name="fakerepo",
repository_owner="fakeowner",
repository_owner_id="fakeid",
workflow_filename="fakeworkflow.yml",
environment=environment,
)
noop_check = pretend.call_recorder(lambda gt, sc, ac, **kwargs: True)
verifiable_claims = {
claim_name: noop_check
for claim_name in publisher.__required_verifiable_claims__
}
monkeypatch.setattr(
publisher, "__required_verifiable_claims__", verifiable_claims
)
optional_verifiable_claims = {
claim_name: noop_check
for claim_name in publisher.__optional_verifiable_claims__
}
monkeypatch.setattr(
publisher, "__optional_verifiable_claims__", optional_verifiable_claims
)
signed_claims = {
claim_name: "fake"
for claim_name in github.GitHubPublisher.all_known_claims()
if claim_name not in missing_claims
}
github.GitHubPublisher.check_claims_existence(signed_claims)
assert publisher.verify_claims(
signed_claims=signed_claims, publisher_service=pretend.stub()
)
assert len(noop_check.calls) == len(verifiable_claims) + len(
optional_verifiable_claims
)
@pytest.mark.parametrize(
("truth", "claim", "valid"),
[
# invalid: claim should never be empty or missing
("", None, False),
("foo", None, False),
("", "", False),
("foo", "", False),
# valid: exact and case-insensitive matches
("foo", "foo", True),
("Foo", "foo", True),
("Foo", "Foo", True),
("foo", "Foo", True),
("FOO", "foo", True),
("foo", "FOO", True),
],
)
def test_check_repository(self, truth, claim, valid):
check = github.GitHubPublisher.__required_verifiable_claims__["repository"]
assert check(truth, claim, pretend.stub()) == valid
def test_check_event_name_emits_metrics(self, metrics):
check = github.GitHubPublisher.__required_verifiable_claims__["event_name"]
publisher_service = pretend.stub(metrics=metrics)
assert check(
"throwaway",
"pull_request_target",
pretend.stub(),
publisher_service=publisher_service,
)
assert metrics.increment.calls == [
pretend.call(
"warehouse.oidc.claim",
tags=["publisher:GitHub", "event_name:pull_request_target"],
),
]
@pytest.mark.parametrize(
("claim", "ref", "sha", "valid", "expected"),
[
# okay: workflow name, followed by a nonempty ref
(
"foo/bar/.github/workflows/baz.yml@refs/tags/v0.0.1",
"refs/tags/v0.0.1",
"somesha",
True,
None,
),
(
"foo/bar/.github/workflows/baz.yml@refs/pulls/6",
"refs/pulls/6",
"somesha",
True,
None,
),
(
"foo/bar/.github/workflows/baz.yml@refs/heads/main",
"refs/heads/main",
"somesha",
True,
None,
),
(
"foo/bar/.github/workflows/baz.yml@notrailingslash",
"notrailingslash",
"somesha",
True,
None,
),
# okay: workflow name, followed by a nonempty sha
(
"foo/bar/.github/workflows/baz.yml@somesha",
"someref",
"somesha",
True,
None,
),
(
"foo/bar/.github/workflows/baz.yml@somesha",
None,
"somesha",
True,
None,
),
(
"foo/bar/.github/workflows/baz.yml@somesha",
"",
"somesha",
True,
None,
),
# bad: both ref and sha are missing
(
"foo/bar/.github/workflows/baz.yml@missing",
None,
None,
False,
"The ref and sha claims are empty",
),
(
"foo/bar/.github/workflows/baz.yml@missing",
"",
"",
False,
"The ref and sha claims are empty",
),
# bad: workflow name with various attempted impersonations on the ref
(
"foo/bar/.github/workflows/baz.yml@fake.yml@notrailingslash",
"somesha",
"notrailingslash",
False,
"The job_workflow_ref claim does not match, expecting one of "
"['foo/bar/.github/workflows/baz.yml@notrailingslash', "
"'foo/bar/.github/workflows/baz.yml@somesha'], "
"got 'foo/bar/.github/workflows/baz.yml@fake.yml@notrailingslash'",
),
(
"foo/bar/.github/workflows/baz.yml@fake.yml@refs/pulls/6",
"somesha",
"refs/pulls/6",
False,
"The job_workflow_ref claim does not match, expecting one of "
"['foo/bar/.github/workflows/baz.yml@refs/pulls/6', "
"'foo/bar/.github/workflows/baz.yml@somesha'], "
"got 'foo/bar/.github/workflows/baz.yml@fake.yml@refs/pulls/6'",
),
# bad: missing tail or workflow name or otherwise partial
(
"foo/bar/.github/workflows/baz.yml@",
"somesha",
"notrailingslash",
False,
"The job_workflow_ref claim does not match, expecting one of "
"['foo/bar/.github/workflows/baz.yml@notrailingslash', "
"'foo/bar/.github/workflows/baz.yml@somesha'], "
"got 'foo/bar/.github/workflows/baz.yml@'",
),
(
"foo/bar/.github/workflows/@",
"somesha",
"notrailingslash",
False,
"The job_workflow_ref claim does not match, expecting one of "
"['foo/bar/.github/workflows/baz.yml@notrailingslash', "
"'foo/bar/.github/workflows/baz.yml@somesha'], "
"got 'foo/bar/.github/workflows/@'",
),
(
"foo/bar/.github/workflows/",
"somesha",
"notrailingslash",
False,
"The job_workflow_ref claim does not match, expecting one of "
"['foo/bar/.github/workflows/baz.yml@notrailingslash', "
"'foo/bar/.github/workflows/baz.yml@somesha'], "
"got 'foo/bar/.github/workflows/'",
),
(
"baz.yml",
"somesha",
"notrailingslash",
False,
"The job_workflow_ref claim does not match, expecting one of "
"['foo/bar/.github/workflows/baz.yml@notrailingslash', "
"'foo/bar/.github/workflows/baz.yml@somesha'], "
"got 'baz.yml'",
),
(
"foo/bar/.github/workflows/baz.yml@malicious.yml@",
"somesha",
"notrailingslash",
False,
"The job_workflow_ref claim does not match, expecting one of "
"['foo/bar/.github/workflows/baz.yml@notrailingslash', "
"'foo/bar/.github/workflows/baz.yml@somesha'], "
"got 'foo/bar/.github/workflows/baz.yml@malicious.yml@'",
),
(
"foo/bar/.github/workflows/baz.yml@@",
"somesha",
"notrailingslash",
False,
"The job_workflow_ref claim does not match, expecting one of "
"['foo/bar/.github/workflows/baz.yml@notrailingslash', "
"'foo/bar/.github/workflows/baz.yml@somesha'], "
"got 'foo/bar/.github/workflows/baz.yml@@'",
),
("", None, None, False, "The job_workflow_ref claim is empty"),
],
)
def test_github_publisher_job_workflow_ref(self, claim, ref, sha, valid, expected):
publisher = github.GitHubPublisher(
repository_name="bar",
repository_owner="foo",
repository_owner_id=pretend.stub(),
workflow_filename="baz.yml",
)
check = github.GitHubPublisher.__required_verifiable_claims__[
"job_workflow_ref"
]
claims = {"ref": ref, "sha": sha}
if valid:
assert check(publisher.job_workflow_ref, claim, claims) is True
else:
with pytest.raises(errors.InvalidPublisherError) as e:
check(publisher.job_workflow_ref, claim, claims) is True
assert str(e.value) == expected
@pytest.mark.parametrize(
("truth", "claim", "valid"),
[
("repo:foo/bar", "repo:foo/bar:someotherstuff", True),
("repo:foo/bar", "repo:foo/bar:", True),
("repo:fOo/BaR", "repo:foo/bar", True),
("repo:foo/bar", "repo:fOo/BaR:", True),
("repo:foo/bar:someotherstuff", "repo:foo/bar", False),
("repo:foo/bar-baz", "repo:foo/bar", False),
("repo:foo/bar", "repo:foo/bar-baz", False),
],
)
def test_github_publisher_sub_claim(self, truth, claim, valid):
check = github.GitHubPublisher.__required_verifiable_claims__["sub"]
assert check(truth, claim, pretend.stub()) is valid
@pytest.mark.parametrize(
("truth", "claim", "valid"),
[
("", None, True),
("", "", True),
("", "some-environment", True),
("some-environment", "some-environment", True),
("some-environment", "sOmE-eNvIrOnMeNt", True),
("some-environment", None, False),
("some-environment", "some-other-environment", False),
],
)
def test_github_publisher_environment_claim(self, truth, claim, valid):
check = github.GitHubPublisher.__optional_verifiable_claims__["environment"]
assert check(truth, claim, pretend.stub()) is valid
def test_github_publisher_duplicates_cant_be_created(self, db_request):
publisher1 = github.GitHubPublisher(
repository_name="repository_name",
repository_owner="repository_owner",
repository_owner_id="666",
workflow_filename="workflow_filename.yml",
environment="",
)
db_request.db.add(publisher1)
db_request.db.commit()
publisher2 = github.GitHubPublisher(
repository_name="repository_name",
repository_owner="repository_owner",
repository_owner_id="666",
workflow_filename="workflow_filename.yml",
environment="",
)
db_request.db.add(publisher2)
with pytest.raises(psycopg.errors.UniqueViolation):
db_request.db.commit()
@pytest.mark.parametrize(
"repository_name",
[
"repository_name",
"Repository_Name",
],
)
@pytest.mark.parametrize(
"repository_owner",
[
"repository_owner",
"Repository_Owner",
],
)
@pytest.mark.parametrize(
("url", "expected"),
[
("https://github.com/repository_owner/repository_name.git", True),
("https://github.com/repository_owner/repository_name.git/", True),
("https://github.com/repository_owner/repository_name.git/issues", False),
("https://repository_owner.github.io/repository_name/", True),
("https://repository_owner.github.io/repository_name", True),
("https://repository_owner.github.io/repository_name/subpage", True),
("https://repository_owner.github.io/repository_name/../malicious", False),
("https://repository_owner.github.io/", False),
("https://repository_owner.github.io/unrelated_name/", False),
("https://github.com/RePosItory_OwNeR/rePository_Name.git", True),
("https://repository_owner.github.io/RePoSiToRy_NaMe/subpage", True),
],
)
def test_github_publisher_verify_url(
self, url, expected, repository_name, repository_owner
):
publisher = github.GitHubPublisher(
repository_name=repository_name,
repository_owner=repository_owner,
repository_owner_id="666",
workflow_filename="workflow_filename.yml",
environment="",
)
assert publisher.verify_url(url) == expected
@pytest.mark.parametrize("environment", ["", "some-env"])
def test_github_publisher_attestation_identity(self, environment):
publisher = github.GitHubPublisher(
repository_name="repository_name",
repository_owner="repository_owner",
repository_owner_id="666",
workflow_filename="workflow_filename.yml",
environment=environment,
)
identity = publisher.attestation_identity
assert identity.repository == publisher.repository
assert identity.workflow == publisher.workflow_filename
if not environment:
assert identity.environment is None
else:
assert identity.environment == publisher.environment
@pytest.mark.parametrize("exists_in_db", [True, False])
def test_exists(self, db_request, exists_in_db):
publisher = github.GitHubPublisher(
repository_name="repository_name",
repository_owner="repository_owner",
repository_owner_id="666",
workflow_filename="workflow_filename.yml",
environment="",
)
if exists_in_db:
db_request.db.add(publisher)
db_request.db.flush()
assert publisher.exists(db_request.db) == exists_in_db
| TestGitHubPublisher |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/director.py | {
"start": 1501,
"end": 33004
} | class ____:
"""
Encapsulates all the logic to perform a build for user's documentation.
This class handles all the VCS commands, setup OS and language (e.g. only
Python for now) environment (via virtualenv or conda), installs all the
required basic and user packages, and finally execute the build commands
(e.g. Sphinx or MkDocs) to generate the artifacts.
Note that this class *is not* in charge of doing anything related to Read
the Docs, the platform, itself. These include not updating the `Build`'s
status, or uploading the artifacts to the storage, creating the search
index, among others.
"""
def __init__(self, data):
"""
Initializer.
:param data: object with all the data grabbed by Celery task in
``before_start`` and used as a way to share data with this class
by-directionally.
:type data: readthedocs.projects.tasks.builds.TaskData
"""
self.data = data
def setup_vcs(self):
"""
Perform all VCS related steps.
1. clone the repository
2. checkout specific commit/identifier
3. load the config file
4. checkout submodules
"""
# Make dirs if it does not exist to clone the repository under it
if not os.path.exists(self.data.project.doc_path):
os.makedirs(self.data.project.doc_path)
if not self.data.project.vcs_class():
raise RepositoryError(RepositoryError.UNSUPPORTED_VCS)
before_vcs.send(
sender=self.data.version,
environment=self.vcs_environment,
)
# Create the VCS repository where all the commands are going to be
# executed for a particular VCS type
self.vcs_repository = self.data.project.vcs_repo(
version=self.data.version,
environment=self.vcs_environment,
)
# We can't do too much on ``pre_checkout`` because we haven't
# cloned the repository yet and we don't know what the user wrote
# in the `.readthedocs.yaml` yet.
#
# We could implement something different in the future if we download
# the `.readthedocs.yaml` file without cloning.
# See https://github.com/readthedocs/readthedocs.org/issues/8935
#
# self.run_build_job("pre_checkout")
self.checkout()
self.run_build_job("post_checkout")
commit = self.data.build_commit or self.vcs_repository.commit
if commit:
self.data.build["commit"] = commit
def create_vcs_environment(self):
self.vcs_environment = self.data.environment_class(
project=self.data.project,
version=self.data.version,
build=self.data.build,
environment=self.get_vcs_env_vars(),
container_image=settings.RTD_DOCKER_CLONE_IMAGE,
api_client=self.data.api_client,
)
def create_build_environment(self):
self.build_environment = self.data.environment_class(
project=self.data.project,
version=self.data.version,
config=self.data.config,
build=self.data.build,
environment=self.get_build_env_vars(),
api_client=self.data.api_client,
)
def setup_environment(self):
"""
Create the environment and install required dependencies.
1. install OS dependencies (apt)
2. create language (e.g. Python) environment
3. install dependencies into the environment
"""
# Environment used for building code, usually with Docker
language_environment_cls = Virtualenv
if self.data.config.is_using_conda:
language_environment_cls = Conda
self.language_environment = language_environment_cls(
version=self.data.version,
build_env=self.build_environment,
config=self.data.config,
)
# TODO: check if `before_build` and `after_build` are still useful
# (maybe in commercial?)
#
# I didn't find they are used anywhere, we should probably remove them
before_build.send(
sender=self.data.version,
environment=self.build_environment,
)
self.run_build_job("pre_system_dependencies")
self.system_dependencies()
self.run_build_job("post_system_dependencies")
# Install all ``build.tools`` specified by the user
self.install_build_tools()
self.run_build_job("pre_create_environment")
self.create_environment()
self.run_build_job("post_create_environment")
self.run_build_job("pre_install")
self.install()
self.run_build_job("post_install")
def build(self):
"""
Build all the formats specified by the user.
1. build HTML
2. build HTMLZzip
3. build PDF
4. build ePub
"""
self.run_build_job("pre_build")
# Build all formats
self.build_html()
self.build_htmlzip()
self.build_pdf()
self.build_epub()
self.run_build_job("post_build")
self.store_readthedocs_build_yaml()
after_build.send(
sender=self.data.version,
)
# VCS checkout
def checkout(self):
"""Checkout Git repo and load build config file."""
log.info("Cloning and fetching.")
self.vcs_repository.update()
# Check if the key has write access to the repository (RTD Business only).
# This check is done immediately after clone step, and before running any
# commands that make use of user given input (like the post_checkout job).
has_ssh_key_with_write_access = False
if settings.ALLOW_PRIVATE_REPOS:
has_ssh_key_with_write_access = self.vcs_repository.has_ssh_key_with_write_access()
if has_ssh_key_with_write_access != self.data.project.has_ssh_key_with_write_access:
self.data.api_client.project(self.data.project.pk).patch(
{"has_ssh_key_with_write_access": has_ssh_key_with_write_access}
)
now = datetime.datetime.now(tz=datetime.timezone.utc)
hard_failure = now >= datetime.datetime(
2025, 12, 1, 0, 0, 0, tzinfo=datetime.timezone.utc
)
if has_ssh_key_with_write_access:
if hard_failure and settings.RTD_ENFORCE_BROWNOUTS_FOR_DEPRECATIONS:
raise BuildUserError(BuildUserError.SSH_KEY_WITH_WRITE_ACCESS)
else:
self.attach_notification(
attached_to=f"project/{self.data.project.pk}",
message_id=MESSAGE_PROJECT_SSH_KEY_WITH_WRITE_ACCESS,
dismissable=True,
)
identifier = self.data.build_commit or self.data.version.identifier
log.info("Checking out.", identifier=identifier)
self.vcs_repository.checkout(identifier)
# The director is responsible for understanding which config file to use for a build.
# In order to reproduce a build 1:1, we may use readthedocs_yaml_path defined by the build
# instead of per-version or per-project.
# Use the below line to fetch the readthedocs_yaml_path defined per-build.
# custom_config_file = self.data.build.get("readthedocs_yaml_path", None)
custom_config_file = None
# This logic can be extended with version-specific config files
if not custom_config_file and self.data.version.project.readthedocs_yaml_path:
custom_config_file = self.data.version.project.readthedocs_yaml_path
if custom_config_file:
log.info("Using a custom .readthedocs.yaml file.", path=custom_config_file)
checkout_path = self.data.project.checkout_path(self.data.version.slug)
default_config_file = find_one(checkout_path, CONFIG_FILENAME_REGEX)
final_config_file = custom_config_file or default_config_file
# Output the path for the config file used.
# This works as confirmation for us & the user about which file is used,
# as well as the fact that *any* config file is used.
if final_config_file:
self.vcs_environment.run(
"cat",
# Show user the relative path to the config file
# TODO: Have our standard path replacement code catch this.
# https://github.com/readthedocs/readthedocs.org/pull/10413#discussion_r1230765843
final_config_file.replace(checkout_path + "/", ""),
cwd=checkout_path,
)
self.data.config = load_yaml_config(
version=self.data.version,
readthedocs_yaml_path=custom_config_file,
)
self.data.build["config"] = self.data.config.as_dict()
self.data.build["readthedocs_yaml_path"] = custom_config_file
# Raise a build error if the project is not using a config file or using v1
if self.data.config.version not in ("2", 2):
raise BuildUserError(BuildUserError.NO_CONFIG_FILE_DEPRECATED)
# Raise a build error if the project is using "build.image" on their config file
build_config_key = self.data.config.source_config.get("build", {})
if "image" in build_config_key:
raise BuildUserError(BuildUserError.BUILD_IMAGE_CONFIG_KEY_DEPRECATED)
# TODO: move this validation to the Config object once we are settled here
if "image" not in build_config_key and "os" not in build_config_key:
raise BuildUserError(BuildUserError.BUILD_OS_REQUIRED)
self.vcs_repository.update_submodules(self.data.config)
# If the config has a post_checkout job, we stop the build,
# as it could be abused to write to the repository.
if has_ssh_key_with_write_access and get_dotted_attribute(
self.data.config, "build.jobs.post_checkout", None
):
raise BuildUserError(BuildUserError.SSH_KEY_WITH_WRITE_ACCESS)
# System dependencies (``build.apt_packages``)
# NOTE: `system_dependencies` should not be possible to override by the
# user because it's executed as ``RTD_DOCKER_USER`` (e.g. ``root``) user.
def system_dependencies(self):
"""
Install apt packages from the config file.
We don't allow to pass custom options or install from a path.
The packages names are already validated when reading the config file.
.. note::
``--quiet`` won't suppress the output,
it would just remove the progress bar.
"""
packages = self.data.config.build.apt_packages
if packages:
self.build_environment.run(
"apt-get",
"update",
"--assume-yes",
"--quiet",
user=settings.RTD_DOCKER_SUPER_USER,
)
# put ``--`` to end all command arguments.
self.build_environment.run(
"apt-get",
"install",
"--assume-yes",
"--quiet",
"--",
*packages,
user=settings.RTD_DOCKER_SUPER_USER,
)
# Language environment
def create_environment(self):
if self.data.config.build.jobs.create_environment is not None:
self.run_build_job("create_environment")
return
# If the builder is generic, we have nothing to do here,
# as the commnads are provided by the user.
if self.data.config.doctype == GENERIC:
return
self.language_environment.setup_base()
# Install
def install(self):
if self.data.config.build.jobs.install is not None:
self.run_build_job("install")
return
# If the builder is generic, we have nothing to do here,
# as the commnads are provided by the user.
if self.data.config.doctype == GENERIC:
return
self.language_environment.install_core_requirements()
self.language_environment.install_requirements()
# Build
def build_html(self):
if self.data.config.build.jobs.build.html is not None:
self.run_build_job("build.html")
return
return self.build_docs_class(self.data.config.doctype)
def build_pdf(self):
if "pdf" not in self.data.config.formats or self.data.version.type == EXTERNAL:
return False
if self.data.config.build.jobs.build.pdf is not None:
self.run_build_job("build.pdf")
return
# Mkdocs has no pdf generation currently.
if self.is_type_sphinx():
return self.build_docs_class("sphinx_pdf")
return False
def build_htmlzip(self):
if "htmlzip" not in self.data.config.formats or self.data.version.type == EXTERNAL:
return False
if self.data.config.build.jobs.build.htmlzip is not None:
self.run_build_job("build.htmlzip")
return
# We don't generate a zip for mkdocs currently.
if self.is_type_sphinx():
return self.build_docs_class("sphinx_singlehtmllocalmedia")
return False
def build_epub(self):
if "epub" not in self.data.config.formats or self.data.version.type == EXTERNAL:
return False
if self.data.config.build.jobs.build.epub is not None:
self.run_build_job("build.epub")
return
# Mkdocs has no epub generation currently.
if self.is_type_sphinx():
return self.build_docs_class("sphinx_epub")
return False
def run_build_job(self, job):
"""
Run a command specified by the user under `build.jobs.` config key.
It uses the "VCS environment" for pre_/post_ checkout jobs and "build
environment" for the rest of them.
Note that user's commands:
- are not escaped
- are run with under the path where the repository was cloned
- are run as RTD_DOCKER_USER user
- users can't run commands as `root` user
- all the user's commands receive same environment variables as regular commands
Example:
build:
jobs:
pre_install:
- echo `date`
- python path/to/myscript.py
pre_build:
- sed -i **/*.rst -e "s|{version}|v3.5.1|g"
build:
html:
- make html
pdf:
- make pdf
In this case, `self.data.config.build.jobs.pre_build` will contains
`sed` command.
"""
commands = get_dotted_attribute(self.data.config, f"build.jobs.{job}", None)
if not commands:
return
cwd = self.data.project.checkout_path(self.data.version.slug)
environment = self.vcs_environment
if job not in ("pre_checkout", "post_checkout"):
environment = self.build_environment
for command in commands:
environment.run(command, escape_command=False, cwd=cwd)
def check_old_output_directory(self):
"""
Check if there the directory '_build/html' exists and fail the build if so.
Read the Docs used to build artifacts into '_build/html' and there are
some projects with this path hardcoded in their files. Those builds are
having unexpected behavior since we are not using that path anymore.
In case we detect they are keep using that path, we fail the build
explaining this.
"""
command = self.build_environment.run(
"test",
"-x",
"_build/html",
cwd=self.data.project.checkout_path(self.data.version.slug),
record=False,
)
if command.exit_code == 0:
log.warning("Directory '_build/html' exists. This may lead to unexpected behavior.")
raise BuildUserError(BuildUserError.BUILD_OUTPUT_OLD_DIRECTORY_USED)
def run_build_commands(self):
"""Runs each build command in the build environment."""
python_reshim_commands = (
{"pip", "install"},
{"conda", "create"},
{"conda", "install"},
{"mamba", "create"},
{"mamba", "install"},
{"poetry", "install"},
)
rust_reshim_commands = ({"cargo", "install"},)
cwd = self.data.project.checkout_path(self.data.version.slug)
environment = self.build_environment
for command in self.data.config.build.commands:
environment.run(command, escape_command=False, cwd=cwd)
# Execute ``asdf reshim python`` if the user is installing a
# package since the package may contain an executable
# See https://github.com/readthedocs/readthedocs.org/pull/9150#discussion_r882849790
for python_reshim_command in python_reshim_commands:
# Convert tuple/list into set to check reshim command is a
# subset of the command itself. This is to find ``pip install``
# but also ``pip -v install`` and ``python -m pip install``
if python_reshim_command.issubset(command.split()):
environment.run(
*["asdf", "reshim", "python"],
escape_command=False,
cwd=cwd,
record=False,
)
# Do same for Rust
for rust_reshim_command in rust_reshim_commands:
if rust_reshim_command.issubset(command.split()):
environment.run(
*["asdf", "reshim", "rust"],
escape_command=False,
cwd=cwd,
record=False,
)
html_output_path = os.path.join(cwd, BUILD_COMMANDS_OUTPUT_PATH_HTML)
if not os.path.exists(html_output_path):
raise BuildUserError(BuildUserError.BUILD_COMMANDS_WITHOUT_OUTPUT)
# Update the `Version.documentation_type` to match the doctype defined
# by the config file. When using `build.commands` it will be `GENERIC`
self.data.version.documentation_type = self.data.config.doctype
self.store_readthedocs_build_yaml()
def install_build_tools(self):
"""
Install all ``build.tools`` defined by the user in the config file.
It uses ``asdf`` behind the scenes to manage all the tools and versions
of them. These tools/versions are stored in the Cloud cache and are
downloaded on each build (~50 - ~100Mb).
If the requested tool/version is not present in the cache, it's
installed via ``asdf`` on the fly.
"""
if settings.RTD_DOCKER_COMPOSE:
# Create a symlink for ``root`` user to use the same ``.asdf``
# installation as the ``docs`` user. Required for local building
# since everything is run as ``root`` when using Local Development
# instance
cmd = [
"ln",
"-s",
os.path.join(settings.RTD_DOCKER_WORKDIR, ".asdf"),
"/root/.asdf",
]
self.build_environment.run(
*cmd,
record=False,
)
build_tools_storage = get_storage(
project=self.data.project,
build_id=self.data.build["id"],
api_client=self.data.api_client,
storage_type=StorageType.build_tools,
)
for tool, version in self.data.config.build.tools.items():
full_version = version.full_version # e.g. 3.9 -> 3.9.7
# TODO: generate the correct path for the Python version
# see https://github.com/readthedocs/readthedocs.org/pull/8447#issuecomment-911562267
# tool_path = f'{self.config.build.os}/{tool}/2021-08-30/{full_version}.tar.gz'
build_os = self.data.config.build.os
if build_os == "ubuntu-lts-latest":
_, build_os = settings.RTD_DOCKER_BUILD_SETTINGS["os"]["ubuntu-lts-latest"].split(
":"
)
tool_path = f"{build_os}-{tool}-{full_version}.tar.gz"
tool_version_cached = build_tools_storage.exists(tool_path)
if tool_version_cached:
remote_fd = build_tools_storage.open(tool_path, mode="rb")
with tarfile.open(fileobj=remote_fd) as tar:
# Extract it on the shared path between host and Docker container
extract_path = os.path.join(self.data.project.doc_path, "tools")
tar.extractall(extract_path)
# Move the extracted content to the ``asdf`` installation
cmd = [
"mv",
f"{extract_path}/{full_version}",
os.path.join(
settings.RTD_DOCKER_WORKDIR,
f".asdf/installs/{tool}/{full_version}",
),
]
self.build_environment.run(
*cmd,
record=False,
)
else:
log.debug(
"Cached version for tool not found.",
os=self.data.config.build.os,
tool=tool,
full_version=full_version,
tool_path=tool_path,
)
# If the tool version selected is not available from the
# cache we compile it at build time
cmd = [
# TODO: make ``PYTHON_CONFIGURE_OPTS="--enable-shared"``
# environment variable to work here. Note that
# ``self.build_environment.run`` does not support passing
# environment for a particular command:
# https://github.com/readthedocs/readthedocs.org/blob/9d2d1a2/readthedocs/doc_builder/environments.py#L430-L431
"asdf",
"install",
tool,
full_version,
]
self.build_environment.run(
*cmd,
)
# Make the tool version chosen by the user the default one
cmd = [
"asdf",
"global",
tool,
full_version,
]
self.build_environment.run(
*cmd,
)
# Recreate shims for this tool to make the new version
# installed available
# https://asdf-vm.com/learn-more/faq.html#newly-installed-exectable-not-running
cmd = [
"asdf",
"reshim",
tool,
]
self.build_environment.run(
*cmd,
record=False,
)
if all(
[
tool == "python",
# Do not install them if the tool version was cached
# because these dependencies are already installed when
# created with our script and uploaded to the cache's
# bucket
not tool_version_cached,
# Do not install them on conda/mamba since they are not
# needed because the environment is managed by conda/mamba
# itself
self.data.config.python_interpreter not in ("conda", "mamba"),
]
):
# We cap setuptools to avoid breakage of projects
# relying on setup.py invokations,
# see https://github.com/readthedocs/readthedocs.org/issues/8659
setuptools_version = (
"setuptools<58.3.0"
if self.data.config.is_using_setup_py_install
else "setuptools"
)
# Install our own requirements if the version is compiled
cmd = [
"python",
"-mpip",
"install",
"-U",
"virtualenv",
setuptools_version,
]
self.build_environment.run(
*cmd,
)
# Helpers
#
# TODO: move somewhere or change names to make them private or something to
# easily differentiate them from the normal flow.
def build_docs_class(self, builder_class):
"""
Build docs with additional doc backends.
These steps are not necessarily required for the build to halt, so we
only raise a warning exception here. A hard error will halt the build
process.
"""
# If the builder is generic, we have nothing to do here,
# as the commnads are provided by the user.
if builder_class == GENERIC:
return
builder = get_builder_class(builder_class)(
build_env=self.build_environment,
python_env=self.language_environment,
)
if builder_class == self.data.config.doctype:
builder.show_conf()
self.data.version.documentation_type = builder.get_final_doctype()
success = builder.build()
return success
def get_vcs_env_vars(self):
"""Get environment variables to be included in the VCS setup step."""
env = self.get_rtd_env_vars()
# Don't prompt for username, this requires Git 2.3+
env["GIT_TERMINAL_PROMPT"] = "0"
env["READTHEDOCS_GIT_CLONE_TOKEN"] = self.data.project.clone_token
return env
def get_rtd_env_vars(self):
"""Get bash environment variables specific to Read the Docs."""
env = {
"READTHEDOCS": "True",
"READTHEDOCS_VERSION": self.data.version.slug,
"READTHEDOCS_VERSION_TYPE": self.data.version.type,
"READTHEDOCS_VERSION_NAME": self.data.version.verbose_name,
"READTHEDOCS_PROJECT": self.data.project.slug,
"READTHEDOCS_LANGUAGE": self.data.project.language,
"READTHEDOCS_REPOSITORY_PATH": self.data.project.checkout_path(self.data.version.slug),
"READTHEDOCS_OUTPUT": os.path.join(
self.data.project.checkout_path(self.data.version.slug), "_readthedocs/"
),
"READTHEDOCS_GIT_CLONE_URL": self.data.project.repo,
# TODO: we don't have access to the database from the builder.
# We need to find a way to expose HTML_URL here as well.
# "READTHEDOCS_GIT_HTML_URL": self.data.project.remote_repository.html_url,
"READTHEDOCS_GIT_IDENTIFIER": self.data.version.git_identifier,
"READTHEDOCS_GIT_COMMIT_HASH": self.data.build["commit"],
"READTHEDOCS_PRODUCTION_DOMAIN": settings.PRODUCTION_DOMAIN,
}
return env
def get_build_env_vars(self):
"""Get bash environment variables used for all builder commands."""
env = self.get_rtd_env_vars()
# https://no-color.org/
env["NO_COLOR"] = "1"
if self.data.config.conda is not None:
env.update(
{
# NOTE: should these be prefixed with "READTHEDOCS_"?
"CONDA_ENVS_PATH": os.path.join(self.data.project.doc_path, "conda"),
"CONDA_DEFAULT_ENV": self.data.version.slug,
"BIN_PATH": os.path.join(
self.data.project.doc_path,
"conda",
self.data.version.slug,
"bin",
),
}
)
else:
env.update(
{
"BIN_PATH": os.path.join(
self.data.project.doc_path,
"envs",
self.data.version.slug,
"bin",
),
"READTHEDOCS_VIRTUALENV_PATH": os.path.join(
self.data.project.doc_path, "envs", self.data.version.slug
),
}
)
env.update(
{
"READTHEDOCS_CANONICAL_URL": self.data.version.canonical_url,
}
)
# Update environment from Project's specific environment variables,
# avoiding to expose private environment variables
# if the version is external (i.e. a PR build).
env.update(
self.data.project.environment_variables(public_only=self.data.version.is_external)
)
return env
def is_type_sphinx(self):
"""Is documentation type Sphinx."""
return "sphinx" in self.data.config.doctype
def store_readthedocs_build_yaml(self):
# load YAML from user
yaml_path = os.path.join(
self.data.project.artifact_path(version=self.data.version.slug, type_="html"),
"readthedocs-build.yaml",
)
if not os.path.exists(yaml_path):
log.debug("Build output YAML file (readtehdocs-build.yaml) does not exist.")
return
try:
with safe_open(yaml_path, "r") as f:
data = yaml.safe_load(f)
except Exception:
# NOTE: skip this work for now until we decide whether or not this
# YAML file is required.
#
# NOTE: decide whether or not we want this
# file to be mandatory and raise an exception here.
return
log.info("readthedocs-build.yaml loaded.", path=yaml_path)
# TODO: validate the YAML generated by the user
# self._validate_readthedocs_build_yaml(data)
# Copy the YAML data into `Version.build_data`.
# It will be saved when the API is hit.
# This data will be used by the `/_/readthedocs-config.json` API endpoint.
self.data.version.build_data = data
def attach_notification(
self,
attached_to,
message_id,
format_values=None,
state="unread",
dismissable=False,
news=False,
):
"""
Attach a notification to build in progress using the APIv2.
:param attached_to: The object to which the notification is attached.
It should have the form of `project/{project_id}` or `build/{build_id}`.
"""
format_values = format_values or {}
# NOTE: we are using APIv2 here because it uses BuildAPIKey authentication,
# which is not currently supported by APIv3.
self.data.api_client.notifications.post(
{
"attached_to": attached_to,
"message_id": message_id,
"state": state, # Optional
"dismissable": dismissable,
"news": news,
"format_values": format_values,
}
)
| BuildDirector |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/glib2.py | {
"start": 10929,
"end": 11394
} | class ____(glib_gresource_base):
vars = ['GLIB_COMPILE_RESOURCES']
fun_h = Task.compile_fun_shell(
glib_gresource_base.base_cmd + ' --target=${TGT[0].abspath()} --generate-header ${SRC}'
)
fun_c = Task.compile_fun_shell(
glib_gresource_base.base_cmd + ' --target=${TGT[1].abspath()} --generate-source ${SRC}'
)
ext_out = ['.h']
def run(self):
return self.fun_h[0](self) or self.fun_c[0](self)
| glib_gresource_source |
python | dagster-io__dagster | python_modules/automation/python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/test_comprehensive.py | {
"start": 37042,
"end": 41584
} | class ____:
"""Comprehensive tests for ai-review-update targeting 80% coverage."""
def test_import_and_basic_structure(self):
"""Test that command can be imported and has expected structure."""
from automation.dagster_dev.commands.ai_review_update import update_pr
assert update_pr is not None
assert update_pr.name == "ai-review-update"
assert callable(update_pr)
def test_help_command(self):
"""Test that help command works and contains expected content."""
from automation.dagster_dev.commands.ai_review_update import update_pr
runner = CliRunner()
result = runner.invoke(update_pr, ["--help"])
assert result.exit_code == 0
assert "ai-review-update" in result.output
assert "--title" in result.output
assert "--body" in result.output
assert "--auto-prepare" in result.output
def test_required_parameters(self):
"""Test that title and body parameters are required."""
from automation.dagster_dev.commands.ai_review_update import update_pr
runner = CliRunner()
# Missing title
result = runner.invoke(update_pr, ["--body", "test body"])
assert result.exit_code != 0
assert "Missing option '--title'" in result.output
# Missing body
result = runner.invoke(update_pr, ["--title", "test title"])
assert result.exit_code != 0
assert "Missing option '--body'" in result.output
def test_command_structure_basic(self):
"""Test basic command structure without execution."""
from automation.dagster_dev.commands.ai_review_update import update_pr
# Just test that command structure is valid
assert hasattr(update_pr, "params")
param_names = [p.name for p in update_pr.params]
assert "title" in param_names
assert "body" in param_names
assert "auto_prepare" in param_names
@patch("subprocess.run")
def test_get_pr_number_success(self, mock_subprocess):
"""Test get_pr_number when PR exists."""
mock_subprocess.return_value = Mock(returncode=0, stdout="123")
from automation.dagster_dev.commands.ai_review_update import get_pr_number
result = get_pr_number()
assert result == "123"
@patch("subprocess.run")
def test_get_pr_number_no_pr(self, mock_subprocess):
"""Test get_pr_number when no PR exists."""
mock_subprocess.return_value = Mock(returncode=1, stdout="")
from automation.dagster_dev.commands.ai_review_update import get_pr_number
runner = CliRunner()
with runner.isolated_filesystem():
with pytest.raises(SystemExit):
get_pr_number()
def test_command_structure_validation(self):
"""Test basic command structure without complex mocking."""
from automation.dagster_dev.commands.ai_review_update import update_pr
# Test that command validates required parameters
runner = CliRunner()
result = runner.invoke(update_pr, [])
assert result.exit_code != 0
assert "--title" in result.output or "--body" in result.output
def test_run_command_optional_success(self):
"""Test run_command_optional with successful command."""
with patch("subprocess.run") as mock_subprocess:
completed_process = subprocess.CompletedProcess(
args=["echo", "test"], returncode=0, stdout="success", stderr=""
)
mock_subprocess.return_value = completed_process
from automation.dagster_dev.commands.ai_review_update import run_command_optional
result = run_command_optional(["echo", "test"], "testing")
assert result.success is True
assert result.error_message is None
def test_run_command_optional_failure(self):
"""Test run_command_optional with failed command."""
with patch("subprocess.run") as mock_subprocess:
error = subprocess.CalledProcessError(1, "cmd")
error.stdout = "out"
error.stderr = "err"
mock_subprocess.side_effect = error
from automation.dagster_dev.commands.ai_review_update import run_command_optional
result = run_command_optional(["false"], "testing failure")
assert result.success is False
assert result.error_message is not None
assert "Error testing failure" in result.error_message
| TestAiReviewUpdateComprehensive |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_set_row01.py | {
"start": 315,
"end": 1157
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("set_row01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_row(0, 0.75)
worksheet.set_row(1, 1.50)
worksheet.set_row(2, 2.25)
worksheet.set_row(3, 3)
worksheet.set_row(11, 9)
worksheet.set_row(12, 9.75)
worksheet.set_row(13, 10.50)
worksheet.set_row(14, 11.25)
worksheet.set_row(18, 14.25)
worksheet.set_row(20, 15.75)
worksheet.set_row(21, 16.50)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 34170,
"end": 34613
} | class ____(ChainedAssetSelection):
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
selection = self.child.resolve_inner(asset_graph, allow_missing=allow_missing)
return fetch_sinks(asset_graph.asset_dep_graph, selection)
def to_selection_str(self) -> str:
return f"sinks({self.child.to_selection_str()})"
@whitelist_for_serdes
| SinksAssetSelection |
python | apache__thrift | lib/py/src/server/TNonblockingServer.py | {
"start": 1299,
"end": 2671
} | class ____(threading.Thread):
"""Worker is a small helper to process incoming connection."""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Process queries from task queue, stop if processor is None."""
while True:
try:
processor, iprot, oprot, otrans, callback = self.queue.get()
if processor is None:
break
processor.process(iprot, oprot)
callback(True, otrans.getvalue())
except Exception:
logger.exception("Exception while processing request", exc_info=True)
callback(False, b'')
WAIT_LEN = 0
WAIT_MESSAGE = 1
WAIT_PROCESS = 2
SEND_ANSWER = 3
CLOSED = 4
def locked(func):
"""Decorator which locks self.lock."""
def nested(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
return nested
def socket_exception(func):
"""Decorator close object on socket.error."""
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
logger.debug('ignoring socket exception', exc_info=True)
self.close()
return read
| Worker |
python | huggingface__transformers | src/transformers/models/owlv2/modeling_owlv2.py | {
"start": 13001,
"end": 16683
} | class ____(nn.Module):
def __init__(self, config: Owlv2VisionConfig):
super().__init__()
self.patch_size = config.patch_size
self.config = config
self.embed_dim = config.hidden_size
self.class_embedding = nn.Parameter(torch.randn(config.hidden_size))
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=config.patch_size,
stride=config.patch_size,
bias=False,
)
self.num_patches = (config.image_size // config.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings.interpolate_pos_encoding
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
patch_embeds = self.patch_embedding(pixel_values) # shape = [batch_size, num_channels, height, width]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
# Copied from transformers.models.owlvit.modeling_owlvit.OwlViTTextEmbeddings with OwlViT->Owlv2
| Owlv2VisionEmbeddings |
python | scipy__scipy | scipy/optimize/tests/test_linprog.py | {
"start": 90533,
"end": 93693
} | class ____(LinprogRSTests):
options = {}
def test_cyclic_bland(self):
pytest.skip("Intermittent failure acceptable.")
def test_nontrivial_problem_with_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_unbounded_variables(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bounds = [(None, None), (None, None), (0, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bounded_variables(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bounds = [(None, 1), (1, None), (0, None), (.4, .6)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_negative_unbounded_variable(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
b_eq = [4]
x_star = np.array([-219/385, 582/385, 0, 4/10])
f_star = 3951/385
bounds = [(None, None), (1, None), (0, None), (.4, .6)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bad_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bad_guess = [1, 2, 3, .5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=bad_guess)
assert_equal(res.status, 6)
def test_redundant_constraints_with_guess(self):
rng = np.random.default_rng(984298498729345)
A, b, c, _, _ = magic_square(3, rng=rng)
p = rng.random(c.shape)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "A_eq does not appear...", OptimizeWarning)
warnings.filterwarnings(
"ignore", "invalid value encountered", RuntimeWarning)
warnings.simplefilter("ignore", LinAlgWarning)
res = linprog(c, A_eq=A, b_eq=b, method=self.method)
res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x)
res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x)
_assert_success(res2, desired_fun=res.fun)
assert_equal(res2.nit, 0)
_assert_success(res3)
assert_(res3.nit < res.nit) # hot start reduces iterations
| TestLinprogRSCommon |
python | django__django | tests/sitemaps_tests/urls/http.py | {
"start": 1633,
"end": 1723
} | class ____(SimpleSitemap):
lastmod = datetime(2013, 3, 13, 10, 0, 0)
| FixedLastmodSitemap |
python | pytorch__pytorch | test/test_mps.py | {
"start": 430862,
"end": 433284
} | class ____(TestCaseMPS):
def test_slicing_with_step(self):
# Slicing with step
# https://github.com/pytorch/pytorch/issues/78886
x_mps = torch.zeros(10, dtype=torch.float32, device="mps")
x_mps[::2] = 1.0
x_cpu = torch.zeros(10, dtype=torch.float32, device="cpu")
x_cpu[::2] = 1.0
self.assertEqual(x_cpu, x_mps)
def test_cast_gather_scatter(self):
for _ in range(50):
input = np.random.randint(0, 255, size=(5, 5, 4), dtype=np.uint8)
with torch.no_grad():
s = torch.tensor(input, dtype=torch.uint8, device="mps").unsqueeze(0)
s_cpu = torch.tensor(input, dtype=torch.uint8, device="cpu").unsqueeze(0)
s = s.long()
s_cpu = s_cpu.long()
self.assertEqual(s.cpu(), s_cpu)
s = s.float()
s_cpu = s_cpu.float()
self.assertEqual(s.cpu(), s_cpu)
s /= 255
s_cpu /= 255
self.assertEqual(s.cpu(), s_cpu)
def test_slicing_replace_column(self):
# https://github.com/pytorch/pytorch/issues/78074
def _helper(tensor_data):
x_cpu = torch.tensor(tensor_data)
x_mps = x_cpu.to('mps')
x_cpu[:, 0] = 7
x_mps[:, 0] = 7
self.assertEqual(x_cpu, x_mps)
_helper([[1, 2, 3], [4, 5, 6]])
_helper([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
_helper([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
def test_inplace_scatter(self):
# https://github.com/pytorch/pytorch/issues/79672
a_mps = torch.ones((2, 2),).to(torch.device("mps"))
b_mps = torch.ones((2, 2),).to(torch.device("mps"))
a_cpu = torch.ones((2, 2),).to(torch.device("cpu"))
b_cpu = torch.ones((2, 2),).to(torch.device("cpu"))
a_mps[:, 0] += b_mps[:, 0]
a_cpu[:, 0] += b_cpu[:, 0]
self.assertEqual(a_cpu, a_mps)
a_mps[:, 0] = a_mps[:, 0] + b_mps[:, 0]
a_cpu[:, 0] = a_cpu[:, 0] + b_cpu[:, 0]
self.assertEqual(a_cpu, a_mps)
# These tests were taken from test/test_view_ops.py
# They are subset of those tests as currently only this subset is working.
# This whole `class` will be removed when we add generic device testing. There
# are no additional tests added apart from what is part of test_view_ops.py
| TestGatherScatter |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/extractors/test_base.py | {
"start": 7237,
"end": 14767
} | class ____(BaseOperator):
get_openlineage_facets: list[BaseFacet] = []
def execute(self, context) -> Any:
pass
def test_default_extraction():
extractor = ExtractorManager().get_extractor_class(OperatorWithoutFailure)
assert extractor is DefaultExtractor
metadata = extractor(OperatorWithoutFailure(task_id="test")).extract()
task_instance = mock.MagicMock()
metadata_on_complete = extractor(OperatorWithoutFailure(task_id="test")).extract_on_complete(
task_instance=task_instance
)
assert metadata == OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=JOB_FACETS,
)
assert metadata_on_complete == OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=FINISHED_FACETS,
)
def test_extraction_without_on_complete():
extractor = ExtractorManager().get_extractor_class(OperatorWithoutComplete)
assert extractor is DefaultExtractor
metadata = extractor(OperatorWithoutComplete(task_id="test")).extract()
task_instance = mock.MagicMock()
metadata_on_complete = extractor(OperatorWithoutComplete(task_id="test")).extract_on_complete(
task_instance=task_instance
)
expected_task_metadata = OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=JOB_FACETS,
)
assert metadata == expected_task_metadata
assert metadata_on_complete == expected_task_metadata
def test_extraction_without_on_start():
extractor = ExtractorManager().get_extractor_class(OperatorWithoutStart)
assert extractor is DefaultExtractor
metadata = extractor(OperatorWithoutStart(task_id="test")).extract()
task_instance = mock.MagicMock()
metadata_on_complete = extractor(OperatorWithoutStart(task_id="test")).extract_on_complete(
task_instance=task_instance
)
assert metadata == OperatorLineage()
assert metadata_on_complete == OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=FINISHED_FACETS,
)
@pytest.mark.parametrize(
("operator_class", "task_state", "expected_job_facets"),
(
(OperatorWithAllOlMethods, TaskInstanceState.FAILED, FAILED_FACETS),
(OperatorWithAllOlMethods, TaskInstanceState.RUNNING, JOB_FACETS),
(OperatorWithAllOlMethods, TaskInstanceState.SUCCESS, FINISHED_FACETS),
(OperatorWithAllOlMethods, TaskInstanceState.UP_FOR_RETRY, FINISHED_FACETS), # Should never happen
(OperatorWithAllOlMethods, None, FINISHED_FACETS), # Should never happen
(OperatorWithoutFailure, TaskInstanceState.FAILED, FINISHED_FACETS),
(OperatorWithoutFailure, TaskInstanceState.RUNNING, JOB_FACETS),
(OperatorWithoutFailure, TaskInstanceState.SUCCESS, FINISHED_FACETS),
(OperatorWithoutFailure, TaskInstanceState.UP_FOR_RETRY, FINISHED_FACETS), # Should never happen
(OperatorWithoutFailure, None, FINISHED_FACETS), # Should never happen
(OperatorWithoutStart, TaskInstanceState.FAILED, FINISHED_FACETS),
(OperatorWithoutStart, TaskInstanceState.RUNNING, {}),
(OperatorWithoutStart, TaskInstanceState.SUCCESS, FINISHED_FACETS),
(OperatorWithoutStart, TaskInstanceState.UP_FOR_RETRY, FINISHED_FACETS), # Should never happen
(OperatorWithoutStart, None, FINISHED_FACETS), # Should never happen
(OperatorWithoutComplete, TaskInstanceState.FAILED, JOB_FACETS),
(OperatorWithoutComplete, TaskInstanceState.RUNNING, JOB_FACETS),
(OperatorWithoutComplete, TaskInstanceState.SUCCESS, JOB_FACETS),
(OperatorWithoutComplete, TaskInstanceState.UP_FOR_RETRY, JOB_FACETS), # Should never happen
(OperatorWithoutComplete, None, JOB_FACETS), # Should never happen
),
)
def test_extractor_manager_calls_appropriate_extractor_method(
operator_class, task_state, expected_job_facets
):
extractor_manager = ExtractorManager()
ti = mock.MagicMock()
metadata = extractor_manager.extract_metadata(
dagrun=mock.MagicMock(run_id="dagrun_run_id"),
task=operator_class(task_id="task_id"),
task_instance_state=task_state,
task_instance=ti,
)
assert metadata.job_facets == expected_job_facets
if not expected_job_facets: # Empty OperatorLineage() is expected
assert not metadata.inputs
assert not metadata.outputs
assert not metadata.run_facets
else:
assert metadata.inputs == INPUTS
assert metadata.outputs == OUTPUTS
assert metadata.run_facets == RUN_FACETS
@mock.patch("airflow.providers.openlineage.conf.custom_extractors")
def test_extractors_env_var(custom_extractors):
custom_extractors.return_value = {
"unit.openlineage.extractors.test_base.ExtractorWithoutExecuteOnFailure"
}
extractor = ExtractorManager().get_extractor_class(SimpleCustomOperator(task_id="example"))
assert extractor is ExtractorWithoutExecuteOnFailure
def test_extractor_without_extract_on_failure_calls_extract_on_complete():
extractor = ExtractorWithoutExecuteOnFailure(SimpleCustomOperator(task_id="example"))
result = extractor.extract_on_failure(None)
assert result == OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=FINISHED_FACETS,
)
def test_extractor_without_extract_on_complete_and_failure_always_calls_extract():
extractor = ExtractorWithExecuteExtractionOnly(SimpleCustomOperator(task_id="example"))
expected_result = OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=JOB_FACETS,
)
result = extractor.extract_on_failure(None)
assert result == expected_result
result = extractor.extract_on_complete(None)
assert result == expected_result
result = extractor.extract()
assert result == expected_result
def test_does_not_use_default_extractor_when_not_a_method():
extractor_class = ExtractorManager().get_extractor_class(BrokenOperator(task_id="a"))
assert extractor_class is None
def test_does_not_use_default_extractor_when_no_get_openlineage_facets():
extractor_class = ExtractorManager().get_extractor_class(BaseOperator(task_id="b"))
assert extractor_class is None
def test_does_not_use_default_extractor_when_explicit_extractor():
extractor_class = ExtractorManager().get_extractor_class(
PythonOperator(task_id="c", python_callable=lambda: 7)
)
assert extractor_class is PythonExtractor
def test_default_extractor_uses_different_operatorlineage_class():
operator = OperatorDifferentOperatorLineageClass(task_id="task_id")
extractor_class = ExtractorManager().get_extractor_class(operator)
assert extractor_class is DefaultExtractor
extractor = extractor_class(operator)
assert extractor.extract() == OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=JOB_FACETS,
)
def test_default_extractor_uses_wrong_operatorlineage_class():
operator = OperatorWrongOperatorLineageClass(task_id="task_id")
# If extractor returns lineage class that can't be changed into OperatorLineage, just return
# empty OperatorLineage
assert ExtractorManager().extract_metadata(mock.MagicMock(), operator, None) == OperatorLineage()
| BrokenOperator |
python | python__mypy | mypy/report.py | {
"start": 28660,
"end": 30442
} | class ____(AbstractXmlReporter):
"""Public reporter that exports HTML via XSLT.
This is slightly different than running `xsltproc` on the .xml files,
because it passes a parameter to rewrite the links.
"""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.xslt_html = etree.XSLT(etree.parse(self.memory_xml.xslt_html_path))
self.param_html = etree.XSLT.strparam("html")
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
last_xml = self.memory_xml.last_xml
if last_xml is None:
return
path = os.path.relpath(tree.path)
if path.startswith(".."):
return
out_path = os.path.join(self.output_dir, "html", path + ".html")
os.makedirs(os.path.dirname(out_path), exist_ok=True)
transformed_html = bytes(self.xslt_html(last_xml, ext=self.param_html))
with open(out_path, "wb") as out_file:
out_file.write(transformed_html)
def on_finish(self) -> None:
last_xml = self.memory_xml.last_xml
assert last_xml is not None
out_path = os.path.join(self.output_dir, "index.html")
out_css = os.path.join(self.output_dir, "mypy-html.css")
transformed_html = bytes(self.xslt_html(last_xml, ext=self.param_html))
with open(out_path, "wb") as out_file:
out_file.write(transformed_html)
shutil.copyfile(self.memory_xml.css_html_path, out_css)
print("Generated HTML report (via XSLT):", os.path.abspath(out_path))
register_reporter("xslt-html", XsltHtmlReporter, needs_lxml=True)
| XsltHtmlReporter |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 45897,
"end": 49361
} | class ____(Response):
"""
Response of models.delete_many endpoint.
:param succeeded:
:type succeeded: Sequence[dict]
:param failed:
:type failed: Sequence[dict]
"""
_service = "models"
_action = "delete_many"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"failed": {
"items": {
"properties": {
"error": {
"description": "Error info",
"properties": {
"codes": {
"items": {"type": "integer"},
"type": "array",
},
"data": {
"additionalProperties": True,
"type": "object",
},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {
"description": "ID of the failed entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"succeeded": {
"items": {
"properties": {
"deleted": {
"description": "Indicates whether the model was deleted",
"type": "boolean",
},
"id": {
"description": "ID of the succeeded entity",
"type": "string",
},
"url": {
"description": "The url of the model file",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, succeeded: Optional[List[dict]] = None, failed: Optional[List[dict]] = None, **kwargs: Any
) -> None:
super(DeleteManyResponse, self).__init__(**kwargs)
self.succeeded = succeeded
self.failed = failed
@schema_property("succeeded")
def succeeded(self) -> Optional[List[dict]]:
return self._property_succeeded
@succeeded.setter
def succeeded(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_succeeded = None
return
self.assert_isinstance(value, "succeeded", (list, tuple))
self.assert_isinstance(value, "succeeded", (dict,), is_array=True)
self._property_succeeded = value
@schema_property("failed")
def failed(self) -> Optional[List[dict]]:
return self._property_failed
@failed.setter
def failed(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_failed = None
return
self.assert_isinstance(value, "failed", (list, tuple))
self.assert_isinstance(value, "failed", (dict,), is_array=True)
self._property_failed = value
| DeleteManyResponse |
python | pytorch__pytorch | torch/_export/serde/schema.py | {
"start": 8384,
"end": 8507
} | class ____:
arg: Annotated[CustomObjArgument, 10]
custom_obj_name: Annotated[str, 20]
@dataclass
| InputToCustomObjSpec |
python | matplotlib__matplotlib | lib/matplotlib/legend.py | {
"start": 1905,
"end": 14038
} | class ____(DraggableOffsetBox):
def __init__(self, legend, use_blit=False, update="loc"):
"""
Wrapper around a `.Legend` to support mouse dragging.
Parameters
----------
legend : `.Legend`
The `.Legend` instance to wrap.
use_blit : bool, optional
Use blitting for faster image composition. For details see
:ref:`func-animation`.
update : {'loc', 'bbox'}, optional
If "loc", update the *loc* parameter of the legend upon finalizing.
If "bbox", update the *bbox_to_anchor* parameter.
"""
self.legend = legend
_api.check_in_list(["loc", "bbox"], update=update)
self._update = update
super().__init__(legend, legend._legend_box, use_blit=use_blit)
def finalize_offset(self):
if self._update == "loc":
self._update_loc(self.get_loc_in_canvas())
elif self._update == "bbox":
self._update_bbox_to_anchor(self.get_loc_in_canvas())
def _update_loc(self, loc_in_canvas):
bbox = self.legend.get_bbox_to_anchor()
# if bbox has zero width or height, the transformation is
# ill-defined. Fall back to the default bbox_to_anchor.
if bbox.width == 0 or bbox.height == 0:
self.legend.set_bbox_to_anchor(None)
bbox = self.legend.get_bbox_to_anchor()
_bbox_transform = BboxTransformFrom(bbox)
self.legend._loc = tuple(_bbox_transform.transform(loc_in_canvas))
def _update_bbox_to_anchor(self, loc_in_canvas):
loc_in_bbox = self.legend.axes.transAxes.transform(loc_in_canvas)
self.legend.set_bbox_to_anchor(loc_in_bbox)
_legend_kw_doc_base = """
bbox_to_anchor : `.BboxBase`, 2-tuple, or 4-tuple of floats
Box that is used to position the legend in conjunction with *loc*.
Defaults to ``axes.bbox`` (if called as a method to `.Axes.legend`) or
``figure.bbox`` (if ``figure.legend``). This argument allows arbitrary
placement of the legend.
Bbox coordinates are interpreted in the coordinate system given by
*bbox_transform*, with the default transform
Axes or Figure coordinates, depending on which ``legend`` is called.
If a 4-tuple or `.BboxBase` is given, then it specifies the bbox
``(x, y, width, height)`` that the legend is placed in.
To put the legend in the best location in the bottom right
quadrant of the Axes (or figure)::
loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5)
A 2-tuple ``(x, y)`` places the corner of the legend specified by *loc* at
x, y. For example, to put the legend's upper right-hand corner in the
center of the Axes (or figure) the following keywords can be used::
loc='upper right', bbox_to_anchor=(0.5, 0.5)
ncols : int, default: 1
The number of columns that the legend has.
For backward compatibility, the spelling *ncol* is also supported
but it is discouraged. If both are given, *ncols* takes precedence.
prop : None or `~matplotlib.font_manager.FontProperties` or dict
The font properties of the legend. If None (default), the current
:data:`matplotlib.rcParams` will be used.
fontsize : int or {'xx-small', 'x-small', 'small', 'medium', 'large', \
'x-large', 'xx-large'}
The font size of the legend. If the value is numeric the size will be the
absolute font size in points. String values are relative to the current
default font size. This argument is only used if *prop* is not specified.
labelcolor : str or list, default: :rc:`legend.labelcolor`
The color of the text in the legend. Either a valid color string
(for example, 'red'), or a list of color strings. The labelcolor can
also be made to match the color of the line or marker using 'linecolor',
'markerfacecolor' (or 'mfc'), or 'markeredgecolor' (or 'mec').
Labelcolor can be set globally using :rc:`legend.labelcolor`. If None,
use :rc:`text.color`.
numpoints : int, default: :rc:`legend.numpoints`
The number of marker points in the legend when creating a legend
entry for a `.Line2D` (line).
scatterpoints : int, default: :rc:`legend.scatterpoints`
The number of marker points in the legend when creating
a legend entry for a `.PathCollection` (scatter plot).
scatteryoffsets : iterable of floats, default: ``[0.375, 0.5, 0.3125]``
The vertical offset (relative to the font size) for the markers
created for a scatter plot legend entry. 0.0 is at the base the
legend text, and 1.0 is at the top. To draw all markers at the
same height, set to ``[0.5]``.
markerscale : float, default: :rc:`legend.markerscale`
The relative size of legend markers compared to the originally drawn ones.
markerfirst : bool, default: True
If *True*, legend marker is placed to the left of the legend label.
If *False*, legend marker is placed to the right of the legend label.
reverse : bool, default: False
If *True*, the legend labels are displayed in reverse order from the input.
If *False*, the legend labels are displayed in the same order as the input.
.. versionadded:: 3.7
frameon : bool, default: :rc:`legend.frameon`
Whether the legend should be drawn on a patch (frame).
fancybox : bool, default: :rc:`legend.fancybox`
Whether round edges should be enabled around the `.FancyBboxPatch` which
makes up the legend's background.
shadow : None, bool or dict, default: :rc:`legend.shadow`
Whether to draw a shadow behind the legend.
The shadow can be configured using `.Patch` keywords.
Customization via :rc:`legend.shadow` is currently not supported.
framealpha : float, default: :rc:`legend.framealpha`
The alpha transparency of the legend's background.
If *shadow* is activated and *framealpha* is ``None``, the default value is
ignored.
facecolor : "inherit" or color, default: :rc:`legend.facecolor`
The legend's background color.
If ``"inherit"``, use :rc:`axes.facecolor`.
edgecolor : "inherit" or color, default: :rc:`legend.edgecolor`
The legend's background patch edge color.
If ``"inherit"``, use :rc:`axes.edgecolor`.
mode : {"expand", None}
If *mode* is set to ``"expand"`` the legend will be horizontally
expanded to fill the Axes area (or *bbox_to_anchor* if defines
the legend's size).
bbox_transform : None or `~matplotlib.transforms.Transform`
The transform for the bounding box (*bbox_to_anchor*). For a value
of ``None`` (default) the Axes'
:data:`!matplotlib.axes.Axes.transAxes` transform will be used.
title : str or None
The legend's title. Default is no title (``None``).
title_fontproperties : None or `~matplotlib.font_manager.FontProperties` or dict
The font properties of the legend's title. If None (default), the
*title_fontsize* argument will be used if present; if *title_fontsize* is
also None, the current :rc:`legend.title_fontsize` will be used.
title_fontsize : int or {'xx-small', 'x-small', 'small', 'medium', 'large', \
'x-large', 'xx-large'}, default: :rc:`legend.title_fontsize`
The font size of the legend's title.
Note: This cannot be combined with *title_fontproperties*. If you want
to set the fontsize alongside other font properties, use the *size*
parameter in *title_fontproperties*.
alignment : {'center', 'left', 'right'}, default: 'center'
The alignment of the legend title and the box of entries. The entries
are aligned as a single block, so that markers always lined up.
borderpad : float, default: :rc:`legend.borderpad`
The fractional whitespace inside the legend border, in font-size units.
labelspacing : float, default: :rc:`legend.labelspacing`
The vertical space between the legend entries, in font-size units.
handlelength : float, default: :rc:`legend.handlelength`
The length of the legend handles, in font-size units.
handleheight : float, default: :rc:`legend.handleheight`
The height of the legend handles, in font-size units.
handletextpad : float, default: :rc:`legend.handletextpad`
The pad between the legend handle and text, in font-size units.
borderaxespad : float, default: :rc:`legend.borderaxespad`
The pad between the Axes and legend border, in font-size units.
columnspacing : float, default: :rc:`legend.columnspacing`
The spacing between columns, in font-size units.
handler_map : dict or None
The custom dictionary mapping instances or types to a legend
handler. This *handler_map* updates the default handler map
found at `matplotlib.legend.Legend.get_legend_handler_map`.
draggable : bool, default: False
Whether the legend can be dragged with the mouse.
"""
_loc_doc_base = """
loc : str or pair of floats, default: {default}
The location of the legend.
The strings ``'upper left'``, ``'upper right'``, ``'lower left'``,
``'lower right'`` place the legend at the corresponding corner of the
{parent}.
The strings ``'upper center'``, ``'lower center'``, ``'center left'``,
``'center right'`` place the legend at the center of the corresponding edge
of the {parent}.
The string ``'center'`` places the legend at the center of the {parent}.
{best}
The location can also be a 2-tuple giving the coordinates of the lower-left
corner of the legend in {parent} coordinates (in which case *bbox_to_anchor*
will be ignored).
For back-compatibility, ``'center right'`` (but no other location) can also
be spelled ``'right'``, and each "string" location can also be given as a
numeric value:
================== =============
Location String Location Code
================== =============
'best' (Axes only) 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
================== =============
{outside}"""
_loc_doc_best = """
The string ``'best'`` places the legend at the location, among the nine
locations defined so far, with the minimum overlap with other drawn
artists. This option can be quite slow for plots with large amounts of
data; your plotting speed may benefit from providing a specific location.
"""
_legend_kw_axes_st = (
_loc_doc_base.format(parent='axes', default=':rc:`legend.loc`',
best=_loc_doc_best, outside='') +
_legend_kw_doc_base)
_docstring.interpd.register(_legend_kw_axes=_legend_kw_axes_st)
_outside_doc = """
If a figure is using the constrained layout manager, the string codes
of the *loc* keyword argument can get better layout behaviour using the
prefix 'outside'. There is ambiguity at the corners, so 'outside
upper right' will make space for the legend above the rest of the
axes in the layout, and 'outside right upper' will make space on the
right side of the layout. In addition to the values of *loc*
listed above, we have 'outside right upper', 'outside right lower',
'outside left upper', and 'outside left lower'. See
:ref:`legend_guide` for more details.
"""
_legend_kw_figure_st = (
_loc_doc_base.format(parent='figure', default="'upper right'",
best='', outside=_outside_doc) +
_legend_kw_doc_base)
_docstring.interpd.register(_legend_kw_figure=_legend_kw_figure_st)
_legend_kw_both_st = (
_loc_doc_base.format(parent='axes/figure',
default=":rc:`legend.loc` for Axes, 'upper right' for Figure",
best=_loc_doc_best, outside=_outside_doc) +
_legend_kw_doc_base)
_docstring.interpd.register(_legend_kw_doc=_legend_kw_both_st)
_legend_kw_set_loc_st = (
_loc_doc_base.format(parent='axes/figure',
default=":rc:`legend.loc` for Axes, 'upper right' for Figure",
best=_loc_doc_best, outside=_outside_doc))
_docstring.interpd.register(_legend_kw_set_loc_doc=_legend_kw_set_loc_st)
| DraggableLegend |
python | kamyu104__LeetCode-Solutions | Python/paint-fence.py | {
"start": 517,
"end": 941
} | class ____(object):
def numWays(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
if n == 0:
return 0
elif n == 1:
return k
ways = [0] * n
ways[0] = k
ways[1] = (k - 1) * ways[0] + k
for i in xrange(2, n):
ways[i] = (k - 1) * (ways[i - 1] + ways[i - 2])
return ways[n - 1]
| Solution2 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 99503,
"end": 99812
} | class ____(sgqlc.types.Enum):
"""Severity of the vulnerability.
Enumeration Choices:
* `CRITICAL`: Critical.
* `HIGH`: High.
* `LOW`: Low.
* `MODERATE`: Moderate.
"""
__schema__ = github_schema
__choices__ = ("CRITICAL", "HIGH", "LOW", "MODERATE")
| SecurityAdvisorySeverity |
python | huggingface__transformers | src/transformers/models/nanochat/modular_nanochat.py | {
"start": 4592,
"end": 4902
} | class ____(LlamaDecoderLayer):
def __init__(self, config: NanoChatConfig, layer_idx: int):
super().__init__()
self.input_layernorm = NanoChatRMSNorm(eps=config.rms_norm_eps)
self.post_attention_layernorm = NanoChatRMSNorm(eps=config.rms_norm_eps)
@auto_docstring
| NanoChatDecoderLayer |
python | ansible__ansible | lib/ansible/_internal/_templating/_lazy_containers.py | {
"start": 9837,
"end": 15778
} | class ____(_AnsibleTaggedDict, _AnsibleLazyTemplateMixin):
__slots__ = _AnsibleLazyTemplateMixin._SLOTS
def __init__(self, contents: t.Iterable | _LazyValueSource, /, **kwargs) -> None:
if isinstance(contents, _AnsibleLazyTemplateDict):
super().__init__(dict.items(contents), **kwargs)
elif isinstance(contents, _LazyValueSource):
super().__init__(contents.source, **kwargs)
else:
raise UnsupportedConstructionMethodError()
_AnsibleLazyTemplateMixin.__init__(self, contents)
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
if (value := super().get(key, _NoKeySentinel)) is _NoKeySentinel:
return default
return self._proxy_or_render_lazy_value(key, value)
def __getitem__(self, key: t.Any, /) -> t.Any:
return self._proxy_or_render_lazy_value(key, super().__getitem__(key))
def __str__(self):
return str(self.copy()._native_copy()) # inefficient, but avoids mutating the current instance (to make debugging practical)
def __repr__(self):
return repr(self.copy()._native_copy()) # inefficient, but avoids mutating the current instance (to make debugging practical)
def __iter__(self):
# We're using the base implementation, but must override `__iter__` to skip `dict` fast-path copy, which would bypass lazy behavior.
# See: https://github.com/python/cpython/blob/ffcc450a9b8b6927549b501eff7ac14abc238448/Objects/dictobject.c#L3861-L3864
return super().__iter__()
def setdefault(self, key, default=None, /) -> t.Any:
if (value := self.get(key, _NoKeySentinel)) is not _NoKeySentinel:
return value
super().__setitem__(key, default)
return default
def items(self):
for key, value in super().items():
yield key, self._proxy_or_render_lazy_value(key, value)
def values(self):
for _key, value in self.items():
yield value
def pop(self, key, default=_NoKeySentinel, /) -> t.Any:
if (value := super().get(key, _NoKeySentinel)) is _NoKeySentinel:
if default is _NoKeySentinel:
raise KeyError(key)
return default
value = self._proxy_or_render_lazy_value(_NoKeySentinel, value)
del self[key]
return value
def popitem(self) -> t.Any:
try:
key = next(reversed(self))
except StopIteration:
raise KeyError("popitem(): dictionary is empty")
value = self._proxy_or_render_lazy_value(_NoKeySentinel, self[key])
del self[key]
return key, value
def _native_copy(self) -> dict:
return dict(self.items())
@staticmethod
def _item_source(value: dict) -> dict | _LazyValueSource:
if isinstance(value, _AnsibleLazyTemplateDict):
return _LazyValueSource(source=dict.items(value), templar=value._templar, lazy_options=value._lazy_options)
return value
def _yield_non_lazy_dict_items(self) -> t.Iterator[tuple[str, t.Any]]:
"""
Delegate to the base collection items iterator to yield the raw contents.
As of Python 3.13, generator functions are significantly faster than inline generator expressions.
"""
for k, v in dict.items(self):
yield k, v.value if type(v) is _LazyValue else v # pylint: disable=unidiomatic-typecheck
def _non_lazy_copy(self) -> dict:
return AnsibleTagHelper.tag_copy(self, self._yield_non_lazy_dict_items(), value_type=dict)
@staticmethod
def _lazy_values(values: dict, lazy_options: LazyOptions) -> _LazyValueSource:
return _LazyValueSource(source=((k, _LazyValue(v)) for k, v in values.items()), templar=TemplateContext.current().templar, lazy_options=lazy_options)
@staticmethod
def _proxy_or_render_other(other: t.Any | None) -> None:
"""Call `_proxy_or_render_lazy_values` if `other` is a lazy dict. Used internally by comparison methods."""
if type(other) is _AnsibleLazyTemplateDict: # pylint: disable=unidiomatic-typecheck
other._proxy_or_render_lazy_values()
def _proxy_or_render_lazy_values(self) -> None:
"""Ensure all `_LazyValue` wrapped values have been processed."""
for _unused in self.values():
pass
def __eq__(self, other):
self._proxy_or_render_lazy_values()
self._proxy_or_render_other(other)
return super().__eq__(other)
def __ne__(self, other):
self._proxy_or_render_lazy_values()
self._proxy_or_render_other(other)
return super().__ne__(other)
def __or__(self, other):
# DTFIX-FUTURE: support preservation of laziness when possible like we do for list
# Both sides end up going through _proxy_or_render_lazy_value, so there's no Templar preservation needed.
# In the future this could be made more lazy when both Templar instances are the same, or if per-value Templar tracking was used.
return super().__or__(other)
def __ror__(self, other):
# DTFIX-FUTURE: support preservation of laziness when possible like we do for list
# Both sides end up going through _proxy_or_render_lazy_value, so there's no Templar preservation needed.
# In the future this could be made more lazy when both Templar instances are the same, or if per-value Templar tracking was used.
return super().__ror__(other)
def __deepcopy__(self, memo):
return _AnsibleLazyTemplateDict(
_LazyValueSource(
source=((copy.deepcopy(k), copy.deepcopy(v)) for k, v in super().items()),
templar=copy.deepcopy(self._templar),
lazy_options=copy.deepcopy(self._lazy_options),
)
)
@t.final # consumers of lazy collections rely heavily on the concrete types being final
| _AnsibleLazyTemplateDict |
python | doocs__leetcode | solution/1800-1899/1813.Sentence Similarity III/Solution.py | {
"start": 0,
"end": 473
} | class ____:
def areSentencesSimilar(self, sentence1: str, sentence2: str) -> bool:
words1, words2 = sentence1.split(), sentence2.split()
m, n = len(words1), len(words2)
if m < n:
words1, words2 = words2, words1
m, n = n, m
i = j = 0
while i < n and words1[i] == words2[i]:
i += 1
while j < n and words1[m - 1 - j] == words2[n - 1 - j]:
j += 1
return i + j >= n
| Solution |
python | kamyu104__LeetCode-Solutions | Python/groups-of-strings.py | {
"start": 873,
"end": 1579
} | class ____(object):
def groupStrings(self, words):
"""
:type words: List[str]
:rtype: List[int]
"""
uf = UnionFind(len(words))
lookup = {}
for i, x in enumerate(words):
mask = reduce(lambda x, y: x|(1<<(ord(y)-ord('a'))), x, 0)
if mask not in lookup:
lookup[mask] = i
uf.union_set(i, lookup[mask])
bit = 1
while bit <= mask:
if mask&bit:
if mask^bit not in lookup:
lookup[mask^bit] = i
uf.union_set(i, lookup[mask^bit])
bit <<= 1
return [uf.total, max(uf.size)]
| Solution |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 541431,
"end": 578033
} | class ____:
"""Create a new shape."""
@staticmethod
def horizontal_angle(C, P):
"""Return the angle to the horizontal for the connection from C to P.
This uses the arcus sine function and resolves its inherent ambiguity by
looking up in which quadrant vector S = P - C is located.
"""
S = Point(P - C).unit # unit vector 'C' -> 'P'
alfa = math.asin(abs(S.y)) # absolute angle from horizontal
if S.x < 0: # make arcsin result unique
if S.y <= 0: # bottom-left
alfa = -(math.pi - alfa)
else: # top-left
alfa = math.pi - alfa
else:
if S.y >= 0: # top-right
pass
else: # bottom-right
alfa = -alfa
return alfa
def __init__(self, page: Page):
CheckParent(page)
self.page = page
self.doc = page.parent
if not self.doc.is_pdf:
raise ValueError("is no PDF")
self.height = page.mediabox_size.y
self.width = page.mediabox_size.x
self.x = page.cropbox_position.x
self.y = page.cropbox_position.y
self.pctm = page.transformation_matrix # page transf. matrix
self.ipctm = ~self.pctm # inverted transf. matrix
self.draw_cont = ""
self.text_cont = ""
self.totalcont = ""
self.last_point = None
self.rect = None
def updateRect(self, x):
if self.rect is None:
if len(x) == 2:
self.rect = Rect(x, x)
else:
self.rect = Rect(x)
else:
if len(x) == 2:
x = Point(x)
self.rect.x0 = min(self.rect.x0, x.x)
self.rect.y0 = min(self.rect.y0, x.y)
self.rect.x1 = max(self.rect.x1, x.x)
self.rect.y1 = max(self.rect.y1, x.y)
else:
x = Rect(x)
self.rect.x0 = min(self.rect.x0, x.x0)
self.rect.y0 = min(self.rect.y0, x.y0)
self.rect.x1 = max(self.rect.x1, x.x1)
self.rect.y1 = max(self.rect.y1, x.y1)
def draw_line(self, p1: point_like, p2: point_like) -> Point:
"""Draw a line between two points."""
p1 = Point(p1)
p2 = Point(p2)
if not (self.last_point == p1):
self.draw_cont += _format_g(JM_TUPLE(p1 * self.ipctm)) + " m\n"
self.last_point = p1
self.updateRect(p1)
self.draw_cont += _format_g(JM_TUPLE(p2 * self.ipctm)) + " l\n"
self.updateRect(p2)
self.last_point = p2
return self.last_point
def draw_polyline(self, points: list) -> Point:
"""Draw several connected line segments."""
for i, p in enumerate(points):
if i == 0:
if not (self.last_point == Point(p)):
self.draw_cont += _format_g(JM_TUPLE(Point(p) * self.ipctm)) + " m\n"
self.last_point = Point(p)
else:
self.draw_cont += _format_g(JM_TUPLE(Point(p) * self.ipctm)) + " l\n"
self.updateRect(p)
self.last_point = Point(points[-1])
return self.last_point
def draw_bezier(
self,
p1: point_like,
p2: point_like,
p3: point_like,
p4: point_like,
) -> Point:
"""Draw a standard cubic Bezier curve."""
p1 = Point(p1)
p2 = Point(p2)
p3 = Point(p3)
p4 = Point(p4)
if not (self.last_point == p1):
self.draw_cont += _format_g(JM_TUPLE(p1 * self.ipctm)) + " m\n"
args = JM_TUPLE(list(p2 * self.ipctm) + list(p3 * self.ipctm) + list(p4 * self.ipctm))
self.draw_cont += _format_g(args) + " c\n"
self.updateRect(p1)
self.updateRect(p2)
self.updateRect(p3)
self.updateRect(p4)
self.last_point = p4
return self.last_point
def draw_oval(self, tetra: typing.Union[quad_like, rect_like]) -> Point:
"""Draw an ellipse inside a tetrapod."""
if len(tetra) != 4:
raise ValueError("invalid arg length")
if hasattr(tetra[0], "__float__"):
q = Rect(tetra).quad
else:
q = Quad(tetra)
mt = q.ul + (q.ur - q.ul) * 0.5
mr = q.ur + (q.lr - q.ur) * 0.5
mb = q.ll + (q.lr - q.ll) * 0.5
ml = q.ul + (q.ll - q.ul) * 0.5
if not (self.last_point == ml):
self.draw_cont += _format_g(JM_TUPLE(ml * self.ipctm)) + " m\n"
self.last_point = ml
self.draw_curve(ml, q.ll, mb)
self.draw_curve(mb, q.lr, mr)
self.draw_curve(mr, q.ur, mt)
self.draw_curve(mt, q.ul, ml)
self.updateRect(q.rect)
self.last_point = ml
return self.last_point
def draw_circle(self, center: point_like, radius: float) -> Point:
"""Draw a circle given its center and radius."""
if not radius > EPSILON:
raise ValueError("radius must be positive")
center = Point(center)
p1 = center - (radius, 0)
return self.draw_sector(center, p1, 360, fullSector=False)
def draw_curve(
self,
p1: point_like,
p2: point_like,
p3: point_like,
) -> Point:
"""Draw a curve between points using one control point."""
kappa = 0.55228474983
p1 = Point(p1)
p2 = Point(p2)
p3 = Point(p3)
k1 = p1 + (p2 - p1) * kappa
k2 = p3 + (p2 - p3) * kappa
return self.draw_bezier(p1, k1, k2, p3)
def draw_sector(
self,
center: point_like,
point: point_like,
beta: float,
fullSector: bool = True,
) -> Point:
"""Draw a circle sector."""
center = Point(center)
point = Point(point)
l3 = lambda a, b: _format_g((a, b)) + " m\n"
l4 = lambda a, b, c, d, e, f: _format_g((a, b, c, d, e, f)) + " c\n"
l5 = lambda a, b: _format_g((a, b)) + " l\n"
betar = math.radians(-beta)
w360 = math.radians(math.copysign(360, betar)) * (-1)
w90 = math.radians(math.copysign(90, betar))
w45 = w90 / 2
while abs(betar) > 2 * math.pi:
betar += w360 # bring angle below 360 degrees
if not (self.last_point == point):
self.draw_cont += l3(*JM_TUPLE(point * self.ipctm))
self.last_point = point
Q = Point(0, 0) # just make sure it exists
C = center
P = point
S = P - C # vector 'center' -> 'point'
rad = abs(S) # circle radius
if not rad > EPSILON:
raise ValueError("radius must be positive")
alfa = self.horizontal_angle(center, point)
while abs(betar) > abs(w90): # draw 90 degree arcs
q1 = C.x + math.cos(alfa + w90) * rad
q2 = C.y + math.sin(alfa + w90) * rad
Q = Point(q1, q2) # the arc's end point
r1 = C.x + math.cos(alfa + w45) * rad / math.cos(w45)
r2 = C.y + math.sin(alfa + w45) * rad / math.cos(w45)
R = Point(r1, r2) # crossing point of tangents
kappah = (1 - math.cos(w45)) * 4 / 3 / abs(R - Q)
kappa = kappah * abs(P - Q)
cp1 = P + (R - P) * kappa # control point 1
cp2 = Q + (R - Q) * kappa # control point 2
self.draw_cont += l4(*JM_TUPLE(
list(cp1 * self.ipctm) + list(cp2 * self.ipctm) + list(Q * self.ipctm)
))
betar -= w90 # reduce param angle by 90 deg
alfa += w90 # advance start angle by 90 deg
P = Q # advance to arc end point
# draw (remaining) arc
if abs(betar) > 1e-3: # significant degrees left?
beta2 = betar / 2
q1 = C.x + math.cos(alfa + betar) * rad
q2 = C.y + math.sin(alfa + betar) * rad
Q = Point(q1, q2) # the arc's end point
r1 = C.x + math.cos(alfa + beta2) * rad / math.cos(beta2)
r2 = C.y + math.sin(alfa + beta2) * rad / math.cos(beta2)
R = Point(r1, r2) # crossing point of tangents
# kappa height is 4/3 of segment height
kappah = (1 - math.cos(beta2)) * 4 / 3 / abs(R - Q) # kappa height
kappa = kappah * abs(P - Q) / (1 - math.cos(betar))
cp1 = P + (R - P) * kappa # control point 1
cp2 = Q + (R - Q) * kappa # control point 2
self.draw_cont += l4(*JM_TUPLE(
list(cp1 * self.ipctm) + list(cp2 * self.ipctm) + list(Q * self.ipctm)
))
if fullSector:
self.draw_cont += l3(*JM_TUPLE(point * self.ipctm))
self.draw_cont += l5(*JM_TUPLE(center * self.ipctm))
self.draw_cont += l5(*JM_TUPLE(Q * self.ipctm))
self.last_point = Q
return self.last_point
def draw_rect(self, rect: rect_like, *, radius=None) -> Point:
"""Draw a rectangle.
Args:
radius: if not None, the rectangle will have rounded corners.
This is the radius of the curvature, given as percentage of
the rectangle width or height. Valid are values 0 < v <= 0.5.
For a sequence of two values, the corners will have different
radii. Otherwise, the percentage will be computed from the
shorter side. A value of (0.5, 0.5) will draw an ellipse.
"""
r = Rect(rect)
if radius is None: # standard rectangle
self.draw_cont += _format_g(JM_TUPLE(
list(r.bl * self.ipctm) + [r.width, r.height]
)) + " re\n"
self.updateRect(r)
self.last_point = r.tl
return self.last_point
# rounded corners requested. This requires 1 or 2 values, each
# with 0 < value <= 0.5
if hasattr(radius, "__float__"):
if radius <= 0 or radius > 0.5:
raise ValueError(f"bad radius value {radius}.")
d = min(r.width, r.height) * radius
px = (d, 0)
py = (0, d)
elif hasattr(radius, "__len__") and len(radius) == 2:
rx, ry = radius
px = (rx * r.width, 0)
py = (0, ry * r.height)
if min(rx, ry) <= 0 or max(rx, ry) > 0.5:
raise ValueError(f"bad radius value {radius}.")
else:
raise ValueError(f"bad radius value {radius}.")
lp = self.draw_line(r.tl + py, r.bl - py)
lp = self.draw_curve(lp, r.bl, r.bl + px)
lp = self.draw_line(lp, r.br - px)
lp = self.draw_curve(lp, r.br, r.br - py)
lp = self.draw_line(lp, r.tr + py)
lp = self.draw_curve(lp, r.tr, r.tr - px)
lp = self.draw_line(lp, r.tl + px)
self.last_point = self.draw_curve(lp, r.tl, r.tl + py)
self.updateRect(r)
return self.last_point
def draw_quad(self, quad: quad_like) -> Point:
"""Draw a Quad."""
q = Quad(quad)
return self.draw_polyline([q.ul, q.ll, q.lr, q.ur, q.ul])
def draw_zigzag(
self,
p1: point_like,
p2: point_like,
breadth: float = 2,
) -> Point:
"""Draw a zig-zagged line from p1 to p2."""
p1 = Point(p1)
p2 = Point(p2)
S = p2 - p1 # vector start - end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = Matrix(util_hor_matrix(p1, p2)) # normalize line to x-axis
i_mat = ~matrix # get original position
points = [] # stores edges
for i in range(1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -1) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, 1) * mb
else: # ignore others
continue
points.append(p * i_mat)
self.draw_polyline([p1] + points + [p2]) # add start and end points
return p2
def draw_squiggle(
self,
p1: point_like,
p2: point_like,
breadth=2,
) -> Point:
"""Draw a squiggly line from p1 to p2."""
p1 = Point(p1)
p2 = Point(p2)
S = p2 - p1 # vector start - end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = Matrix(util_hor_matrix(p1, p2)) # normalize line to x-axis
i_mat = ~matrix # get original position
k = 2.4142135623765633 # y of draw_curve helper point
points = [] # stores edges
for i in range(1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -k) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, k) * mb
else: # else on connection line
p = Point(i, 0) * mb
points.append(p * i_mat)
points = [p1] + points + [p2]
cnt = len(points)
i = 0
while i + 2 < cnt:
self.draw_curve(points[i], points[i + 1], points[i + 2])
i += 2
return p2
# ==============================================================================
# Shape.insert_text
# ==============================================================================
def insert_text(
self,
point: point_like,
buffer: typing.Union[str, list],
*,
fontsize: float = 11,
lineheight: OptFloat = None,
fontname: str = "helv",
fontfile: OptStr = None,
set_simple: bool = 0,
encoding: int = 0,
color: OptSeq = None,
fill: OptSeq = None,
render_mode: int = 0,
border_width: float = 0.05,
miter_limit: float = 1,
rotate: int = 0,
morph: OptSeq = None,
stroke_opacity: float = 1,
fill_opacity: float = 1,
oc: int = 0,
) -> int:
# ensure 'text' is a list of strings, worth dealing with
if not bool(buffer):
return 0
if type(buffer) not in (list, tuple):
text = buffer.splitlines()
else:
text = buffer
if not len(text) > 0:
return 0
point = Point(point)
try:
maxcode = max([ord(c) for c in " ".join(text)])
except Exception:
exception_info()
return 0
# ensure valid 'fontname'
fname = fontname
if fname.startswith("/"):
fname = fname[1:]
xref = self.page.insert_font(
fontname=fname, fontfile=fontfile, encoding=encoding, set_simple=set_simple
)
fontinfo = CheckFontInfo(self.doc, xref)
fontdict = fontinfo[1]
ordering = fontdict["ordering"]
simple = fontdict["simple"]
bfname = fontdict["name"]
ascender = fontdict["ascender"]
descender = fontdict["descender"]
if lineheight:
lheight = fontsize * lineheight
elif ascender - descender <= 1:
lheight = fontsize * 1.2
else:
lheight = fontsize * (ascender - descender)
if maxcode > 255:
glyphs = self.doc.get_char_widths(xref, maxcode + 1)
else:
glyphs = fontdict["glyphs"]
tab = []
for t in text:
if simple and bfname not in ("Symbol", "ZapfDingbats"):
g = None
else:
g = glyphs
tab.append(getTJstr(t, g, simple, ordering))
text = tab
color_str = ColorCode(color, "c")
fill_str = ColorCode(fill, "f")
if not fill and render_mode == 0: # ensure fill color when 0 Tr
fill = color
fill_str = ColorCode(color, "f")
morphing = CheckMorph(morph)
rot = rotate
if rot % 90 != 0:
raise ValueError("bad rotate value")
while rot < 0:
rot += 360
rot = rot % 360 # text rotate = 0, 90, 270, 180
templ1 = lambda a, b, c, d, e, f, g: f"\nq\n{a}{b}BT\n{c}1 0 0 1 {_format_g((d, e))} Tm\n/{f} {_format_g(g)} Tf "
templ2 = lambda a: f"TJ\n0 -{_format_g(a)} TD\n"
cmp90 = "0 1 -1 0 0 0 cm\n" # rotates 90 deg counter-clockwise
cmm90 = "0 -1 1 0 0 0 cm\n" # rotates 90 deg clockwise
cm180 = "-1 0 0 -1 0 0 cm\n" # rotates by 180 deg.
height = self.height
width = self.width
# setting up for standard rotation directions
# case rotate = 0
if morphing:
m1 = Matrix(1, 0, 0, 1, morph[0].x + self.x, height - morph[0].y - self.y)
mat = ~m1 * morph[1] * m1
cm = _format_g(JM_TUPLE(mat)) + " cm\n"
else:
cm = ""
top = height - point.y - self.y # start of 1st char
left = point.x + self.x # start of 1. char
space = top # space available
#headroom = point.y + self.y # distance to page border
if rot == 90:
left = height - point.y - self.y
top = -point.x - self.x
cm += cmp90
space = width - abs(top)
#headroom = point.x + self.x
elif rot == 270:
left = -height + point.y + self.y
top = point.x + self.x
cm += cmm90
space = abs(top)
#headroom = width - point.x - self.x
elif rot == 180:
left = -point.x - self.x
top = -height + point.y + self.y
cm += cm180
space = abs(point.y + self.y)
#headroom = height - point.y - self.y
optcont = self.page._get_optional_content(oc)
if optcont is not None:
bdc = "/OC /%s BDC\n" % optcont
emc = "EMC\n"
else:
bdc = emc = ""
alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
if alpha is None:
alpha = ""
else:
alpha = "/%s gs\n" % alpha
nres = templ1(bdc, alpha, cm, left, top, fname, fontsize)
if render_mode > 0:
nres += "%i Tr " % render_mode
nres += _format_g(border_width * fontsize) + " w "
if miter_limit is not None:
nres += _format_g(miter_limit) + " M "
if color is not None:
nres += color_str
if fill is not None:
nres += fill_str
# =========================================================================
# start text insertion
# =========================================================================
nres += text[0]
nlines = 1 # set output line counter
if len(text) > 1:
nres += templ2(lheight) # line 1
else:
nres += 'TJ'
for i in range(1, len(text)):
if space < lheight:
break # no space left on page
if i > 1:
nres += "\nT* "
nres += text[i] + 'TJ'
space -= lheight
nlines += 1
nres += "\nET\n%sQ\n" % emc
# =========================================================================
# end of text insertion
# =========================================================================
# update the /Contents object
self.text_cont += nres
return nlines
# ==============================================================================
# Shape.insert_textbox
# ==============================================================================
def insert_textbox(
self,
rect: rect_like,
buffer: typing.Union[str, list],
*,
fontname: OptStr = "helv",
fontfile: OptStr = None,
fontsize: float = 11,
lineheight: OptFloat = None,
set_simple: bool = 0,
encoding: int = 0,
color: OptSeq = None,
fill: OptSeq = None,
expandtabs: int = 1,
border_width: float = 0.05,
miter_limit: float = 1,
align: int = 0,
render_mode: int = 0,
rotate: int = 0,
morph: OptSeq = None,
stroke_opacity: float = 1,
fill_opacity: float = 1,
oc: int = 0,
) -> float:
"""Insert text into a given rectangle.
Args:
rect -- the textbox to fill
buffer -- text to be inserted
fontname -- a Base-14 font, font name or '/name'
fontfile -- name of a font file
fontsize -- font size
lineheight -- overwrite the font property
color -- RGB stroke color triple
fill -- RGB fill color triple
render_mode -- text rendering control
border_width -- thickness of glyph borders as percentage of fontsize
expandtabs -- handles tabulators with string function
align -- left, center, right, justified
rotate -- 0, 90, 180, or 270 degrees
morph -- morph box with a matrix and a fixpoint
Returns:
unused or deficit rectangle area (float)
"""
rect = Rect(rect)
if rect.is_empty or rect.is_infinite:
raise ValueError("text box must be finite and not empty")
color_str = ColorCode(color, "c")
fill_str = ColorCode(fill, "f")
if fill is None and render_mode == 0: # ensure fill color for 0 Tr
fill = color
fill_str = ColorCode(color, "f")
optcont = self.page._get_optional_content(oc)
if optcont is not None:
bdc = "/OC /%s BDC\n" % optcont
emc = "EMC\n"
else:
bdc = emc = ""
# determine opacity / transparency
alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
if alpha is None:
alpha = ""
else:
alpha = "/%s gs\n" % alpha
if rotate % 90 != 0:
raise ValueError("rotate must be multiple of 90")
rot = rotate
while rot < 0:
rot += 360
rot = rot % 360
# is buffer worth of dealing with?
if not bool(buffer):
return rect.height if rot in (0, 180) else rect.width
cmp90 = "0 1 -1 0 0 0 cm\n" # rotates counter-clockwise
cmm90 = "0 -1 1 0 0 0 cm\n" # rotates clockwise
cm180 = "-1 0 0 -1 0 0 cm\n" # rotates by 180 deg.
height = self.height
fname = fontname
if fname.startswith("/"):
fname = fname[1:]
xref = self.page.insert_font(
fontname=fname, fontfile=fontfile, encoding=encoding, set_simple=set_simple
)
fontinfo = CheckFontInfo(self.doc, xref)
fontdict = fontinfo[1]
ordering = fontdict["ordering"]
simple = fontdict["simple"]
glyphs = fontdict["glyphs"]
bfname = fontdict["name"]
ascender = fontdict["ascender"]
descender = fontdict["descender"]
if lineheight:
lheight_factor = lineheight
elif ascender - descender <= 1:
lheight_factor = 1.2
else:
lheight_factor = ascender - descender
lheight = fontsize * lheight_factor
# create a list from buffer, split into its lines
if type(buffer) in (list, tuple):
t0 = "\n".join(buffer)
else:
t0 = buffer
maxcode = max([ord(c) for c in t0])
# replace invalid char codes for simple fonts
if simple and maxcode > 255:
t0 = "".join([c if ord(c) < 256 else "?" for c in t0])
t0 = t0.splitlines()
glyphs = self.doc.get_char_widths(xref, maxcode + 1)
if simple and bfname not in ("Symbol", "ZapfDingbats"):
tj_glyphs = None
else:
tj_glyphs = glyphs
# ----------------------------------------------------------------------
# calculate pixel length of a string
# ----------------------------------------------------------------------
def pixlen(x):
"""Calculate pixel length of x."""
if ordering < 0:
return sum([glyphs[ord(c)][1] for c in x]) * fontsize
else:
return len(x) * fontsize
# ---------------------------------------------------------------------
if ordering < 0:
blen = glyphs[32][1] * fontsize # pixel size of space character
else:
blen = fontsize
text = "" # output buffer
if CheckMorph(morph):
m1 = Matrix(
1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y
)
mat = ~m1 * morph[1] * m1
cm = _format_g(JM_TUPLE(mat)) + " cm\n"
else:
cm = ""
# ---------------------------------------------------------------------
# adjust for text orientation / rotation
# ---------------------------------------------------------------------
progr = 1 # direction of line progress
c_pnt = Point(0, fontsize * ascender) # used for line progress
if rot == 0: # normal orientation
point = rect.tl + c_pnt # line 1 is 'lheight' below top
maxwidth = rect.width # pixels available in one line
maxheight = rect.height # available text height
elif rot == 90: # rotate counter clockwise
c_pnt = Point(fontsize * ascender, 0) # progress in x-direction
point = rect.bl + c_pnt # line 1 'lheight' away from left
maxwidth = rect.height # pixels available in one line
maxheight = rect.width # available text height
cm += cmp90
elif rot == 180: # text upside down
# progress upwards in y direction
c_pnt = -Point(0, fontsize * ascender)
point = rect.br + c_pnt # line 1 'lheight' above bottom
maxwidth = rect.width # pixels available in one line
progr = -1 # subtract lheight for next line
maxheight =rect.height # available text height
cm += cm180
else: # rotate clockwise (270 or -90)
# progress from right to left
c_pnt = -Point(fontsize * ascender, 0)
point = rect.tr + c_pnt # line 1 'lheight' left of right
maxwidth = rect.height # pixels available in one line
progr = -1 # subtract lheight for next line
maxheight = rect.width # available text height
cm += cmm90
# =====================================================================
# line loop
# =====================================================================
just_tab = [] # 'justify' indicators per line
for i, line in enumerate(t0):
line_t = line.expandtabs(expandtabs).split(" ") # split into words
num_words = len(line_t)
lbuff = "" # init line buffer
rest = maxwidth # available line pixels
# =================================================================
# word loop
# =================================================================
for j in range(num_words):
word = line_t[j]
pl_w = pixlen(word) # pixel len of word
if rest >= pl_w: # does it fit on the line?
lbuff += word + " " # yes, append word
rest -= pl_w + blen # update available line space
continue # next word
# word doesn't fit - output line (if not empty)
if lbuff:
lbuff = lbuff.rstrip() + "\n" # line full, append line break
text += lbuff # append to total text
just_tab.append(True) # can align-justify
lbuff = "" # re-init line buffer
rest = maxwidth # re-init avail. space
if pl_w <= maxwidth: # word shorter than 1 line?
lbuff = word + " " # start the line with it
rest = maxwidth - pl_w - blen # update free space
continue
# long word: split across multiple lines - char by char ...
if len(just_tab) > 0:
just_tab[-1] = False # cannot align-justify
for c in word:
if pixlen(lbuff) <= maxwidth - pixlen(c):
lbuff += c
else: # line full
lbuff += "\n" # close line
text += lbuff # append to text
just_tab.append(False) # cannot align-justify
lbuff = c # start new line with this char
lbuff += " " # finish long word
rest = maxwidth - pixlen(lbuff) # long word stored
if lbuff: # unprocessed line content?
text += lbuff.rstrip() # append to text
just_tab.append(False) # cannot align-justify
if i < len(t0) - 1: # not the last line?
text += "\n" # insert line break
# compute used part of the textbox
if text.endswith("\n"):
text = text[:-1]
lb_count = text.count("\n") + 1 # number of lines written
# text height = line count * line height plus one descender value
text_height = lheight * lb_count - descender * fontsize
more = text_height - maxheight # difference to height limit
if more > EPSILON: # landed too much outside rect
return (-1) * more # return deficit, don't output
more = abs(more)
if more < EPSILON:
more = 0 # don't bother with epsilons
nres = "\nq\n%s%sBT\n" % (bdc, alpha) + cm # initialize output buffer
templ = lambda a, b, c, d: f"1 0 0 1 {_format_g((a, b))} Tm /{c} {_format_g(d)} Tf "
# center, right, justify: output each line with its own specifics
text_t = text.splitlines() # split text in lines again
just_tab[-1] = False # never justify last line
for i, t in enumerate(text_t):
spacing = 0
pl = maxwidth - pixlen(t) # length of empty line part
pnt = point + c_pnt * (i * lheight_factor) # text start of line
if align == 1: # center: right shift by half width
if rot in (0, 180):
pnt = pnt + Point(pl / 2, 0) * progr
else:
pnt = pnt - Point(0, pl / 2) * progr
elif align == 2: # right: right shift by full width
if rot in (0, 180):
pnt = pnt + Point(pl, 0) * progr
else:
pnt = pnt - Point(0, pl) * progr
elif align == 3: # justify
spaces = t.count(" ") # number of spaces in line
if spaces > 0 and just_tab[i]: # if any, and we may justify
spacing = pl / spaces # make every space this much larger
else:
spacing = 0 # keep normal space length
top = height - pnt.y - self.y
left = pnt.x + self.x
if rot == 90:
left = height - pnt.y - self.y
top = -pnt.x - self.x
elif rot == 270:
left = -height + pnt.y + self.y
top = pnt.x + self.x
elif rot == 180:
left = -pnt.x - self.x
top = -height + pnt.y + self.y
nres += templ(left, top, fname, fontsize)
if render_mode > 0:
nres += "%i Tr " % render_mode
nres += _format_g(border_width * fontsize) + " w "
if miter_limit is not None:
nres += _format_g(miter_limit) + " M "
if align == 3:
nres += _format_g(spacing) + " Tw "
if color is not None:
nres += color_str
if fill is not None:
nres += fill_str
nres += "%sTJ\n" % getTJstr(t, tj_glyphs, simple, ordering)
nres += "ET\n%sQ\n" % emc
self.text_cont += nres
self.updateRect(rect)
return more
def finish(
self,
width: float = 1,
color: OptSeq = (0,),
fill: OptSeq = None,
lineCap: int = 0,
lineJoin: int = 0,
dashes: OptStr = None,
even_odd: bool = False,
morph: OptSeq = None,
closePath: bool = True,
fill_opacity: float = 1,
stroke_opacity: float = 1,
oc: int = 0,
) -> None:
"""Finish the current drawing segment.
Notes:
Apply colors, opacity, dashes, line style and width, or
morphing. Also whether to close the path
by connecting last to first point.
"""
if self.draw_cont == "": # treat empty contents as no-op
return
if width == 0: # border color makes no sense then
color = None
elif color is None: # vice versa
width = 0
# if color == None and fill == None:
# raise ValueError("at least one of 'color' or 'fill' must be given")
color_str = ColorCode(color, "c") # ensure proper color string
fill_str = ColorCode(fill, "f") # ensure proper fill string
optcont = self.page._get_optional_content(oc)
if optcont is not None:
self.draw_cont = "/OC /%s BDC\n" % optcont + self.draw_cont
emc = "EMC\n"
else:
emc = ""
alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
if alpha is not None:
self.draw_cont = "/%s gs\n" % alpha + self.draw_cont
if width != 1 and width != 0:
self.draw_cont += _format_g(width) + " w\n"
if lineCap != 0:
self.draw_cont = "%i J\n" % lineCap + self.draw_cont
if lineJoin != 0:
self.draw_cont = "%i j\n" % lineJoin + self.draw_cont
if dashes not in (None, "", "[] 0"):
self.draw_cont = "%s d\n" % dashes + self.draw_cont
if closePath:
self.draw_cont += "h\n"
self.last_point = None
if color is not None:
self.draw_cont += color_str
if fill is not None:
self.draw_cont += fill_str
if color is not None:
if not even_odd:
self.draw_cont += "B\n"
else:
self.draw_cont += "B*\n"
else:
if not even_odd:
self.draw_cont += "f\n"
else:
self.draw_cont += "f*\n"
else:
self.draw_cont += "S\n"
self.draw_cont += emc
if CheckMorph(morph):
m1 = Matrix(
1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y
)
mat = ~m1 * morph[1] * m1
self.draw_cont = _format_g(JM_TUPLE(mat)) + " cm\n" + self.draw_cont
self.totalcont += "\nq\n" + self.draw_cont + "Q\n"
self.draw_cont = ""
self.last_point = None
return
def commit(self, overlay: bool = True) -> None:
"""Update the page's /Contents object with Shape data.
The argument controls whether data appear in foreground (default)
or background.
"""
CheckParent(self.page) # doc may have died meanwhile
self.totalcont += self.text_cont
self.totalcont = self.totalcont.encode()
if self.totalcont:
if overlay:
self.page.wrap_contents() # ensure a balanced graphics state
# make /Contents object with dummy stream
xref = TOOLS._insert_contents(self.page, b" ", overlay)
# update it with potential compression
self.doc.update_stream(xref, self.totalcont)
self.last_point = None # clean up ...
self.rect = None #
self.draw_cont = "" # for potential ...
self.text_cont = "" # ...
self.totalcont = "" # re-use
| Shape |
python | Farama-Foundation__Gymnasium | docs/tutorials/training_agents/frozenlake_q_learning.py | {
"start": 3623,
"end": 17295
} | class ____:
def __init__(self, epsilon):
self.epsilon = epsilon
def choose_action(self, action_space, state, qtable):
"""Choose an action `a` in the current world state (s)."""
# First we randomize a number
explor_exploit_tradeoff = rng.uniform(0, 1)
# Exploration
if explor_exploit_tradeoff < self.epsilon:
action = action_space.sample()
# Exploitation (taking the biggest Q-value for this state)
else:
# Break ties randomly
# Find the indices where the Q-value equals the maximum value
# Choose a random action from the indices where the Q-value is maximum
max_ids = np.where(qtable[state, :] == max(qtable[state, :]))[0]
action = rng.choice(max_ids)
return action
# %%
# Running the environment
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's instantiate the learner and the explorer.
#
learner = Qlearning(
learning_rate=params.learning_rate,
gamma=params.gamma,
state_size=params.state_size,
action_size=params.action_size,
)
explorer = EpsilonGreedy(
epsilon=params.epsilon,
)
# %%
# This will be our main function to run our environment until the maximum
# number of episodes ``params.total_episodes``. To account for
# stochasticity, we will also run our environment a few times.
#
def run_env():
rewards = np.zeros((params.total_episodes, params.n_runs))
steps = np.zeros((params.total_episodes, params.n_runs))
episodes = np.arange(params.total_episodes)
qtables = np.zeros((params.n_runs, params.state_size, params.action_size))
all_states = []
all_actions = []
for run in range(params.n_runs): # Run several times to account for stochasticity
learner.reset_qtable() # Reset the Q-table between runs
for episode in tqdm(
episodes, desc=f"Run {run}/{params.n_runs} - Episodes", leave=False
):
state = env.reset(seed=params.seed)[0] # Reset the environment
step = 0
done = False
total_rewards = 0
while not done:
action = explorer.choose_action(
action_space=env.action_space, state=state, qtable=learner.qtable
)
# Log all states and actions
all_states.append(state)
all_actions.append(action)
# Take the action (a) and observe the outcome state(s') and reward (r)
new_state, reward, terminated, truncated, info = env.step(action)
done = terminated or truncated
learner.qtable[state, action] = learner.update(
state, action, reward, new_state
)
total_rewards += reward
step += 1
# Our new state is state
state = new_state
# Log all rewards and steps
rewards[episode, run] = total_rewards
steps[episode, run] = step
qtables[run, :, :] = learner.qtable
return rewards, steps, episodes, qtables, all_states, all_actions
# %%
# Visualization
# ~~~~~~~~~~~~~
#
# %%
# To make it easy to plot the results with Seaborn, we'll save the main
# results of the simulation in Pandas dataframes.
#
def postprocess(episodes, params, rewards, steps, map_size):
"""Convert the results of the simulation in dataframes."""
res = pd.DataFrame(
data={
"Episodes": np.tile(episodes, reps=params.n_runs),
"Rewards": rewards.flatten(order="F"),
"Steps": steps.flatten(order="F"),
}
)
res["cum_rewards"] = rewards.cumsum(axis=0).flatten(order="F")
res["map_size"] = np.repeat(f"{map_size}x{map_size}", res.shape[0])
st = pd.DataFrame(data={"Episodes": episodes, "Steps": steps.mean(axis=1)})
st["map_size"] = np.repeat(f"{map_size}x{map_size}", st.shape[0])
return res, st
# %%
# We want to plot the policy the agent has learned in the end. To do that
# we will: 1. extract the best Q-values from the Q-table for each state,
# 2. get the corresponding best action for those Q-values, 3. map each
# action to an arrow so we can visualize it.
#
def qtable_directions_map(qtable, map_size):
"""Get the best learned action & map it to arrows."""
qtable_val_max = qtable.max(axis=1).reshape(map_size, map_size)
qtable_best_action = np.argmax(qtable, axis=1).reshape(map_size, map_size)
directions = {0: "←", 1: "↓", 2: "→", 3: "↑"}
qtable_directions = np.empty(qtable_best_action.flatten().shape, dtype=str)
eps = np.finfo(float).eps # Minimum float number on the machine
for idx, val in enumerate(qtable_best_action.flatten()):
if qtable_val_max.flatten()[idx] > eps:
# Assign an arrow only if a minimal Q-value has been learned as best action
# otherwise since 0 is a direction, it also gets mapped on the tiles where
# it didn't actually learn anything
qtable_directions[idx] = directions[val]
qtable_directions = qtable_directions.reshape(map_size, map_size)
return qtable_val_max, qtable_directions
# %%
# With the following function, we'll plot on the left the last frame of
# the simulation. If the agent learned a good policy to solve the task, we
# expect to see it on the tile of the treasure in the last frame of the
# video. On the right we'll plot the policy the agent has learned. Each
# arrow will represent the best action to choose for each tile/state.
#
def plot_q_values_map(qtable, env, map_size):
"""Plot the last frame of the simulation and the policy learned."""
qtable_val_max, qtable_directions = qtable_directions_map(qtable, map_size)
# Plot the last frame
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15, 5))
ax[0].imshow(env.render())
ax[0].axis("off")
ax[0].set_title("Last frame")
# Plot the policy
sns.heatmap(
qtable_val_max,
annot=qtable_directions,
fmt="",
ax=ax[1],
cmap=sns.color_palette("Blues", as_cmap=True),
linewidths=0.7,
linecolor="black",
xticklabels=[],
yticklabels=[],
annot_kws={"fontsize": "xx-large"},
).set(title="Learned Q-values\nArrows represent best action")
for _, spine in ax[1].spines.items():
spine.set_visible(True)
spine.set_linewidth(0.7)
spine.set_color("black")
plt.show()
# %%
# As a sanity check, we will plot the distributions of states and actions
# with the following function:
#
def plot_states_actions_distribution(states, actions, map_size):
"""Plot the distributions of states and actions."""
labels = {"LEFT": 0, "DOWN": 1, "RIGHT": 2, "UP": 3}
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15, 5))
sns.histplot(data=states, ax=ax[0], kde=True)
ax[0].set_title("States")
sns.histplot(data=actions, ax=ax[1])
ax[1].set_xticks(list(labels.values()), labels=labels.keys())
ax[1].set_title("Actions")
fig.tight_layout()
plt.show()
# %%
# Now we'll be running our agent on a few increasing maps sizes: -
# :math:`4 \times 4`, - :math:`7 \times 7`, - :math:`9 \times 9`, -
# :math:`11 \times 11`.
#
# Putting it all together:
#
map_sizes = [4, 7, 9, 11]
res_all = pd.DataFrame()
st_all = pd.DataFrame()
for map_size in map_sizes:
env = gym.make(
"FrozenLake-v1",
is_slippery=params.is_slippery,
render_mode="rgb_array",
desc=generate_random_map(
size=map_size, p=params.proba_frozen, seed=params.seed
),
)
params = params._replace(action_size=env.action_space.n)
params = params._replace(state_size=env.observation_space.n)
env.action_space.seed(
params.seed
) # Set the seed to get reproducible results when sampling the action space
learner = Qlearning(
learning_rate=params.learning_rate,
gamma=params.gamma,
state_size=params.state_size,
action_size=params.action_size,
)
explorer = EpsilonGreedy(
epsilon=params.epsilon,
)
print(f"Map size: {map_size}x{map_size}")
rewards, steps, episodes, qtables, all_states, all_actions = run_env()
# Save the results in dataframes
res, st = postprocess(episodes, params, rewards, steps, map_size)
res_all = pd.concat([res_all, res])
st_all = pd.concat([st_all, st])
qtable = qtables.mean(axis=0) # Average the Q-table between runs
plot_states_actions_distribution(
states=all_states, actions=all_actions, map_size=map_size
) # Sanity check
plot_q_values_map(qtable, env, map_size)
env.close()
# %%
# Map size: :math:`4 \times 4`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# %%
# |States actions histogram 4x4 map| |Q-values 4x4 map|
#
# .. |States actions histogram 4x4 map| image:: ../../_static/img/tutorials/frozenlake_states_actions_distrib_4x4.png
# .. |Q-values 4x4 map| image:: ../../_static/img/tutorials/frozenlake_q_values_4x4.png
#
#
# %%
# Map size: :math:`7 \times 7`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# %%
# |States actions histogram 7x7 map| |Q-values 7x7 map|
#
# .. |States actions histogram 7x7 map| image:: ../../_static/img/tutorials/frozenlake_states_actions_distrib_7x7.png
# .. |Q-values 7x7 map| image:: ../../_static/img/tutorials/frozenlake_q_values_7x7.png
#
#
# %%
# Map size: :math:`9 \times 9`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# %%
# |States actions histogram 9x9 map| |Q-values 9x9 map|
#
# .. |States actions histogram 9x9 map| image:: ../../_static/img/tutorials/frozenlake_states_actions_distrib_9x9.png
# .. |Q-values 9x9 map| image:: ../../_static/img/tutorials/frozenlake_q_values_9x9.png
#
#
# %%
# Map size: :math:`11 \times 11`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# %%
# |States actions histogram 11x11 map| |Q-values 11x11 map|
#
# .. |States actions histogram 11x11 map| image:: ../../_static/img/tutorials/frozenlake_states_actions_distrib_11x11.png
# .. |Q-values 11x11 map| image:: ../../_static/img/tutorials/frozenlake_q_values_11x11.png
#
#
# %%
# The ``DOWN`` and ``RIGHT`` actions get chosen more often, which makes
# sense as the agent starts at the top left of the map and needs to find
# its way down to the bottom right. Also the bigger the map, the less
# states/tiles further away from the starting state get visited.
#
# %%
# To check if our agent is learning, we want to plot the cumulated sum of
# rewards, as well as the number of steps needed until the end of the
# episode. If our agent is learning, we expect to see the cumulated sum of
# rewards to increase and the number of steps to solve the task to
# decrease.
#
def plot_steps_and_rewards(rewards_df, steps_df):
"""Plot the steps and rewards from dataframes."""
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15, 5))
sns.lineplot(
data=rewards_df, x="Episodes", y="cum_rewards", hue="map_size", ax=ax[0]
)
ax[0].set(ylabel="Cumulated rewards")
sns.lineplot(data=steps_df, x="Episodes", y="Steps", hue="map_size", ax=ax[1])
ax[1].set(ylabel="Averaged steps number")
for axi in ax:
axi.legend(title="map size")
fig.tight_layout()
plt.show()
plot_steps_and_rewards(res_all, st_all)
# %%
# |Steps and rewards|
#
# .. |Steps and rewards| image:: ../../_static/img/tutorials/frozenlake_steps_and_rewards.png
#
#
# %%
# On the :math:`4 \times 4` map, learning converges pretty quickly,
# whereas on the :math:`7 \times 7` map, the agent needs :math:`\sim 300`
# episodes, on the :math:`9 \times 9` map it needs :math:`\sim 800`
# episodes, and the :math:`11 \times 11` map, it needs :math:`\sim 1800`
# episodes to converge. Interestingly, the agent seems to be getting more
# rewards on the :math:`9 \times 9` map than on the :math:`7 \times 7`
# map, which could mean it didn't reach an optimal policy on the
# :math:`7 \times 7` map.
#
# %%
# In the end, if agent doesn't get any rewards, rewards don't get
# propagated in the Q-values, and the agent doesn't learn anything. In my
# experience on this environment using :math:`\epsilon`-greedy and those
# hyperparameters and environment settings, maps having more than
# :math:`11 \times 11` tiles start to be difficult to solve. Maybe using a
# different exploration algorithm could overcome this. The other parameter
# having a big impact is the ``proba_frozen``, the probability of the tile
# being frozen. With too many holes, i.e. :math:`p<0.9`, Q-learning is
# having a hard time in not falling into holes and getting a reward
# signal.
#
# %%
# References
# ----------
#
# - Code inspired by `Deep Reinforcement Learning
# Course <https://simoninithomas.github.io/Deep_reinforcement_learning_Course/>`__
# by Thomas Simonini (http://simoninithomas.com/)
# - `Dissecting Reinforcement
# Learning-Part.2 <https://mpatacchiola.github.io/blog/2017/01/15/dissecting-reinforcement-learning-2.html>`__
# - `David Silver’s course <https://www.davidsilver.uk/teaching/>`__ in
# particular lesson 4 and lesson 5
# - `Q-learning article on
# Wikipedia <https://en.wikipedia.org/wiki/Q-learning>`__
# - `Q-Learning: Off-Policy TD
# Control <http://incompleteideas.net/book/ebook/node65.html>`__ in
# `Reinforcement Learning: An Introduction, by Richard S. Sutton and
# Andrew G. Barto <http://incompleteideas.net/book/ebook/>`__
# - `Epsilon-Greedy
# Q-learning <https://www.baeldung.com/cs/epsilon-greedy-q-learning>`__
# - `Introduction to Reinforcement
# Learning <https://gibberblot.github.io/rl-notes/index.html>`__ by Tim
# Miller (University of Melbourne)
#
| EpsilonGreedy |
python | getsentry__sentry | src/sentry/deletions/defaults/platform_external_issue.py | {
"start": 174,
"end": 402
} | class ____(ModelDeletionTask[PlatformExternalIssue]):
def mark_deletion_in_progress(self, instance_list: Sequence[PlatformExternalIssue]) -> None:
# No status to track this.
pass
| PlatformExternalIssueDeletionTask |
python | google__pytype | pytype/rewrite/abstract/classes.py | {
"start": 618,
"end": 5270
} | class ____(base.BaseValue):
"""Class with a name and members."""
def __init__(
self,
ctx: base.ContextType,
name: str,
members: dict[str, base.BaseValue],
bases: Sequence['SimpleClass'] = (),
keywords: Mapping[str, base.BaseValue] = datatypes.EMPTY_MAP,
module: str | None = None,
):
super().__init__(ctx)
self.name = name
self.members = members
self.bases = bases
self.keywords = keywords
self.module = module
self._canonical_instance: Optional['FrozenInstance'] = None
self._mro: Sequence['SimpleClass'] | None = None
if isinstance((init := members.get('__init__')),
functions_lib.SimpleFunction):
# An __init__ method is required to return None.
for sig in init.signatures:
if 'return' not in sig.annotations:
sig.annotations['return'] = ctx.consts[None]
# These methods are attributes of individual classes so that they can be
# easily customized. For example, unittest.TestCase would want to add
# 'setUpClass' to its setup methods and 'setUp' to its initializers.
# classmethods called on a class immediately after creation
self.setup_methods: list[str] = []
# classmethod called to create a class instance
self.constructor = '__new__'
# instance methods called on an instance immediately after creation
self.initializers = ['__init__']
def __repr__(self):
return f'SimpleClass({self.full_name})'
@property
def _attrs(self):
return (self.module, self.name)
@property
def full_name(self):
if self.module:
return f'{self.module}.{self.name}'
else:
return self.name
@property
def metaclass(self) -> base.BaseValue | None:
for cls in self.mro():
if 'metaclass' in cls.keywords:
return cls.keywords['metaclass']
return None
def get_attribute(self, name: str) -> base.BaseValue | None:
if name in self.members:
return self.members[name]
mro = self.mro()
if len(mro) > 1:
return mro[1].get_attribute(name)
return None
def set_attribute(self, name: str, value: base.BaseValue) -> None:
# SimpleClass is used to model imported classes, which we treat as frozen.
log.info('Ignoring attribute set on %r: %s -> %r', self, name, value)
def instantiate(self) -> 'FrozenInstance':
"""Creates an instance of this class."""
if self._canonical_instance:
if self.module not in ('builtins', 'typing'):
log.info('Getting cached instance of class %s', self.full_name)
return self._canonical_instance
log.info('Instantiating class %s', self.full_name)
for setup_method_name in self.setup_methods:
setup_method = self.get_attribute(setup_method_name)
if isinstance(setup_method, functions_lib.InterpreterFunction):
_ = setup_method.bind_to(self).analyze()
constructor = self.get_attribute(self.constructor)
if constructor and constructor.full_name != 'builtins.object.__new__':
log.error('Custom __new__ not yet implemented')
instance = MutableInstance(self._ctx, self)
for initializer_name in self.initializers:
initializer = self.get_attribute(initializer_name)
if isinstance(initializer, functions_lib.InterpreterFunction):
_ = initializer.bind_to(instance).analyze()
self._canonical_instance = frozen_instance = instance.freeze()
return frozen_instance
def call(self, args: functions_lib.Args) -> ClassCallReturn:
constructor = self.get_attribute(self.constructor)
if constructor:
log.error('Custom __new__ not yet implemented')
instance = MutableInstance(self._ctx, self)
for initializer_name in self.initializers:
initializer = self.get_attribute(initializer_name)
if isinstance(initializer, functions_lib.InterpreterFunction):
_ = initializer.bind_to(instance).call(args)
return ClassCallReturn(instance)
def mro(self) -> Sequence['SimpleClass']:
if self._mro:
return self._mro
if self.full_name == 'builtins.object':
self._mro = mro = [self]
return mro
bases = list(self.bases)
obj_type = self._ctx.types[object]
if not bases or bases[-1] != obj_type:
bases.append(obj_type)
mro_bases = [[self]] + [list(base.mro()) for base in bases] + [bases]
self._mro = mro = mro_lib.MROMerge(mro_bases)
return mro
def set_type_parameters(self, params):
# A dummy implementation to let type annotations with parameters not crash.
del params # not implemented yet
# We eventually want to return a new class with the type parameters set
return self
| SimpleClass |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/tests/test_completion_widget.py | {
"start": 852,
"end": 3343
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" Create the application for the test case.
"""
cls._app = QtWidgets.QApplication.instance()
if cls._app is None:
cls._app = QtWidgets.QApplication([])
cls._app.setQuitOnLastWindowClosed(False)
@classmethod
def tearDownClass(cls):
""" Exit the application.
"""
QtWidgets.QApplication.quit()
def setUp(self):
""" Create the main widgets (ConsoleWidget)
"""
self.console = ConsoleWidget()
self.text_edit = self.console._control
def test_droplist_completer_shows(self):
w = CompletionWidget(self.console)
w.show_items(self.text_edit.textCursor(), ["item1", "item2", "item3"])
self.assertTrue(w.isVisible())
def test_droplist_completer_keyboard(self):
w = CompletionWidget(self.console)
w.show_items(self.text_edit.textCursor(), ["item1", "item2", "item3"])
QTest.keyClick(w, QtCore.Qt.Key_PageDown)
QTest.keyClick(w, QtCore.Qt.Key_Enter)
self.assertEqual(self.text_edit.toPlainText(), "item3")
def test_droplist_completer_mousepick(self):
leftButton = QtCore.Qt.LeftButton
w = CompletionWidget(self.console)
w.show_items(self.text_edit.textCursor(), ["item1", "item2", "item3"])
QTest.mouseClick(w.viewport(), leftButton, pos=QtCore.QPoint(19, 8))
QTest.mouseRelease(w.viewport(), leftButton, pos=QtCore.QPoint(19, 8))
QTest.mouseDClick(w.viewport(), leftButton, pos=QtCore.QPoint(19, 8))
self.assertEqual(self.text_edit.toPlainText(), "item1")
self.assertFalse(w.isVisible())
def test_common_path_complete(self):
with TemporaryDirectory() as tmpdir:
items = [
os.path.join(tmpdir, "common/common1/item1"),
os.path.join(tmpdir, "common/common1/item2"),
os.path.join(tmpdir, "common/common1/item3")]
for item in items:
os.makedirs(item)
w = CompletionWidget(self.console)
w.show_items(self.text_edit.textCursor(), items)
self.assertEqual(w.currentItem().text(), '/item1')
QTest.keyClick(w, QtCore.Qt.Key_Down)
self.assertEqual(w.currentItem().text(), '/item2')
QTest.keyClick(w, QtCore.Qt.Key_Down)
self.assertEqual(w.currentItem().text(), '/item3')
| TestCompletionWidget |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 346686,
"end": 350816
} | class ____(StatNode):
"""
Represents a Python with statement.
Implemented by the WithTransform as follows:
MGR = EXPR
EXIT = MGR.__exit__
VALUE = MGR.__enter__()
EXC = True
try:
try:
TARGET = VALUE # optional
BODY
except:
EXC = False
if not EXIT(*EXCINFO):
raise
finally:
if EXC:
EXIT(None, None, None)
MGR = EXIT = VALUE = None
"""
# manager The with statement manager object
# target ExprNode the target lhs of the __enter__() call
# body StatNode
# enter_call ExprNode the call to the __enter__() method
# exit_var String the cname of the __exit__() method reference
child_attrs = ["manager", "enter_call", "target", "body"]
enter_call = None
target_temp = None
def analyse_declarations(self, env):
self.manager.analyse_declarations(env)
self.enter_call.analyse_declarations(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.manager = self.manager.analyse_types(env)
if self.manager.type.is_cython_lock_type:
return CythonLockStatNode.from_withstat(self).analyse_expressions(env)
self.enter_call = self.enter_call.analyse_types(env)
if self.target:
# set up target_temp before descending into body (which uses it)
from .ExprNodes import TempNode
self.target_temp = TempNode(self.enter_call.pos, self.enter_call.type)
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.manager.generate_function_definitions(env, code)
self.enter_call.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.putln("/*with:*/ {")
self.manager.generate_evaluation_code(code)
self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_LookupSpecial(%s, %s); %s" % (
self.exit_var,
self.manager.py_result(),
code.intern_identifier(EncodedString('__aexit__' if self.is_async else '__exit__')),
code.error_goto_if_null(self.exit_var, self.pos),
))
code.put_gotref(self.exit_var, py_object_type)
# need to free exit_var in the face of exceptions during setup
old_error_label = code.new_error_label()
intermediate_error_label = code.error_label
self.enter_call.generate_evaluation_code(code)
if self.target:
# The temp result will be cleaned up by the WithTargetAssignmentStatNode
# after assigning its result to the target of the 'with' statement.
self.target_temp.allocate(code)
self.enter_call.make_owned_reference(code)
code.putln("%s = %s;" % (self.target_temp.result(), self.enter_call.result()))
self.enter_call.generate_post_assignment_code(code)
else:
self.enter_call.generate_disposal_code(code)
self.enter_call.free_temps(code)
self.manager.generate_disposal_code(code)
self.manager.free_temps(code)
code.error_label = old_error_label
self.body.generate_execution_code(code)
if code.label_used(intermediate_error_label):
step_over_label = code.new_label()
code.put_goto(step_over_label)
code.put_label(intermediate_error_label)
code.put_decref_clear(self.exit_var, py_object_type)
code.put_goto(old_error_label)
code.put_label(step_over_label)
code.funcstate.release_temp(self.exit_var)
code.putln('}')
| WithStatNode |
python | Textualize__textual | src/textual/widgets/_option_list.py | {
"start": 2615,
"end": 3009
} | class ____:
"""Cached line information."""
lines: list[tuple[int, int]] = field(default_factory=list)
heights: dict[int, int] = field(default_factory=dict)
index_to_line: dict[int, int] = field(default_factory=dict)
def clear(self) -> None:
"""Reset all caches."""
self.lines.clear()
self.heights.clear()
self.index_to_line.clear()
| _LineCache |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/customizing-existing-component/7-component.py | {
"start": 183,
"end": 580
} | class ____(SlingReplicationCollectionComponent):
def execute(
self,
context: dg.AssetExecutionContext,
sling: SlingResource,
replication_spec_model: SlingReplicationSpecModel,
) -> Iterator:
context.log.info("*******************CUSTOM*************************")
return sling.replicate(context=context, debug=True)
| CustomSlingReplicationComponent |
python | readthedocs__readthedocs.org | readthedocs/proxito/views/serve.py | {
"start": 25806,
"end": 29560
} | class ____(CDNCacheControlMixin, CDNCacheTagsMixin, ServeDocsMixin, View):
"""Serve robots.txt from the domain's root."""
# Always cache this view, since it's the same for all users.
cache_response = True
# Extra cache tag to invalidate only this view if needed.
project_cache_tag = "robots.txt"
def get(self, request):
"""
Serve custom user's defined ``/robots.txt``.
If the project is delisted or is a spam project, we force a special robots.txt.
If the user added a ``robots.txt`` in the "default version" of the
project, we serve it directly.
"""
project = request.unresolved_domain.project
if project.delisted:
return render(
request,
"robots.delisted.txt",
content_type="text/plain",
)
# Verify if the project is marked as spam and return a custom robots.txt
if "readthedocsext.spamfighting" in settings.INSTALLED_APPS:
from readthedocsext.spamfighting.utils import is_robotstxt_denied # noqa
if is_robotstxt_denied(project):
return render(
request,
"robots.spam.txt",
content_type="text/plain",
)
# Use the ``robots.txt`` file from the default version configured
version_slug = project.get_default_version()
version = project.versions.get(slug=version_slug)
no_serve_robots_txt = any(
[
# If the default version is private or,
version.privacy_level == PRIVATE,
# default version is not active or,
not version.active,
# default version is not built
not version.built,
]
)
if no_serve_robots_txt:
# ... we do return a 404
raise Http404()
structlog.contextvars.bind_contextvars(
project_slug=project.slug,
version_slug=version.slug,
)
try:
response = self._serve_docs(
request=request,
project=project,
version=version,
filename="robots.txt",
check_if_exists=True,
)
log.info("Serving custom robots.txt file.")
return response
except StorageFileNotFound:
pass
# Serve default robots.txt
sitemap_url = "{scheme}://{domain}/sitemap.xml".format(
scheme="https",
domain=project.subdomain(),
)
context = {
"sitemap_url": sitemap_url,
"hidden_paths": self._get_hidden_paths(project),
}
return render(
request,
"robots.txt",
context,
content_type="text/plain",
)
def _get_hidden_paths(self, project):
"""Get the absolute paths of the public hidden versions of `project`."""
hidden_versions = project.versions(manager=INTERNAL).public().filter(hidden=True)
resolver = Resolver()
hidden_paths = [
resolver.resolve_path(project, version_slug=version.slug) for version in hidden_versions
]
return hidden_paths
def _get_project(self):
# Method used by the CDNCacheTagsMixin class.
return self.request.unresolved_domain.project
def _get_version(self):
# Method used by the CDNCacheTagsMixin class.
# This view isn't explicitly mapped to a version,
# but it can be when we serve a custom robots.txt file.
# TODO: refactor how we set cache tags to avoid this.
return None
| ServeRobotsTXTBase |
python | openai__openai-python | src/openai/types/beta/threads/refusal_content_block.py | {
"start": 197,
"end": 310
} | class ____(BaseModel):
refusal: str
type: Literal["refusal"]
"""Always `refusal`."""
| RefusalContentBlock |
python | getsentry__sentry | src/sentry/search/events/fields.py | {
"start": 29112,
"end": 30593
} | class ____(FunctionArg):
def __init__(
self,
name: str,
unquote: bool | None = False,
unescape_quotes: bool | None = False,
optional_unquote: bool | None = False,
allowed_strings: list[str] | None = None,
):
"""
:param str name: The name of the function, this refers to the name to invoke.
:param boolean unquote: Whether to try unquoting the arg or not
:param boolean unescape_quotes: Whether quotes within the string should be unescaped
:param boolean optional_unquote: Don't error when unable to unquote
"""
super().__init__(name)
self.unquote = unquote
self.unescape_quotes = unescape_quotes
self.optional_unquote = optional_unquote
self.allowed_strings = allowed_strings
def normalize(self, value: str, params: ParamsType, combinator: Combinator | None) -> str:
if self.unquote:
if len(value) < 2 or value[0] != '"' or value[-1] != '"':
if not self.optional_unquote:
raise InvalidFunctionArgument("string should be quoted")
else:
value = value[1:-1]
if self.unescape_quotes:
value = re.sub(r'\\"', '"', value)
if self.allowed_strings:
if value not in self.allowed_strings:
raise InvalidFunctionArgument(f"string must be one of {self.allowed_strings}")
return f"'{value}'"
| StringArg |
python | pytest-dev__pytest | src/_pytest/_code/code.py | {
"start": 48002,
"end": 48240
} | class ____(TerminalRepr):
lines: Sequence[str]
style: ClassVar[TracebackStyle] = "native"
def toterminal(self, tw: TerminalWriter) -> None:
tw.write("".join(self.lines))
@dataclasses.dataclass(eq=False)
| ReprEntryNative |
python | getsentry__sentry | tests/sentry/eventtypes/test_nel.py | {
"start": 89,
"end": 482
} | class ____(TestCase):
def test_get_metadata(self) -> None:
inst = NelEvent()
data = {
"logentry": {"formatted": "connection / tcp.refused"},
"request": {"url": "https://example.com/"},
}
assert inst.get_metadata(data) == {
"title": "connection / tcp.refused",
"uri": "https://example.com/",
}
| NelEventTest |
python | getsentry__sentry-python | tests/integrations/tornado/test_tornado.py | {
"start": 1372,
"end": 13707
} | class ____(RequestHandler):
async def get(self):
sentry_sdk.get_isolation_scope().set_tag("foo", "42")
return b"hello"
async def post(self):
sentry_sdk.get_isolation_scope().set_tag("foo", "43")
return b"hello"
def test_basic(tornado_testcase, sentry_init, capture_events):
sentry_init(integrations=[TornadoIntegration()], send_default_pii=True)
events = capture_events()
client = tornado_testcase(Application([(r"/hi", CrashingHandler)]))
response = client.fetch(
"/hi?foo=bar", headers={"Cookie": "name=value; name2=value2; name3=value3"}
)
assert response.code == 500
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert exception["mechanism"]["type"] == "tornado"
request = event["request"]
host = request["headers"]["Host"]
assert event["request"] == {
"env": {"REMOTE_ADDR": "127.0.0.1"},
"headers": {
"Accept-Encoding": "gzip",
"Connection": "close",
"Cookie": "name=value; name2=value2; name3=value3",
**request["headers"],
},
"cookies": {"name": "value", "name2": "value2", "name3": "value3"},
"method": "GET",
"query_string": "foo=bar",
"url": "http://{host}/hi".format(host=host),
}
assert event["tags"] == {"foo": "42"}
assert (
event["transaction"]
== "tests.integrations.tornado.test_tornado.CrashingHandler.get"
)
assert event["transaction_info"] == {"source": "component"}
assert not sentry_sdk.get_isolation_scope()._tags
@pytest.mark.parametrize(
"handler,code",
[
(CrashingHandler, 500),
(HelloHandler, 200),
],
)
def test_transactions(tornado_testcase, sentry_init, capture_events, handler, code):
sentry_init(integrations=[TornadoIntegration()], traces_sample_rate=1.0)
events = capture_events()
client = tornado_testcase(Application([(r"/hi", handler)]))
with start_transaction(name="client") as span:
pass
response = client.fetch(
"/hi", method="POST", body=b"heyoo", headers=dict(span.iter_headers())
)
assert response.code == code
if code == 200:
client_tx, server_tx = events
server_error = None
else:
client_tx, server_error, server_tx = events
assert client_tx["type"] == "transaction"
assert client_tx["transaction"] == "client"
assert client_tx["transaction_info"] == {
"source": "custom"
} # because this is just the start_transaction() above.
if server_error is not None:
assert server_error["exception"]["values"][0]["type"] == "ZeroDivisionError"
assert (
server_error["transaction"]
== "tests.integrations.tornado.test_tornado.CrashingHandler.post"
)
assert server_error["transaction_info"] == {"source": "component"}
if code == 200:
assert (
server_tx["transaction"]
== "tests.integrations.tornado.test_tornado.HelloHandler.post"
)
else:
assert (
server_tx["transaction"]
== "tests.integrations.tornado.test_tornado.CrashingHandler.post"
)
assert server_tx["transaction_info"] == {"source": "component"}
assert server_tx["type"] == "transaction"
request = server_tx["request"]
host = request["headers"]["Host"]
assert server_tx["request"] == {
"env": {"REMOTE_ADDR": "127.0.0.1"},
"headers": {
"Accept-Encoding": "gzip",
"Connection": "close",
**request["headers"],
},
"method": "POST",
"query_string": "",
"data": {"heyoo": [""]},
"url": "http://{host}/hi".format(host=host),
}
assert (
client_tx["contexts"]["trace"]["trace_id"]
== server_tx["contexts"]["trace"]["trace_id"]
)
if server_error is not None:
assert (
server_error["contexts"]["trace"]["trace_id"]
== server_tx["contexts"]["trace"]["trace_id"]
)
def test_400_not_logged(tornado_testcase, sentry_init, capture_events):
sentry_init(integrations=[TornadoIntegration()])
events = capture_events()
class CrashingHandler(RequestHandler):
def get(self):
raise HTTPError(400, "Oops")
client = tornado_testcase(Application([(r"/", CrashingHandler)]))
response = client.fetch("/")
assert response.code == 400
assert not events
def test_user_auth(tornado_testcase, sentry_init, capture_events):
sentry_init(integrations=[TornadoIntegration()], send_default_pii=True)
events = capture_events()
class UserHandler(RequestHandler):
def get(self):
1 / 0
def get_current_user(self):
return 42
class NoUserHandler(RequestHandler):
def get(self):
1 / 0
client = tornado_testcase(
Application([(r"/auth", UserHandler), (r"/noauth", NoUserHandler)])
)
# has user
response = client.fetch("/auth")
assert response.code == 500
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert event["user"] == {"is_authenticated": True}
events.clear()
# has no user
response = client.fetch("/noauth")
assert response.code == 500
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
assert "user" not in event
def test_formdata(tornado_testcase, sentry_init, capture_events):
sentry_init(integrations=[TornadoIntegration()], send_default_pii=True)
events = capture_events()
class FormdataHandler(RequestHandler):
def post(self):
raise ValueError(json.dumps(sorted(self.request.body_arguments)))
client = tornado_testcase(Application([(r"/form", FormdataHandler)]))
response = client.fetch(
"/form?queryarg=1",
method="POST",
headers={"Content-Type": "application/x-www-form-urlencoded"},
body=b"field1=value1&field2=value2",
)
assert response.code == 500
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["value"] == '["field1", "field2"]'
assert event["request"]["data"] == {"field1": ["value1"], "field2": ["value2"]}
def test_json(tornado_testcase, sentry_init, capture_events):
sentry_init(integrations=[TornadoIntegration()], send_default_pii=True)
events = capture_events()
class FormdataHandler(RequestHandler):
def post(self):
raise ValueError(json.dumps(sorted(self.request.body_arguments)))
client = tornado_testcase(Application([(r"/form", FormdataHandler)]))
response = client.fetch(
"/form?queryarg=1",
method="POST",
headers={"Content-Type": "application/json"},
body=b"""
{"foo": {"bar": 42}}
""",
)
assert response.code == 500
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["value"] == "[]"
assert event
assert event["request"]["data"] == {"foo": {"bar": 42}}
def test_error_has_new_trace_context_performance_enabled(
tornado_testcase, sentry_init, capture_events
):
"""
Check if an 'trace' context is added to errros and transactions when performance monitoring is enabled.
"""
sentry_init(
integrations=[TornadoIntegration()],
traces_sample_rate=1.0,
)
events = capture_events()
client = tornado_testcase(Application([(r"/hi", CrashingWithMessageHandler)]))
client.fetch("/hi")
(msg_event, error_event, transaction_event) = events
assert "trace" in msg_event["contexts"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert "trace" in error_event["contexts"]
assert "trace_id" in error_event["contexts"]["trace"]
assert "trace" in transaction_event["contexts"]
assert "trace_id" in transaction_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
== transaction_event["contexts"]["trace"]["trace_id"]
)
def test_error_has_new_trace_context_performance_disabled(
tornado_testcase, sentry_init, capture_events
):
"""
Check if an 'trace' context is added to errros and transactions when performance monitoring is disabled.
"""
sentry_init(
integrations=[TornadoIntegration()],
traces_sample_rate=None, # this is the default, just added for clarity
)
events = capture_events()
client = tornado_testcase(Application([(r"/hi", CrashingWithMessageHandler)]))
client.fetch("/hi")
(msg_event, error_event) = events
assert "trace" in msg_event["contexts"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert "trace" in error_event["contexts"]
assert "trace_id" in error_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
)
def test_error_has_existing_trace_context_performance_enabled(
tornado_testcase, sentry_init, capture_events
):
"""
Check if an 'trace' context is added to errros and transactions
from the incoming 'sentry-trace' header when performance monitoring is enabled.
"""
sentry_init(
integrations=[TornadoIntegration()],
traces_sample_rate=1.0,
)
events = capture_events()
trace_id = "471a43a4192642f0b136d5159a501701"
parent_span_id = "6e8f22c393e68f19"
parent_sampled = 1
sentry_trace_header = "{}-{}-{}".format(trace_id, parent_span_id, parent_sampled)
headers = {"sentry-trace": sentry_trace_header}
client = tornado_testcase(Application([(r"/hi", CrashingWithMessageHandler)]))
client.fetch("/hi", headers=headers)
(msg_event, error_event, transaction_event) = events
assert "trace" in msg_event["contexts"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert "trace" in error_event["contexts"]
assert "trace_id" in error_event["contexts"]["trace"]
assert "trace" in transaction_event["contexts"]
assert "trace_id" in transaction_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
== transaction_event["contexts"]["trace"]["trace_id"]
== "471a43a4192642f0b136d5159a501701"
)
def test_error_has_existing_trace_context_performance_disabled(
tornado_testcase, sentry_init, capture_events
):
"""
Check if an 'trace' context is added to errros and transactions
from the incoming 'sentry-trace' header when performance monitoring is disabled.
"""
sentry_init(
integrations=[TornadoIntegration()],
traces_sample_rate=None, # this is the default, just added for clarity
)
events = capture_events()
trace_id = "471a43a4192642f0b136d5159a501701"
parent_span_id = "6e8f22c393e68f19"
parent_sampled = 1
sentry_trace_header = "{}-{}-{}".format(trace_id, parent_span_id, parent_sampled)
headers = {"sentry-trace": sentry_trace_header}
client = tornado_testcase(Application([(r"/hi", CrashingWithMessageHandler)]))
client.fetch("/hi", headers=headers)
(msg_event, error_event) = events
assert "trace" in msg_event["contexts"]
assert "trace_id" in msg_event["contexts"]["trace"]
assert "trace" in error_event["contexts"]
assert "trace_id" in error_event["contexts"]["trace"]
assert (
msg_event["contexts"]["trace"]["trace_id"]
== error_event["contexts"]["trace"]["trace_id"]
== "471a43a4192642f0b136d5159a501701"
)
def test_span_origin(tornado_testcase, sentry_init, capture_events):
sentry_init(integrations=[TornadoIntegration()], traces_sample_rate=1.0)
events = capture_events()
client = tornado_testcase(Application([(r"/hi", CrashingHandler)]))
client.fetch(
"/hi?foo=bar", headers={"Cookie": "name=value; name2=value2; name3=value3"}
)
(_, event) = events
assert event["contexts"]["trace"]["origin"] == "auto.http.tornado"
| HelloHandler |
python | getsentry__sentry | src/sentry/api/endpoints/organization_onboarding_tasks.py | {
"start": 744,
"end": 3260
} | class ____(OrganizationEndpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.TELEMETRY_EXPERIENCE
permission_classes = (OnboardingTaskPermission,)
def post(self, request: Request, organization) -> Response:
task_id = onboarding_tasks.get_task_lookup_by_key(request.data["task"])
if task_id is None:
return Response({"detail": "Invalid task key"}, status=422)
status_value = request.data.get("status")
completion_seen = request.data.get("completionSeen")
if status_value is None and completion_seen is None:
return Response({"detail": "completionSeen or status must be provided"}, status=422)
status = onboarding_tasks.get_status_lookup_by_key(status_value)
if status_value and status is None:
return Response({"detail": "Invalid status key"}, status=422)
# Cannot skip unskippable tasks
if (
status == OnboardingTaskStatus.SKIPPED
and task_id not in onboarding_tasks.get_skippable_tasks(organization)
):
return Response(status=422)
values = {}
if status:
values["status"] = status
values["date_completed"] = timezone.now()
if completion_seen:
values["completion_seen"] = timezone.now()
rows_affected, created = onboarding_tasks.create_or_update_onboarding_task(
organization=organization,
task=task_id,
user=request.user,
values=values,
)
if created and task_id == OnboardingTask.FIRST_PROJECT:
scope = sentry_sdk.get_current_scope()
scope.set_extra("org", organization.id)
sentry_sdk.capture_message(
f"Onboarding task {task_id} was created unexpectedly. It should have been updated instead.",
level="warning",
)
if rows_affected or created:
onboarding_tasks.try_mark_onboarding_complete(organization.id)
return Response(status=204)
def get(self, request: Request, organization: Organization) -> Response:
tasks_to_serialize = list(
onboarding_tasks.fetch_onboarding_tasks(organization, request.user)
)
serialized_tasks = serialize(tasks_to_serialize, request.user)
return Response({"onboardingTasks": serialized_tasks}, status=200)
| OrganizationOnboardingTaskEndpoint |
python | django-debug-toolbar__django-debug-toolbar | tests/test_forms.py | {
"start": 369,
"end": 572
} | class ____(forms.Form):
value = forms.CharField()
# Include a datetime in the tests because it's not serializable back
# to a datetime by SignedDataForm
date = forms.DateTimeField()
| FooForm |
python | getsentry__sentry | src/sentry/models/project.py | {
"start": 7006,
"end": 35272
} | class ____(Model):
from sentry.models.projectteam import ProjectTeam
"""
Projects are permission based namespaces which generally
are the top level entry point for all data.
"""
__relocation_scope__ = RelocationScope.Organization
slug = SentrySlugField(max_length=PROJECT_SLUG_MAX_LENGTH)
# DEPRECATED do not use, prefer slug
name = models.CharField(max_length=200)
forced_color = models.CharField(max_length=6, null=True, blank=True)
organization = FlexibleForeignKey("sentry.Organization")
teams = models.ManyToManyField("sentry.Team", related_name="teams", through=ProjectTeam)
public = models.BooleanField(default=False)
date_added = models.DateTimeField(default=timezone.now)
status = BoundedPositiveIntegerField(
default=0,
choices=(
(ObjectStatus.ACTIVE, _("Active")),
(ObjectStatus.PENDING_DELETION, _("Pending Deletion")),
(ObjectStatus.DELETION_IN_PROGRESS, _("Deletion in Progress")),
),
db_index=True,
)
# projects that were created before this field was present
# will have their first_event field set to date_added
first_event = models.DateTimeField(null=True)
template = FlexibleForeignKey("sentry.ProjectTemplate", null=True)
# external_id for the projects managed/provisioned through the 3rd party
external_id = models.CharField(max_length=256, null=True)
class flags(TypedClassBitField):
# WARNING: Only add flags to the bottom of this list
# bitfield flags are dependent on their order and inserting/removing
# flags from the middle of the list will cause bits to shift corrupting
# existing data.
# This Project has sent release data
has_releases: bool
# This Project has issue alerts targeting
has_issue_alerts_targeting: bool
# This Project has sent transactions
has_transactions: bool
# This Project has filters
has_alert_filters: bool
# This Project has sessions
has_sessions: bool
# This Project has sent profiles
has_profiles: bool
# This Project has sent replays
has_replays: bool
# This project has sent feedbacks
has_feedbacks: bool
# This project has sent new feedbacks, from the user-initiated widget
has_new_feedbacks: bool
# spike protection flags are DEPRECATED
spike_protection_error_currently_active: bool
spike_protection_transaction_currently_active: bool
spike_protection_attachment_currently_active: bool
# This Project has event with minified stack trace
has_minified_stack_trace: bool
# This Project has cron monitors
has_cron_monitors: bool
# This Project has sent check-ins
has_cron_checkins: bool
# This Project has event with sourcemaps
has_sourcemaps: bool
# This Project has custom metrics
has_custom_metrics: bool
# `has_high_priority_alerts` is DEPRECATED
has_high_priority_alerts: bool
# This Project has sent insight request spans
has_insights_http: bool
# This Project has sent insight db spans
has_insights_db: bool
# This Project has sent insight assets spans
has_insights_assets: bool
# This Project has sent insight app starts spans
has_insights_app_start: bool
# This Project has sent insight screen load spans
has_insights_screen_load: bool
# This Project has sent insight vitals spans
has_insights_vitals: bool
# This Project has sent insight caches spans
has_insights_caches: bool
# This Project has sent insight queues spans
has_insights_queues: bool
# No longer used, use has_insights_agent_monitoring instead
has_insights_llm_monitoring: bool
# This Project has sent feature flags
has_flags: bool
# This Project has sent insight agent monitoring spans
has_insights_agent_monitoring: bool
# This Project has sent insight MCP spans
has_insights_mcp: bool
# This project has sent logs
has_logs: bool
# This project has sent trace metrics
has_trace_metrics: bool
bitfield_default = 10
objects: ClassVar[ProjectManager] = ProjectManager(cache_fields=["pk"])
platform = models.CharField(max_length=64, null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_project"
unique_together = (("organization", "slug"), ("organization", "external_id"))
__repr__ = sane_repr("team_id", "name", "slug", "organization_id")
def __str__(self) -> str:
return f"{self.name} ({self.slug})"
def next_short_id(self, delta: int = 1) -> int:
from sentry.models.counter import Counter
with (
sentry_sdk.start_span(op="project.next_short_id") as span,
metrics.timer("project.next_short_id"),
):
span.set_data("project_id", self.id)
span.set_data("project_slug", self.slug)
return Counter.increment(self, delta)
def _save_project(self, *args, **kwargs):
if settings.SENTRY_USE_SNOWFLAKE:
snowflake_redis_key = "project_snowflake_key"
save_with_snowflake_id(
instance=self,
snowflake_redis_key=snowflake_redis_key,
save_callback=lambda: super(Project, self).save(*args, **kwargs),
)
else:
super().save(*args, **kwargs)
def save(self, *args, **kwargs):
if getattr(self, "id", None) is not None:
# no need to acquire lock if we're updating an existing project
self._save_project(*args, **kwargs)
return
# when project is created, we need to acquire a lock to ensure that the generated slug is unique
lock = locks.get(f"slug:project:{self.organization_id}", duration=5, name="project_slug")
with TimedRetryPolicy(10)(lock.acquire):
if not self.slug:
slugify_instance(
self,
self.name,
organization=self.organization,
reserved=RESERVED_PROJECT_SLUGS,
max_length=50,
)
self._save_project(*args, **kwargs)
def get_absolute_url(self, params=None):
path = f"/organizations/{self.organization.slug}/issues/"
params = {} if params is None else params
params["project"] = self.id
query = None
if params:
query = urlencode(params)
return self.organization.absolute_url(path, query=query)
def is_internal_project(self):
for value in (settings.SENTRY_FRONTEND_PROJECT, settings.SENTRY_PROJECT):
if str(self.id) == str(value) or str(self.slug) == str(value):
return True
return False
@property
def option_manager(self) -> ProjectOptionManager:
from sentry.models.options.project_option import ProjectOption
return ProjectOption.objects
@property
def template_manager(self) -> ProjectTemplateOptionManager:
from sentry.models.options.project_template_option import ProjectTemplateOption
return ProjectTemplateOption.objects
def get_option(
self, key: str, default: Any | None = None, validate: Callable[[object], bool] | None = None
) -> Any:
return self.option_manager.get_value(self, key, default, validate)
def update_option(self, key: str, value: Any, reload_cache: bool = True) -> bool:
"""
Updates a project option for this project.
:param reload_cache: Invalidate the project config and reload the
cache. Do not call this with `False` unless you know for sure that
it's fine to keep the cached project config.
"""
return self.option_manager.set_value(self, key, value, reload_cache=reload_cache)
def delete_option(self, key: str) -> None:
self.option_manager.unset_value(self, key)
@property
def color(self):
if self.forced_color is not None:
return f"#{self.forced_color}"
assert self.slug is not None
return get_hashed_color(self.slug.upper())
@property
def member_set(self):
""":returns a QuerySet of all Users that belong to this Project"""
from sentry.models.organizationmember import OrganizationMember
return self.organization.member_set.filter(
id__in=OrganizationMember.objects.filter(
organizationmemberteam__is_active=True,
organizationmemberteam__team__in=self.teams.all(),
).values("id"),
user_is_active=True,
user_id__isnull=False,
).distinct()
def get_members_as_rpc_users(self) -> list[RpcUser]:
member_ids = self.member_set.values_list("user_id", flat=True)
return user_service.get_many_by_id(ids=list(member_ids))
def get_audit_log_data(self):
return {
"id": self.id,
"slug": self.slug,
"name": self.name,
"status": self.status,
"public": self.public,
}
def get_full_name(self):
return self.slug
def transfer_to(self, organization: Organization) -> None:
from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion
from sentry.incidents.models.alert_rule import AlertRule
from sentry.integrations.models.external_issue import ExternalIssue
from sentry.integrations.models.repository_project_path_config import (
RepositoryProjectPathConfig,
)
from sentry.models.environment import Environment, EnvironmentProject
from sentry.models.projectcodeowners import ProjectCodeOwners
from sentry.models.projectteam import ProjectTeam
from sentry.models.releaseprojectenvironment import ReleaseProjectEnvironment
from sentry.models.releases.release_project import ReleaseProject
from sentry.models.rule import Rule
from sentry.monitors.models import Monitor, MonitorEnvironment, MonitorStatus
from sentry.snuba.models import SnubaQuery
from sentry.workflow_engine.models import DataConditionGroup, DataSource, Detector, Workflow
old_org_id = self.organization_id
org_changed = old_org_id != organization.id
self.organization = organization
try:
with transaction.atomic(router.db_for_write(Project)):
self.update(organization=organization)
except IntegrityError:
slugify_instance(self, self.name, organization=organization, max_length=50)
self.update(slug=self.slug, organization=organization)
# Both environments and releases are bound at an organization level.
# Due to this, when you transfer a project into another org, we have to
# handle this behavior somehow. We really only have two options here:
# * Copy over all releases/environments into the new org and handle de-duping
# * Delete the bindings and let them reform with new data.
# We're generally choosing to just delete the bindings since new data
# flowing in will recreate links correctly. The tradeoff is that
# historical data is lost, but this is a compromise we're willing to
# take and a side effect of allowing this feature. There are exceptions
# to this however, such as rules, which should maintain their
# configuration when moved across organizations.
if org_changed:
for model in ReleaseProject, ReleaseProjectEnvironment, EnvironmentProject:
model.objects.filter(project_id=self.id).delete()
# this is getting really gross, but make sure there aren't lingering associations
# with old orgs or teams
ProjectTeam.objects.filter(project=self, team__organization_id=old_org_id).delete()
rules_by_environment_id = defaultdict(set)
for rule_id, environment_id in Rule.objects.filter(
project_id=self.id, environment_id__isnull=False
).values_list("id", "environment_id"):
assert environment_id is not None
rules_by_environment_id[environment_id].add(rule_id)
environment_names = dict(
Environment.objects.filter(organization_id=old_org_id).values_list("id", "name")
)
for environment_id, rule_ids in rules_by_environment_id.items():
Rule.objects.filter(id__in=rule_ids).update(
environment_id=Environment.get_or_create(self, environment_names[environment_id]).id
)
# Manually move over organization id's for Monitors
monitors = Monitor.objects.filter(organization_id=old_org_id, project_id=self.id)
new_monitors = set(
Monitor.objects.filter(organization_id=organization.id).values_list("slug", flat=True)
)
for monitor in monitors:
if monitor.slug in new_monitors:
RegionScheduledDeletion.schedule(monitor, days=0)
else:
for monitor_env_id, env_id in MonitorEnvironment.objects.filter(
monitor_id=monitor.id, status=MonitorStatus.ACTIVE
).values_list("id", "environment_id"):
MonitorEnvironment.objects.filter(id=monitor_env_id).update(
environment_id=Environment.get_or_create(
self, name=environment_names.get(env_id, None)
).id
)
monitor.update(organization_id=organization.id)
# Remove alert owners not in new org
alert_rules = AlertRule.objects.fetch_for_project(self).filter(
Q(user_id__isnull=False) | Q(team_id__isnull=False)
)
for alert_rule in alert_rules:
is_member = False
if alert_rule.user_id:
is_member = organization.member_set.filter(user_id=alert_rule.user_id).exists()
if alert_rule.team_id:
is_member = Team.objects.filter(
organization_id=organization.id, id=alert_rule.team_id
).exists()
if not is_member:
alert_rule.update(team_id=None, user_id=None)
rule_models = Rule.objects.filter(
Q(owner_team_id__isnull=False) | Q(owner_user_id__isnull=False), project=self
)
for rule_model in rule_models:
is_member = False
if rule_model.owner_user_id:
is_member = organization.member_set.filter(
user_id=rule_model.owner_user_id
).exists()
if rule_model.owner_team_id:
is_member = Team.objects.filter(
organization_id=organization.id, id=rule_model.owner_team_id
).exists()
if not is_member:
rule_model.update(owner_user_id=None, owner_team_id=None)
# [Rule, AlertRule(SnubaQuery->Environment)]
# id -> name
environment_names_with_alerts = {
**environment_names,
**{
env_id: env_name
for env_id, env_name in AlertRule.objects.fetch_for_project(self).values_list(
"snuba_query__environment__id", "snuba_query__environment__name"
)
},
}
# conditionally create a new environment associated to the new Org -> Project -> AlertRule -> SnubaQuery
# this should take care of any potentially dead references from SnubaQuery -> Environment when deleting
# the old org
# alertrule -> snuba_query -> environment_id
for snuba_id, environment_id in AlertRule.objects.fetch_for_project(self).values_list(
"snuba_query_id", "snuba_query__environment__id"
):
SnubaQuery.objects.filter(id=snuba_id).update(
environment_id=Environment.get_or_create(
self, name=environment_names_with_alerts.get(environment_id, None)
).id
)
AlertRule.objects.fetch_for_project(self).update(organization=organization)
# Transfer DataSource, Workflow, and DataConditionGroup objects for Detectors attached to this project.
# * DataSources link detectors to their data sources (QuerySubscriptions, Monitors, etc.).
# * Workflows are connected to detectors and define what actions to take.
# * DataConditionGroups are connected to workflows (unique 1:1 via WorkflowDataConditionGroup).
# Since Detectors are project-scoped and their DataSources are project-specific,
# we need to update all related organization-scoped workflow_engine models.
#
# IMPORTANT: Workflows and DataConditionGroups can be shared across multiple projects
# in the same organization. We only transfer them if they're exclusively used by
# detectors in this project. Shared workflows remain in the original organization.
# There are certainly more correct ways to do this, but this should cover most cases.
detector_ids = Detector.objects.filter(project_id=self.id).values_list("id", flat=True)
if detector_ids:
# Update DataSources
# DataSources are 1:1 with their source (e.g., QuerySubscription) so they always transfer
data_source_ids = (
DataSource.objects.filter(detectors__id__in=detector_ids)
.distinct()
.values_list("id", flat=True)
)
DataSource.objects.filter(id__in=data_source_ids).update(
organization_id=organization.id
)
# Update Workflows connected to these detectors
# Only transfer workflows that are exclusively used by detectors in this project
all_workflow_ids = (
Workflow.objects.filter(detectorworkflow__detector_id__in=detector_ids)
.distinct()
.values_list("id", flat=True)
)
# Find workflows that are ONLY connected to detectors in this project
exclusive_workflow_ids = (
Workflow.objects.filter(id__in=all_workflow_ids)
.annotate(
detector_count=Count("detectorworkflow__detector"),
project_detector_count=Count(
"detectorworkflow__detector",
filter=Q(detectorworkflow__detector_id__in=detector_ids),
),
)
.filter(detector_count=models.F("project_detector_count"))
.values_list("id", flat=True)
)
Workflow.objects.filter(id__in=exclusive_workflow_ids).update(
organization_id=organization.id
)
# Update DataConditionGroups connected to the transferred workflows
# These are linked via WorkflowDataConditionGroup with a unique constraint on condition_group
workflow_condition_group_ids = (
DataConditionGroup.objects.filter(
workflowdataconditiongroup__workflow_id__in=exclusive_workflow_ids
)
.distinct()
.values_list("id", flat=True)
)
DataConditionGroup.objects.filter(id__in=workflow_condition_group_ids).update(
organization_id=organization.id
)
# Update DataConditionGroups that are directly owned by detectors
# These are linked via Detector.workflow_condition_group (unique FK)
# and are exclusively owned by the detector, so they always transfer
detector_condition_group_ids = (
Detector.objects.filter(
id__in=detector_ids, workflow_condition_group_id__isnull=False
)
.values_list("workflow_condition_group_id", flat=True)
.distinct()
)
DataConditionGroup.objects.filter(id__in=detector_condition_group_ids).update(
organization_id=organization.id
)
# Update DataConditionGroups used as when_condition_group in transferred workflows
# DataConditionGroups are never shared, so transfer all when_condition_groups
when_condition_group_ids = (
Workflow.objects.filter(
id__in=exclusive_workflow_ids, when_condition_group_id__isnull=False
)
.values_list("when_condition_group_id", flat=True)
.distinct()
)
DataConditionGroup.objects.filter(id__in=when_condition_group_ids).update(
organization_id=organization.id
)
# Manually move over external issues to the new org
linked_groups = GroupLink.objects.filter(project_id=self.id).values_list(
"linked_id", flat=True
)
# Delete issue ownership objects to prevent them from being stuck on the old org
ProjectCodeOwners.objects.filter(project_id=self.id).delete()
RepositoryProjectPathConfig.objects.filter(project_id=self.id).delete()
for external_issues in chunked(
RangeQuerySetWrapper(
ExternalIssue.objects.filter(organization_id=old_org_id, id__in=linked_groups),
step=1000,
),
1000,
):
for ei in external_issues:
ei.organization_id = organization.id
ExternalIssue.objects.bulk_update(external_issues, ["organization_id"])
def add_team(self, team):
from sentry.models.projectteam import ProjectTeam
try:
with transaction.atomic(router.db_for_write(ProjectTeam)):
ProjectTeam.objects.create(project=self, team=team)
except IntegrityError:
return False
else:
return True
def remove_team(self, team):
from sentry.incidents.models.alert_rule import AlertRule
from sentry.models.projectteam import ProjectTeam
from sentry.models.rule import Rule
ProjectTeam.objects.filter(project=self, team=team).delete()
AlertRule.objects.fetch_for_project(self).filter(team_id=team.id).update(team_id=None)
Rule.objects.filter(owner_team_id=team.id, project=self).update(owner_team_id=None)
def get_security_token(self):
lock = locks.get(self.get_lock_key(), duration=5, name="project_security_token")
with TimedRetryPolicy(10)(lock.acquire):
security_token = self.get_option("sentry:token", None)
if security_token is None:
security_token = uuid1().hex
self.update_option("sentry:token", security_token)
return security_token
def get_lock_key(self) -> str:
return f"project_token:{self.id}"
def copy_settings_from(self, project_id: int) -> bool:
"""
Copies project level settings of the inputted project
- General Settings
- ProjectTeams
- Alerts Settings and Rules
- EnvironmentProjects
- ProjectOwnership Rules and settings
- Project Inbound Data Filters
Returns True if the settings have successfully been copied over
Returns False otherwise
"""
from sentry.models.environment import EnvironmentProject
from sentry.models.options.project_option import ProjectOption
from sentry.models.projectownership import ProjectOwnership
from sentry.models.projectteam import ProjectTeam
from sentry.models.rule import Rule
# XXX: this type sucks but it helps the type checker understand
model_list: tuple[type[EnvironmentProject | ProjectOwnership | ProjectTeam | Rule], ...] = (
EnvironmentProject,
ProjectOwnership,
ProjectTeam,
Rule,
)
project = Project.objects.get(id=project_id)
try:
with transaction.atomic(router.db_for_write(Project)):
for model in model_list:
# remove all previous project settings
model.objects.filter(project_id=self.id).delete()
# add settings from other project to self
for setting in model.objects.filter(project_id=project_id):
setting.pk = None
setting.project_id = self.id
setting.save()
options = ProjectOption.objects.get_all_values(project=project)
for key, value in options.items():
self.update_option(key, value)
except IntegrityError as e:
logging.exception(
"Error occurred during copy project settings.",
extra={
"error": str(e),
"project_to": self.id,
"project_from": project_id,
},
)
return False
return True
@staticmethod
def is_valid_platform(value):
return not value or value == "other" or value in GETTING_STARTED_DOCS_PLATFORMS
@staticmethod
def outbox_for_update(project_identifier: int, organization_identifier: int) -> RegionOutbox:
return RegionOutbox(
shard_scope=OutboxScope.ORGANIZATION_SCOPE,
shard_identifier=organization_identifier,
category=OutboxCategory.PROJECT_UPDATE,
object_identifier=project_identifier,
)
def delete(self, *args, **kwargs):
# There is no foreign key relationship so we have to manually cascade.
notifications_service.remove_notification_settings_for_project(project_id=self.id)
# There are projects being blocked from deletion because they have GroupHash objects
# that are preventing the project from being deleted.
try:
from sentry.deletions.defaults.group import delete_project_group_hashes
delete_project_group_hashes(project_id=self.id)
except Exception:
logger.warning("Failed to delete group hashes for project %s", self.id)
with outbox_context(transaction.atomic(router.db_for_write(Project))):
Project.outbox_for_update(self.id, self.organization_id).save()
return super().delete(*args, **kwargs)
def normalize_before_relocation_import(
self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
) -> int | None:
old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
# A `Global` restore implies a blanket restoration of all data. In such a case, we want to
# ensure that project IDs remain unchanged, so that recovering users do not need to mint new
# DSNs post-recovery.
if scope == ImportScope.Global:
self.pk = old_pk
return old_pk
def write_relocation_import(
self, scope: ImportScope, flags: ImportFlags
) -> tuple[int, ImportKind] | None:
from sentry.receivers.project_detectors import disable_default_detector_creation
with disable_default_detector_creation():
return super().write_relocation_import(scope, flags)
# pending deletion implementation
_pending_fields = ("slug",)
def rename_on_pending_deletion(self) -> None:
rename_on_pending_deletion(self.organization_id, self, self._pending_fields)
def reset_pending_deletion_field_names(self) -> bool:
return reset_pending_deletion_field_names(self.organization_id, self, self._pending_fields)
def delete_pending_deletion_option(self) -> None:
delete_pending_deletion_option(self.organization_id, self)
pre_delete.connect(
lambda instance, **k: instance.delete_pending_deletion_option(),
sender=Project,
weak=False,
)
| Project |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 60908,
"end": 64421
} | class ____(InstanceProperty):
"""Wraps an instance method access (e.g. `x.foo(arg)` in a Keras Layer.
This layer takes an attribute name `attr_name` in the constructor and,
when called on input tensor `obj` with additional arguments `args` and
`kwargs` returns `obj.attr_name(*args, **kwargs)`.
KerasTensors specialized for specific extension types use it to
represent dynamic instance method calls on the represented object, e.g.
x = keras.Input(..., ragged=True)
new_values = keras.Input(...)
out = x.with_values(new_values)
"""
def call(self, obj, args, kwargs):
method = getattr(obj, self.attr_name)
return method(*args, **kwargs)
def _delegate_property(keras_tensor_cls, property_name): # pylint: disable=invalid-name
"""Register property on a KerasTensor class.
Calling this multiple times with the same arguments should be a no-op.
This method exposes a property on the KerasTensor class that will use an
`InstanceProperty` layer to access the property on the represented
intermediate values in the model.
Args:
keras_tensor_cls: The KerasTensor subclass that should expose the property.
property_name: The name of the property to expose and delegate to the
represented (Composite)Tensor.
"""
# We use a lambda because we can't create a Keras layer at import time
# due to dynamic layer class versioning.
property_access = property(lambda self: InstanceProperty(property_name)(self)) # pylint: disable=unnecessary-lambda
setattr(keras_tensor_cls, property_name, property_access)
def _delegate_method(keras_tensor_cls, method_name): # pylint: disable=invalid-name
"""Register method on a KerasTensor class.
Calling this function times with the same arguments should be a no-op.
This method exposes an instance method on the KerasTensor class that will use
an `InstanceMethod` layer to run the desired method on the represented
intermediate values in the model.
Args:
keras_tensor_cls: The KerasTensor subclass that should expose the property.
method_name: The name of the method to expose and delegate to the
represented (Composite)Tensor.
"""
def delegate(self, *args, **kwargs):
return InstanceMethod(method_name)(self, args, kwargs)
setattr(keras_tensor_cls, method_name, delegate)
# We do not support the `uniform_row_length` property because it
# returns either `None` or an int tensor, and code that relies on it tends
# to check `is None` directly. Delegating it here would always return a
# `KerasTensor`, regardless of what can be statically inferred. This would
# never equal `None`, breaking code that expects it to be partially-static
# in unpredictable ways.
for ragged_property in [
'values',
'flat_values',
'row_splits',
'nested_row_splits'
]:
_delegate_property(keras_tensor.RaggedKerasTensor, ragged_property)
for ragged_method_name in [
'value_rowids',
'nested_value_rowids',
'nrows',
'row_starts',
'row_limits',
'row_lengths',
'nested_row_lengths',
'bounding_shape',
'with_values',
'with_flat_values',
'with_row_splits_dtype',
'merge_dims',
'to_tensor',
'to_sparse',
]:
_delegate_method(keras_tensor.RaggedKerasTensor, ragged_method_name)
for sparse_property in [
'indices',
'values',
]:
_delegate_property(keras_tensor.SparseKerasTensor, sparse_property)
for sparse_method in [
'with_values',
]:
_delegate_method(keras_tensor.SparseKerasTensor, sparse_method)
| InstanceMethod |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_A.py | {
"start": 96,
"end": 1364
} | class ____(Benchmark):
r"""
Ackley01 objective function.
The Ackley01 [1]_ global optimization problem is a multimodal minimization
problem defined as follows:
.. math::
f_{\text{Ackley01}}(x) = -20 e^{-0.2 \sqrt{\frac{1}{n} \sum_{i=1}^n
x_i^2}} - e^{\frac{1}{n} \sum_{i=1}^n \cos(2 \pi x_i)} + 20 + e
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-35, 35]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Adorio, E. MVF - "Multivariate Test Functions Library in C for
Unconstrained Global Optimization", 2005
TODO: the -0.2 factor in the exponent of the first term is given as
-0.02 in Jamil et al.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-35.0] * self.N, [35.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
u = sum(x ** 2)
v = sum(cos(2 * pi * x))
return (-20. * exp(-0.2 * sqrt(u / self.N))
- exp(v / self.N) + 20. + exp(1.))
| Ackley01 |
python | PrefectHQ__prefect | tests/test_serializers.py | {
"start": 689,
"end": 756
} | class ____(BaseModel):
x: int
y: uuid.UUID
@dataclass
| MyModel |
python | PyCQA__pylint | tests/functional/m/membership_protocol.py | {
"start": 1397,
"end": 1590
} | class ____:
stuff = None
def get_stuff(self):
return self.stuff
def act(self, thing):
stuff = self.get_stuff()
if thing in stuff:
pass
| UsefulMixin |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/hooks/test_hive.py | {
"start": 13716,
"end": 25068
} | class ____:
def setup_method(self):
self.next_day = (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()[:10]
self.database = "airflow"
self.partition_by = "ds"
self.table = "static_babynames_partitioned"
with (
mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_metastore_client"
) as get_metastore_mock,
mock.patch("airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_connection"),
):
get_metastore_mock.return_value = mock.MagicMock()
self.hook = HiveMetastoreHook()
VALID_FILTER_MAP = {"key2": "value2"}
def test_get_max_partition_from_empty_part_specs(self):
max_partition = HiveMetastoreHook._get_max_partition_from_part_specs(
[], "key1", self.VALID_FILTER_MAP
)
assert max_partition is None
def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self):
with pytest.raises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{"key1": "value1", "key2": "value2"}, {"key1": "value3", "key2": "value4"}],
"key1",
{"key3": "value5"},
)
def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self):
with pytest.raises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{"key1": "value1", "key2": "value2"}, {"key1": "value3", "key2": "value4"}],
"key3",
self.VALID_FILTER_MAP,
)
def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self):
with pytest.raises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{"key1": "value1", "key2": "value2"}, {"key1": "value3", "key2": "value4"}],
None,
self.VALID_FILTER_MAP,
)
def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self):
max_partition = HiveMetastoreHook._get_max_partition_from_part_specs(
[{"key1": "value1", "key2": "value2"}, {"key1": "value3", "key2": "value4"}], "key1", None
)
# No partition will be filtered out.
assert max_partition == "value3"
def test_get_max_partition_from_valid_part_specs(self):
max_partition = HiveMetastoreHook._get_max_partition_from_part_specs(
[{"key1": "value1", "key2": "value2"}, {"key1": "value3", "key2": "value4"}],
"key1",
self.VALID_FILTER_MAP,
)
assert max_partition == "value1"
def test_get_max_partition_from_valid_part_specs_return_type(self):
max_partition = HiveMetastoreHook._get_max_partition_from_part_specs(
[{"key1": "value1", "key2": "value2"}, {"key1": "value3", "key2": "value4"}],
"key1",
self.VALID_FILTER_MAP,
)
assert isinstance(max_partition, str)
@mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook._find_valid_host",
return_value="localhost",
)
@mock.patch("airflow.providers.apache.hive.hooks.hive.socket")
def test_error_metastore_client(self, socket_mock, _find_valid_host_mock):
socket_mock.socket.return_value.connect_ex.return_value = 0
self.hook.get_metastore_client()
@mock.patch("airflow.providers.apache.hive.hooks.hive.socket")
def test_ha_hosts(self, socket_mock):
with mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_connection",
return_value=Connection(host="metastore1.host,metastore2.host", port=9802),
):
socket_mock.socket.return_value.connect_ex.return_value = 1
with pytest.raises(AirflowException):
HiveMetastoreHook()
assert socket_mock.socket.call_count == 2
def test_get_conn(self):
with (
mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook._find_valid_host"
) as find_valid_host,
mock.patch(
"airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_connection"
) as get_connection,
):
find_valid_host.return_value = mock.MagicMock(return_value="")
get_connection.return_value = mock.MagicMock(return_value="")
metastore_hook = HiveMetastoreHook()
assert isinstance(metastore_hook.get_conn(), HMSClient)
def test_check_for_partition(self):
# Check for existent partition.
FakePartition = namedtuple("FakePartition", ["values"])
fake_partition = FakePartition(["2015-01-01"])
metastore = self.hook.metastore.__enter__()
partition = f"{self.partition_by}='{DEFAULT_DATE_DS}'"
metastore.get_partitions_by_filter = mock.MagicMock(return_value=[fake_partition])
assert self.hook.check_for_partition(self.database, self.table, partition)
metastore.get_partitions_by_filter(
self.database, self.table, partition, HiveMetastoreHook.MAX_PART_COUNT
)
# Check for non-existent partition.
missing_partition = f"{self.partition_by}='{self.next_day}'"
metastore.get_partitions_by_filter = mock.MagicMock(return_value=[])
assert not self.hook.check_for_partition(self.database, self.table, missing_partition)
metastore.get_partitions_by_filter.assert_called_with(
self.database, self.table, missing_partition, HiveMetastoreHook.MAX_PART_COUNT
)
def test_check_for_named_partition(self):
# Check for existing partition.
partition = f"{self.partition_by}={DEFAULT_DATE_DS}"
self.hook.metastore.__enter__().check_for_named_partition = mock.MagicMock(return_value=True)
assert self.hook.check_for_named_partition(self.database, self.table, partition)
self.hook.metastore.__enter__().check_for_named_partition.assert_called_with(
self.database, self.table, partition
)
# Check for non-existent partition
missing_partition = f"{self.partition_by}={self.next_day}"
self.hook.metastore.__enter__().check_for_named_partition = mock.MagicMock(return_value=False)
assert not self.hook.check_for_named_partition(self.database, self.table, missing_partition)
self.hook.metastore.__enter__().check_for_named_partition.assert_called_with(
self.database, self.table, missing_partition
)
def test_get_table(self):
self.hook.metastore.__enter__().get_table = mock.MagicMock()
self.hook.get_table(db=self.database, table_name=self.table)
self.hook.metastore.__enter__().get_table.assert_called_with(
dbname=self.database, tbl_name=self.table
)
def test_get_tables(self): # static_babynames_partitioned
self.hook.metastore.__enter__().get_tables = mock.MagicMock(
return_value=["static_babynames_partitioned"]
)
self.hook.get_tables(db=self.database, pattern=self.table + "*")
self.hook.metastore.__enter__().get_tables.assert_called_with(
db_name="airflow", pattern="static_babynames_partitioned*"
)
self.hook.metastore.__enter__().get_table_objects_by_name.assert_called_with(
"airflow", ["static_babynames_partitioned"]
)
def test_get_databases(self):
metastore = self.hook.metastore.__enter__()
metastore.get_databases = mock.MagicMock()
self.hook.get_databases(pattern="*")
metastore.get_databases.assert_called_with("*")
def test_get_partitions(self):
FakeFieldSchema = namedtuple("FakeFieldSchema", ["name"])
fake_schema = FakeFieldSchema("ds")
FakeTable = namedtuple("FakeTable", ["partitionKeys"])
fake_table = FakeTable([fake_schema])
FakePartition = namedtuple("FakePartition", ["values"])
fake_partition = FakePartition(["2015-01-01"])
metastore = self.hook.metastore.__enter__()
metastore.get_table = mock.MagicMock(return_value=fake_table)
metastore.get_partitions = mock.MagicMock(return_value=[fake_partition])
partitions = self.hook.get_partitions(schema=self.database, table_name=self.table)
assert len(partitions) == 1
assert partitions == [{self.partition_by: DEFAULT_DATE_DS}]
metastore.get_table.assert_called_with(dbname=self.database, tbl_name=self.table)
metastore.get_partitions.assert_called_with(
db_name=self.database, tbl_name=self.table, max_parts=HiveMetastoreHook.MAX_PART_COUNT
)
def test_max_partition(self):
FakeFieldSchema = namedtuple("FakeFieldSchema", ["name"])
fake_schema = FakeFieldSchema("ds")
FakeTable = namedtuple("FakeTable", ["partitionKeys"])
fake_table = FakeTable([fake_schema])
metastore = self.hook.metastore.__enter__()
metastore.get_table = mock.MagicMock(return_value=fake_table)
metastore.get_partition_names = mock.MagicMock(return_value=["ds=2015-01-01"])
metastore.partition_name_to_spec = mock.MagicMock(return_value={"ds": "2015-01-01"})
filter_map = {self.partition_by: DEFAULT_DATE_DS}
partition = self.hook.max_partition(
schema=self.database, table_name=self.table, field=self.partition_by, filter_map=filter_map
)
assert partition == DEFAULT_DATE_DS
metastore.get_table.assert_called_with(dbname=self.database, tbl_name=self.table)
metastore.get_partition_names.assert_called_with(
self.database, self.table, max_parts=HiveMetastoreHook.MAX_PART_COUNT
)
metastore.partition_name_to_spec.assert_called_with("ds=2015-01-01")
def test_table_exists(self):
# Test with existent table.
self.hook.metastore.__enter__().get_table = mock.MagicMock(return_value=True)
assert self.hook.table_exists(self.table, db=self.database)
self.hook.metastore.__enter__().get_table.assert_called_with(
dbname="airflow", tbl_name="static_babynames_partitioned"
)
# Test with non-existent table.
self.hook.metastore.__enter__().get_table = mock.MagicMock(side_effect=Exception())
assert not self.hook.table_exists("does-not-exist")
self.hook.metastore.__enter__().get_table.assert_called_with(
dbname="default", tbl_name="does-not-exist"
)
@mock.patch("airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.table_exists")
@mock.patch("airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook.get_metastore_client")
def test_drop_partition(self, get_metastore_client_mock, table_exist_mock):
metastore_mock = get_metastore_client_mock.return_value
table_exist_mock.return_value = True
ret = self.hook.drop_partitions(self.table, db=self.database, part_vals=[DEFAULT_DATE_DS])
table_exist_mock.assert_called_once_with(self.table, self.database)
assert metastore_mock.drop_partition(self.table, db=self.database, part_vals=[DEFAULT_DATE_DS]), ret
@pytest.mark.db_test
| TestHiveMetastoreHook |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_getitem.py | {
"start": 370,
"end": 2222
} | class ____:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_getitem_list_of_labels_categoricalindex_cols(self):
# GH#16115
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
expected = DataFrame([[1, 0], [0, 1]], dtype="bool", index=[0, 1], columns=cats)
dummies = get_dummies(cats)
result = dummies[list(dummies.columns)]
tm.assert_frame_equal(result, expected)
def test_getitem_sparse_column_return_type_and_dtype(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = DataFrame({"A": data})
expected = Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
# Also check iloc and loc while we're here
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
def test_getitem_string_columns(self):
# GH#46185
df = DataFrame([[1, 2]], columns=Index(["A", "B"], dtype="string"))
result = df.A
expected = df["A"]
tm.assert_series_equal(result, expected)
| TestGetitem |
python | vyperlang__vyper | vyper/semantics/analysis/data_positions.py | {
"start": 2760,
"end": 3940
} | class ____:
storage_allocator: SimpleAllocator
transient_storage_allocator: SimpleAllocator
immutables_allocator: SimpleAllocator
_global_nonreentrancy_key_slot: int
def __init__(self):
self.storage_allocator = SimpleAllocator(max_slot=2**256)
self.transient_storage_allocator = SimpleAllocator(max_slot=2**256)
self.immutables_allocator = SimpleAllocator(max_slot=0x6000)
def get_allocator(self, location: DataLocation):
if location == DataLocation.STORAGE:
return self.storage_allocator
if location == DataLocation.TRANSIENT:
return self.transient_storage_allocator
if location == DataLocation.CODE:
return self.immutables_allocator
raise CompilerPanic("unreachable") # pragma: nocover
def allocate_global_nonreentrancy_slot(self):
location = get_reentrancy_key_location()
allocator = self.get_allocator(location)
slot = allocator.allocate_global_nonreentrancy_slot()
self._global_nonreentrancy_key_slot = slot
def get_global_nonreentrant_key_slot(self):
return self._global_nonreentrancy_key_slot
| Allocators |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/kvstore/types.py | {
"start": 2686,
"end": 6295
} | class ____(Generic[MutableMappingT], BaseKVStore):
"""
MutableMapping Key-Value store.
Args:
mapping_factory (Callable[[], MutableMapping[str, dict]): the mutable mapping factory
"""
def __init__(self, mapping_factory: Callable[[], MutableMappingT]) -> None:
"""Initialize a MutableMappingKVStore."""
self._collections_mappings: Dict[str, MutableMappingT] = {}
self._mapping_factory = mapping_factory
def __getstate__(self) -> dict:
state = self.__dict__.copy()
state["factory_fn"] = {"fn": self._mapping_factory}
del state["_mapping_factory"]
return state
def __setstate__(self, state: dict) -> None:
self._collections_mappings = state["_collections_mappings"]
self._mapping_factory = state["factory_fn"]["fn"]
def _get_collection_mapping(self, collection: str) -> MutableMappingT:
"""Get a collection mapping. Create one if it does not exist."""
if collection not in self._collections_mappings:
self._collections_mappings[collection] = self._mapping_factory()
return self._collections_mappings[collection]
def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
"""Put a key-value pair into the store."""
self._get_collection_mapping(collection)[key] = val.copy()
async def aput(
self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
) -> None:
"""Put a key-value pair into the store."""
self.put(key, val, collection=collection)
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""Get a value from the store."""
mapping = self._get_collection_mapping(collection)
if key not in mapping:
return None
return mapping[key].copy()
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""Get a value from the store."""
return self.get(key, collection=collection)
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return dict(self._get_collection_mapping(collection))
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""Get all values from the store."""
return self.get_all(collection=collection)
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
try:
self._get_collection_mapping(collection).pop(key)
return True
except KeyError:
return False
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""Delete a value from the store."""
return self.delete(key, collection=collection)
# this method is here to avoid TypeChecker shows an error
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
"""Persist the store."""
raise NotImplementedError(
"Use subclasses of MutableMappingKVStore (such as SimpleKVStore) to call this method"
)
# this method is here to avoid TypeChecker shows an error
def from_persist_path(cls, persist_path: str) -> "MutableMappingKVStore":
"""Create a MutableMappingKVStore from a persist directory."""
raise NotImplementedError(
"Use subclasses of MutableMappingKVStore (such as SimpleKVStore) to call this method"
)
| MutableMappingKVStore |
python | openai__openai-python | src/openai/types/chat/chat_completion_system_message_param.py | {
"start": 356,
"end": 815
} | class ____(TypedDict, total=False):
content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
"""The contents of the system message."""
role: Required[Literal["system"]]
"""The role of the messages author, in this case `system`."""
name: str
"""An optional name for the participant.
Provides the model information to differentiate between participants of the same
role.
"""
| ChatCompletionSystemMessageParam |
python | kamyu104__LeetCode-Solutions | Python/minimum-insertions-to-balance-a-parentheses-string.py | {
"start": 29,
"end": 494
} | class ____(object):
def minInsertions(self, s):
"""
:type s: str
:rtype: int
"""
add, bal = 0, 0
for c in s:
if c == '(':
if bal > 0 and bal%2:
add += 1
bal -= 1
bal += 2
else:
bal -= 1
if bal < 0:
add += 1
bal += 2
return add + bal
| Solution |
python | pypa__pip | src/pip/_internal/models/link.py | {
"start": 18984,
"end": 21793
} | class ____(NamedTuple):
"""Convert link for equivalency check.
This is used in the resolver to check whether two URL-specified requirements
likely point to the same distribution and can be considered equivalent. This
equivalency logic avoids comparing URLs literally, which can be too strict
(e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpecting to users.
Currently this does three things:
1. Drop the basic auth part. This is technically wrong since a server can
serve different content based on auth, but if it does that, it is even
impossible to guarantee two URLs without auth are equivalent, since
the user can input different auth information when prompted. So the
practical solution is to assume the auth doesn't affect the response.
2. Parse the query to avoid the ordering issue. Note that ordering under the
same key in the query are NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are
still considered different.
3. Explicitly drop most of the fragment part, except ``subdirectory=`` and
hash values, since it should have no impact the downloaded content. Note
that this drops the "egg=" part historically used to denote the requested
project (and extras), which is wrong in the strictest sense, but too many
people are supplying it inconsistently to cause superfluous resolution
conflicts, so we choose to also ignore them.
"""
parsed: urllib.parse.SplitResult
query: dict[str, list[str]]
subdirectory: str
hashes: dict[str, str]
def _clean_link(link: Link) -> _CleanResult:
parsed = link._parsed_url
netloc = parsed.netloc.rsplit("@", 1)[-1]
# According to RFC 8089, an empty host in file: means localhost.
if parsed.scheme == "file" and not netloc:
netloc = "localhost"
fragment = urllib.parse.parse_qs(parsed.fragment)
if "egg" in fragment:
logger.debug("Ignoring egg= fragment in %s", link)
try:
# If there are multiple subdirectory values, use the first one.
# This matches the behavior of Link.subdirectory_fragment.
subdirectory = fragment["subdirectory"][0]
except (IndexError, KeyError):
subdirectory = ""
# If there are multiple hash values under the same algorithm, use the
# first one. This matches the behavior of Link.hash_value.
hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment}
return _CleanResult(
parsed=parsed._replace(netloc=netloc, query="", fragment=""),
query=urllib.parse.parse_qs(parsed.query),
subdirectory=subdirectory,
hashes=hashes,
)
@functools.cache
def links_equivalent(link1: Link, link2: Link) -> bool:
return _clean_link(link1) == _clean_link(link2)
| _CleanResult |
python | zarr-developers__zarr-python | src/zarr/core/indexing.py | {
"start": 46673,
"end": 47569
} | class ____(CoordinateIndexer):
def __init__(
self, selection: MaskSelection, shape: tuple[int, ...], chunk_grid: ChunkGrid
) -> None:
# some initial normalization
selection_normalized = cast("tuple[MaskSelection]", ensure_tuple(selection))
selection_normalized = cast("tuple[MaskSelection]", replace_lists(selection_normalized))
# validation
if not is_mask_selection(selection_normalized, shape):
raise IndexError(
"invalid mask selection; expected one Boolean (mask)"
f"array with the same shape as the target array, got {selection_normalized!r}"
)
# convert to indices
selection_indices = np.nonzero(selection_normalized[0])
# delegate the rest to superclass
super().__init__(selection_indices, shape, chunk_grid)
@dataclass(frozen=True)
| MaskIndexer |
python | spyder-ide__spyder | spyder/widgets/collectionseditor.py | {
"start": 3936,
"end": 4780
} | class ____:
AddDelete = 'add_delete_section'
ViewAndRest = 'view_section'
# Maximum length of a serialized variable to be set in the kernel
MAX_SERIALIZED_LENGHT = 1e6
# To handle large collections
LARGE_NROWS = 100
ROWS_TO_LOAD = 50
# Numeric types
NUMERIC_TYPES = (int, float) + get_numeric_numpy_types()
# =============================================================================
# ---- Utility functions and classes
# =============================================================================
def natsort(s):
"""
Natural sorting, e.g. test3 comes before test100.
Taken from https://stackoverflow.com/a/16090640/3110740
"""
if not isinstance(s, (str, bytes)):
return s
x = [int(t) if t.isdigit() else t.lower() for t in re.split('([0-9]+)', s)]
return x
| CollectionsEditorToolbarSections |
python | getsentry__sentry | src/sentry/apidocs/parameters.py | {
"start": 31550,
"end": 32158
} | class ____:
QUERY = OpenApiParameter(
name="query",
location="query",
required=False,
type=str,
description="""The name of the Explore query you'd like to filter by.""",
)
SORT = OpenApiParameter(
name="sortBy",
location="query",
required=False,
type=str,
description="""The property to sort results by. If not specified, the results are sorted by query name.
Available fields are:
- `name`
- `dateCreated`
- `dateUpdated`
- `mostPopular`
- `recentlyViewed`
- `myqueries`
""",
)
| ExploreSavedQueriesParams |
python | huggingface__transformers | tests/pipelines/test_pipelines_object_detection.py | {
"start": 1368,
"end": 12546
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
_dataset = None
@classmethod
def _load_dataset(cls):
# Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
if cls._dataset is None:
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
cls._dataset = datasets.load_dataset(
"hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1"
)
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
object_detector = ObjectDetectionPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def run_pipeline_test(self, object_detector, examples):
self._load_dataset()
outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
self.assertGreater(len(outputs), 0)
for detected_object in outputs:
self.assertEqual(
detected_object,
{
"score": ANY(float),
"label": ANY(str),
"box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
},
)
batch = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
self._dataset[0]["image"],
# LA
self._dataset[1]["image"],
# L
self._dataset[2]["image"],
]
batch_outputs = object_detector(batch, threshold=0.0)
self.assertEqual(len(batch), len(batch_outputs))
for outputs in batch_outputs:
self.assertGreater(len(outputs), 0)
for detected_object in outputs:
self.assertEqual(
detected_object,
{
"score": ANY(float),
"label": ANY(str),
"box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
},
)
compare_pipeline_output_to_hub_spec(detected_object, ObjectDetectionOutputElement)
@require_torch
def test_small_model_pt(self):
model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
model = AutoModelForObjectDetection.from_pretrained(model_id)
image_processor = AutoImageProcessor.from_pretrained(model_id)
object_detector = ObjectDetectionPipeline(model=model, image_processor=image_processor)
outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
)
outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
threshold=0.0,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
],
)
@require_torch
@slow
def test_large_model_pt(self):
model_id = "facebook/detr-resnet-50"
model = AutoModelForObjectDetection.from_pretrained(model_id)
image_processor = AutoImageProcessor.from_pretrained(model_id)
object_detector = ObjectDetectionPipeline(model=model, image_processor=image_processor)
outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
)
outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
],
)
@require_torch
@slow
def test_integration_torch_object_detection(self):
model_id = "facebook/detr-resnet-50"
object_detector = pipeline("object-detection", model=model_id)
outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
)
outputs = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
],
)
@require_torch
@slow
def test_threshold(self):
threshold = 0.9985
model_id = "facebook/detr-resnet-50"
object_detector = pipeline("object-detection", model=model_id)
outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
)
@require_torch
@require_pytesseract
@slow
def test_layoutlm(self):
model_id = "Narsil/layoutlmv3-finetuned-funsd"
threshold = 0.9993
object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
outputs = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
],
)
| ObjectDetectionPipelineTests |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 150692,
"end": 152759
} | class ____:
# survival function references were computed with mpmath via
# from mpmath import mp
# x = mp.mpf(x)
# c = mp.mpf(x)
# float(mp.ncdf(-x)**c)
@pytest.mark.parametrize("x, c, ref",
[(9, 1, 1.1285884059538405e-19),
(20, 2, 7.582445786569958e-178),
(100, 0.02, 3.330957891903866e-44),
(200, 0.01, 1.3004759092324774e-87)])
def test_sf(self, x, c, ref):
assert_allclose(stats.powernorm.sf(x, c), ref, rtol=1e-13)
# inverse survival function references were computed with mpmath via
# from mpmath import mp
# def isf_mp(q, c):
# q = mp.mpf(q)
# c = mp.mpf(c)
# arg = q**(mp.one / c)
# return float(-mp.sqrt(2) * mp.erfinv(mp.mpf(2.) * arg - mp.one))
@pytest.mark.parametrize("q, c, ref",
[(1e-5, 20, -0.15690800666514138),
(0.99999, 100, -5.19933666203545),
(0.9999, 0.02, -2.576676052143387),
(5e-2, 0.02, 17.089518110222244),
(1e-18, 2, 5.9978070150076865),
(1e-50, 5, 6.361340902404057)])
def test_isf(self, q, c, ref):
assert_allclose(stats.powernorm.isf(q, c), ref, rtol=5e-12)
# CDF reference values were computed with mpmath via
# from mpmath import mp
# def cdf_mp(x, c):
# x = mp.mpf(x)
# c = mp.mpf(c)
# return float(mp.one - mp.ncdf(-x)**c)
@pytest.mark.parametrize("x, c, ref",
[(-12, 9, 1.598833900869911e-32),
(2, 9, 0.9999999999999983),
(-20, 9, 2.4782617067456103e-88),
(-5, 0.02, 5.733032242841443e-09),
(-20, 0.02, 5.507248237212467e-91)])
def test_cdf(self, x, c, ref):
assert_allclose(stats.powernorm.cdf(x, c), ref, rtol=5e-14)
| TestPowerNorm |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 874506,
"end": 874685
} | class ____(sgqlc.types.Type, ProjectV2FieldCommon, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ()
| ProjectV2Field |
python | chroma-core__chroma | chromadb/execution/executor/abstract.py | {
"start": 201,
"end": 470
} | class ____(Component):
@abstractmethod
def count(self, plan: CountPlan) -> int:
pass
@abstractmethod
def get(self, plan: GetPlan) -> GetResult:
pass
@abstractmethod
def knn(self, plan: KNNPlan) -> QueryResult:
pass
| Executor |
python | conda__conda | tests/plugins/test_transaction_hooks.py | {
"start": 306,
"end": 492
} | class ____(Action):
def verify(self):
pass
def execute(self):
pass
def reverse(self):
pass
def cleanup(self):
pass
| DummyTransactionAction |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_bmm_template.py | {
"start": 2293,
"end": 9386
} | class ____(CppGemmTemplate):
def __init__(
self,
input_nodes,
layout: ir.Layout,
num_threads: int,
register_blocking: GemmBlocking,
beta=1,
alpha=1,
has_bias=False,
epilogue_creator: Optional[Callable[[ir.Buffer], ir.Pointwise]] = None,
should_block_weights: bool = False,
name="bmm",
):
"""
In order to simplify the implementation and increase code reuse, the BMM template implements
two versions of the GEMM kernel: a single-threaded version and a multi-threaded version.
GEMM kernels are called in a loop over the batch dimension, with single-threaded GEMM calls
for all but the last (B % num_threads), which are handled by the multi-threaded GEMM kernel.
We use an extra sizevar `b_index` to index the batch dimension, which we pass into the GEMM
template as a sympy.Symbol. This allows us to slice the 3D batch tensors in the GEMM template
without any changes to the GEMM template itself.
"""
super().__init__(
input_nodes,
layout,
num_threads,
register_blocking,
beta=beta,
alpha=alpha,
has_bias=has_bias,
epilogue_creator=epilogue_creator,
should_block_weights=should_block_weights,
name=name,
)
self.b_index = sympy.Symbol("s_b_index", integer=True, nonnegative=True)
@staticmethod
def get_padded_size(n, block_n, k, should_block_weight):
if should_block_weight:
# Tensor is constant or not contiguous, so we will pad and block
new_size, padded_n = CppGemmTemplate.get_padded_size(
n, block_n, k, should_block_weight
)
# Add the new batch dimension
new_size.insert(0, -1)
return new_size, padded_n
else:
new_size = [-1, k, n]
return new_size, n
@staticmethod
def check_if_block_weight(W, micro_gemm):
assert isinstance(W, ir.IRNode)
_, n = W.get_size()[-2:]
result = (
not W.get_layout().is_contiguous()
or W.get_name() in V.graph.constants
or (
n % micro_gemm.register_blocking.block_n != 0
and micro_gemm.get_b_layout != LayoutType.NORMAL
)
)
return result
def get_gemm_function_call(
self,
kernel: CppTemplateKernel,
function_name: str,
placeholder: str,
b_index: str,
) -> str:
"""
Similar to 'def_kernel' in cpp_template_kernel, but instead of generating a function definition,
generate a function call for the GEMM kernel.
Args:
placeholder: The string to replace the function call with
b_index: The index for slicing the 3D batch tensors
"""
def hook():
arg_defs, call_args, _, _ = kernel.args.python_argdefs()
for i, buf in enumerate(call_args):
if buf == self.b_index:
arg_defs[i] = ArgName(b_index)
call = f"{function_name}({', '.join(x.full_name() for x in arg_defs)});"
return call
assert placeholder not in kernel.render_hooks
kernel.render_hooks[placeholder] = hook
return placeholder
def get_default_reindexers(self, epilogue_nodes):
def reindexer(args):
# if epilogue nodes exist, they have 3D ranges but args are 2D, so add 0 index
return [self.b_index] + args
return [reindexer] * len(epilogue_nodes)
def get_options(
self,
kernel: CppTemplateKernel,
template_buffer_node: Optional[ir.CppTemplateBuffer] = None,
flag_template_buffer_has_other_users: Optional[bool] = None,
epilogue_nodes: Optional[list[ir.IRNode]] = None,
**kwargs,
) -> dict[str, Any]:
options = super().get_options(
kernel=kernel,
template_buffer_node=template_buffer_node,
flag_template_buffer_has_other_users=flag_template_buffer_has_other_users,
epilogue_nodes=epilogue_nodes,
**kwargs,
)
BX, BW, BY = options["X"], options["W"], options["Y"]
options["BX"], options["BW"], options["BY"] = BX, BW, BY
options["BY_2d"] = options["Y_2d"]
for kword in ["X", "W", "GemmOut", "Y_2d"]:
options[kword] = kernel.select(options[kword], 0, self.b_index)
for kword in ["X", "W", "Y_2d"]:
options[kword + "_dtype"] = DTYPE_TO_CPP[options[kword].dtype]
options["b_index"] = self.b_index
options["BY_sizevars"] = [
s
for sym in itertools.chain(BY.get_size(), BY.get_stride())
if isinstance(sym, sympy.Expr)
for s in sym.free_symbols
]
options["kernel_name"] = kernel.kernel_name
return options
def render( # type: ignore[override, return]
self,
kernel: CppTemplateKernel,
template_buffer_node: Optional[ir.CppTemplateBuffer] = None,
flag_template_buffer_has_other_users: Optional[bool] = None,
epilogue_nodes: Optional[list[ir.IRNode]] = None,
**kwargs,
) -> str:
options = self.get_options(
kernel=kernel,
template_buffer_node=template_buffer_node,
flag_template_buffer_has_other_users=flag_template_buffer_has_other_users,
epilogue_nodes=epilogue_nodes,
**kwargs,
)
self.render_options = options
with contextlib.ExitStack() as stack:
for buf in options["fake_buffers"]:
stack.enter_context(
patch.object(V.graph, "get_dtype", self._fake_get_dtype(buf))
)
result = self._template_from_string(BMM_TEMPLATE).render(**options)
# Finalize the function definitions for the gemm routines
sub_mm_hooks = {
name: hook
for name, hook in kernel.render_hooks.items()
if "FOR_BMM" in name
}
result = PartialRender(result, sub_mm_hooks).finalize_all()
for name in sub_mm_hooks:
del kernel.render_hooks[name]
del kernel.args.sizevars[options["b_index"]]
return result
def codegen_single_thread_gemm(self):
stub = self._template_from_string(GEMM_SINGLE_THREAD_MM_STUB).render(
self.render_options
)
return stub + self._template_from_string(GEMM_TEMPLATE).render(
{**self.render_options, "num_threads": 1}
)
def codegen_multi_thread_gemm(self):
stub = self._template_from_string(GEMM_THREADED_MM_STUB).render(
self.render_options
)
return stub + self._template_from_string(GEMM_TEMPLATE).render(
self.render_options
)
def codegen_gemm_stub_def(self):
return ""
| CppBmmTemplate |
python | celery__celery | t/unit/app/test_routes.py | {
"start": 3696,
"end": 7372
} | class ____(RouteCase):
def test_init_queues(self):
router = Router(self.app, queues=None)
assert router.queues == {}
def test_lookup_takes_first(self):
set_queues(self.app, foo=self.a_queue, bar=self.b_queue)
R = routes.prepare(({self.mytask.name: {'queue': 'bar'}},
{self.mytask.name: {'queue': 'foo'}}))
router = Router(self.app, R, self.app.amqp.queues)
self.assert_routes_to_queue('bar', router, self.mytask.name)
def test_expands_queue_in_options(self):
set_queues(self.app)
R = routes.prepare(())
router = Router(
self.app, R, self.app.amqp.queues, create_missing=True,
)
# apply_async forwards all arguments, even exchange=None etc,
# so need to make sure it's merged correctly.
route = router.route(
{'queue': 'testq',
'exchange': None,
'routing_key': None,
'immediate': False},
self.mytask.name,
args=[1, 2], kwargs={},
)
assert route['queue'].name == 'testq'
assert route['queue'].exchange == Exchange('testq')
assert route['queue'].routing_key == 'testq'
assert route['immediate'] is False
def test_expand_destination_string(self):
set_queues(self.app, foo=self.a_queue, bar=self.b_queue)
x = Router(self.app, {}, self.app.amqp.queues)
dest = x.expand_destination('foo')
assert dest['queue'].name == 'foo'
def test_expand_destination__Queue(self):
queue = Queue('foo')
x = Router(self.app, {}, self.app.amqp.queues)
dest = x.expand_destination({'queue': queue})
assert dest['queue'] is queue
def test_lookup_paths_traversed(self):
self.simple_queue_setup()
R = routes.prepare((
{'celery.xaza': {'queue': 'bar'}},
{self.mytask.name: {'queue': 'foo'}}
))
router = Router(self.app, R, self.app.amqp.queues)
self.assert_routes_to_queue('foo', router, self.mytask.name)
self.assert_routes_to_default_queue(router, 'celery.poza')
def test_compat_router_class(self):
self.simple_queue_setup()
R = routes.prepare((
TestRouter(),
))
router = Router(self.app, R, self.app.amqp.queues)
self.assert_routes_to_queue('bar', router, 'celery.xaza')
self.assert_routes_to_default_queue(router, 'celery.poza')
def test_router_fun__called_with(self):
self.simple_queue_setup()
step = Mock(spec=['__call__'])
step.return_value = None
R = routes.prepare([step])
router = Router(self.app, R, self.app.amqp.queues)
self.mytask.apply_async((2, 2), {'kw': 3}, router=router, priority=3)
step.assert_called_with(
self.mytask.name, (2, 2), {'kw': 3}, ANY,
task=self.mytask,
)
options = step.call_args[0][3]
assert options['priority'] == 3
def test_compat_router_classes__called_with(self):
self.simple_queue_setup()
step = Mock(spec=['route_for_task'])
step.route_for_task.return_value = None
R = routes.prepare([step])
router = Router(self.app, R, self.app.amqp.queues)
self.mytask.apply_async((2, 2), {'kw': 3}, router=router, priority=3)
step.route_for_task.assert_called_with(
self.mytask.name, (2, 2), {'kw': 3},
)
def simple_queue_setup(self):
set_queues(
self.app, foo=self.a_queue, bar=self.b_queue,
**{self.app.conf.task_default_queue: self.d_queue})
| test_lookup_route |
python | spack__spack | lib/spack/spack/llnl/util/tty/log.py | {
"start": 23992,
"end": 26601
} | class ____:
"""Wrapper class to handle redirection of io streams"""
def __init__(self, sys_attr):
self.sys_attr = sys_attr
self.saved_stream = None
if sys.platform.startswith("win32"):
if hasattr(sys, "gettotalrefcount"): # debug build
libc = ctypes.CDLL("ucrtbased")
else:
libc = ctypes.CDLL("api-ms-win-crt-stdio-l1-1-0")
kernel32 = ctypes.WinDLL("kernel32")
# https://docs.microsoft.com/en-us/windows/console/getstdhandle
if self.sys_attr == "stdout":
STD_HANDLE = -11
elif self.sys_attr == "stderr":
STD_HANDLE = -12
else:
raise KeyError(self.sys_attr)
c_stdout = kernel32.GetStdHandle(STD_HANDLE)
self.libc = libc
self.c_stream = c_stdout
else:
self.libc = ctypes.CDLL(None)
self.c_stream = ctypes.c_void_p.in_dll(self.libc, self.sys_attr)
self.sys_stream = getattr(sys, self.sys_attr)
self.orig_stream_fd = self.sys_stream.fileno()
# Save a copy of the original stdout fd in saved_stream
self.saved_stream = os.dup(self.orig_stream_fd)
def redirect_stream(self, to_fd):
"""Redirect stdout to the given file descriptor."""
# Flush the C-level buffer stream
if sys.platform.startswith("win32"):
self.libc.fflush(None)
else:
self.libc.fflush(self.c_stream)
# Flush and close sys_stream - also closes the file descriptor (fd)
sys_stream = getattr(sys, self.sys_attr)
sys_stream.flush()
sys_stream.close()
# Make orig_stream_fd point to the same file as to_fd
os.dup2(to_fd, self.orig_stream_fd)
# Set sys_stream to a new stream that points to the redirected fd
new_buffer = open(self.orig_stream_fd, "wb")
new_stream = io.TextIOWrapper(new_buffer)
setattr(sys, self.sys_attr, new_stream)
self.sys_stream = getattr(sys, self.sys_attr)
def flush(self):
if sys.platform.startswith("win32"):
self.libc.fflush(None)
else:
self.libc.fflush(self.c_stream)
self.sys_stream.flush()
def close(self):
"""Redirect back to the original system stream, and close stream"""
try:
if self.saved_stream is not None:
self.redirect_stream(self.saved_stream)
finally:
if self.saved_stream is not None:
os.close(self.saved_stream)
| StreamWrapper |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/types.py | {
"start": 1631,
"end": 1939
} | class ____:
operator_classes = OperatorClass.BASE | OperatorClass.COMPARISON
def coerce_compared_value(
self, op: Optional[OperatorType], value: Any
) -> TypeEngine[Any]:
if TYPE_CHECKING:
assert isinstance(self, TypeEngine)
return self
| _NetworkAddressTypeMixin |
python | getsentry__sentry | tests/sentry/notifications/test_apps.py | {
"start": 174,
"end": 1485
} | class ____(TestCase):
def test_registers_legacy_providers(self) -> None:
"""
This django app doesn't actually register these legacy providers because it would result
in some circular breakages from all the __init__.py imports. We'll still test it here
to make sure it doesn't break in the future.
If this test is failing for you, ensure all `@register_notification_provider` decorators
are triggered by an initialization import.
"""
from sentry.notifications.notify import registry
assert len(registry) == 3
assert registry[ExternalProviders.EMAIL] is not None
assert registry[ExternalProviders.SLACK] is not None
assert registry[ExternalProviders.MSTEAMS] is not None
def test_registers_platform_providers(self) -> None:
from sentry.notifications.platform.registry import provider_registry
assert len(provider_registry.registrations) == 4
assert provider_registry.get(NotificationProviderKey.DISCORD) is not None
assert provider_registry.get(NotificationProviderKey.EMAIL) is not None
assert provider_registry.get(NotificationProviderKey.MSTEAMS) is not None
assert provider_registry.get(NotificationProviderKey.SLACK) is not None
| NotificationsDjangoAppTest |
python | plotly__plotly.py | tests/test_core/test_colors/test_colors.py | {
"start": 106,
"end": 7569
} | class ____(TestCase):
def test_validate_colors(self):
# test string input
color_string = "foo"
pattern = (
"If your colors variable is a string, it must be a "
"Plotly scale, an rgb color or a hex color."
)
self.assertRaisesRegex(
PlotlyError, pattern, colors.validate_colors, color_string
)
# test rgb color
color_string2 = "rgb(265, 0, 0)"
pattern2 = "Whoops! The elements in your rgb colors tuples cannot exceed 255.0."
self.assertRaisesRegex(
PlotlyError, pattern2, colors.validate_colors, color_string2
)
# test tuple color
color_tuple = (1, 1, 2)
pattern3 = "Whoops! The elements in your colors tuples cannot exceed 1.0."
self.assertRaisesRegex(
PlotlyError, pattern3, colors.validate_colors, color_tuple
)
def test_convert_colors_to_same_type(self):
# test colortype
color_tuple = ["#aaaaaa", "#bbbbbb", "#cccccc"]
scale = [0, 1]
self.assertRaises(
PlotlyError, colors.convert_colors_to_same_type, color_tuple, scale=scale
)
# test colortype
color_tuple = (1, 1, 1)
colortype = 2
pattern2 = "You must select either rgb or tuple for your colortype variable."
self.assertRaisesRegex(
PlotlyError,
pattern2,
colors.convert_colors_to_same_type,
color_tuple,
colortype,
)
def test_convert_dict_colors_to_same_type(self):
# test colortype
color_dict = dict(apple="rgb(1, 1, 1)")
colortype = 2
pattern = "You must select either rgb or tuple for your colortype variable."
self.assertRaisesRegex(
PlotlyError,
pattern,
colors.convert_dict_colors_to_same_type,
color_dict,
colortype,
)
def test_validate_scale_values(self):
# test that scale length is at least 2
scale = [0]
pattern = "You must input a list of scale values that has at least two values."
self.assertRaisesRegex(
PlotlyError, pattern, colors.validate_scale_values, scale
)
# test if first and last number is 0 and 1 respectively
scale = [0, 1.1]
pattern = (
"The first and last number in your scale must be 0.0 and 1.0 respectively."
)
self.assertRaisesRegex(
PlotlyError, pattern, colors.validate_scale_values, scale
)
# test numbers increase
scale = [0, 2, 1]
pattern = (
"'scale' must be a list that contains a strictly "
"increasing sequence of numbers."
)
self.assertRaisesRegex(
PlotlyError, pattern, colors.validate_scale_values, scale
)
def test_make_colorscale(self):
# test minimum colors length
color_list = [(0, 0, 0)]
pattern = "You must input a list of colors that has at least two colors."
self.assertRaisesRegex(PlotlyError, pattern, colors.make_colorscale, color_list)
# test length of colors and scale
color_list2 = [(0, 0, 0), (1, 1, 1)]
scale = [0]
pattern2 = "The length of colors and scale must be the same."
self.assertRaisesRegex(
PlotlyError, pattern2, colors.make_colorscale, color_list2, scale
)
def test_get_colorscale(self):
# test for incorrect input type
pattern = "Name argument have to be a string."
name = colors.sequential.haline
self.assertRaisesRegex(PlotlyError, pattern, colors.get_colorscale, name)
# test for non-existing colorscale
pattern = r"Colorscale \S+ is not a built-in scale."
name = "foo"
self.assertRaisesRegex(PlotlyError, pattern, colors.get_colorscale, name)
# test non-capitalised access
self.assertEqual(
colors.make_colorscale(colors.sequential.haline),
colors.get_colorscale("haline"),
)
# test capitalised access
self.assertEqual(
colors.make_colorscale(colors.diverging.Earth),
colors.get_colorscale("Earth"),
)
# test accessing non-capitalised scale with capitalised name
self.assertEqual(
colors.make_colorscale(colors.cyclical.mrybm),
colors.get_colorscale("Mrybm"),
)
# test accessing capitalised scale with non-capitalised name
self.assertEqual(
colors.make_colorscale(colors.sequential.Viridis),
colors.get_colorscale("viridis"),
)
# test accessing reversed scale
self.assertEqual(
colors.make_colorscale(colors.diverging.Portland_r),
colors.get_colorscale("portland_r"),
)
def test_sample_colorscale(self):
# test that sampling a colorscale at the defined points returns the same
defined_colors = colors.sequential.Inferno
sampled_colors = colors.sample_colorscale(
defined_colors, len(defined_colors), colortype="rgb"
)
defined_colors_rgb = colors.convert_colors_to_same_type(
defined_colors, colortype="rgb"
)[0]
self.assertEqual(sampled_colors, defined_colors_rgb)
# test sampling an easy colorscale that goes [red, green, blue]
defined_colors = ["rgb(255,0,0)", "rgb(0,255,0)", "rgb(0,0,255)"]
samplepoints = [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]
expected_output = [
(1.0, 0.0, 0.0),
(0.75, 0.25, 0.0),
(0.5, 0.5, 0.0),
(0.25, 0.75, 0.0),
(0.0, 1.0, 0.0),
(0.0, 0.75, 0.25),
(0.0, 0.5, 0.5),
(0.0, 0.25, 0.75),
(0.0, 0.0, 1.0),
]
output = colors.sample_colorscale(
defined_colors, samplepoints, colortype="tuple"
)
self.assertEqual(expected_output, output)
self.assertEqual(
colors.sample_colorscale("TuRbId_r", 12),
colors.sequential.turbid_r,
)
def test_n_colors(self):
# test that n_colors constrains values to between 0 and 255
generated_colorscale = colors.n_colors(
lowcolor="rgb(255,0,0)",
highcolor="rgb(0,255,0)",
n_colors=14,
colortype="rgb",
)
expected_colorscale = [
"rgb(255.0, 0.0, 0.0)",
"rgb(235.3846153846154, 19.615384615384617, 0.0)",
"rgb(215.76923076923077, 39.23076923076923, 0.0)",
"rgb(196.15384615384613, 58.846153846153854, 0.0)",
"rgb(176.53846153846155, 78.46153846153847, 0.0)",
"rgb(156.9230769230769, 98.07692307692308, 0.0)",
"rgb(137.3076923076923, 117.69230769230771, 0.0)",
"rgb(117.69230769230768, 137.30769230769232, 0.0)",
"rgb(98.07692307692307, 156.92307692307693, 0.0)",
"rgb(78.46153846153845, 176.53846153846155, 0.0)",
"rgb(58.84615384615384, 196.15384615384616, 0.0)",
"rgb(39.230769230769226, 215.76923076923077, 0.0)",
"rgb(19.615384615384585, 235.38461538461542, 0.0)",
"rgb(0.0, 255.0, 0.0)",
]
self.assertEqual(generated_colorscale, expected_colorscale)
| TestColors |
python | openai__openai-python | src/openai/types/responses/response_function_web_search.py | {
"start": 765,
"end": 911
} | class ____(BaseModel):
type: Literal["open_page"]
"""The action type."""
url: str
"""The URL opened by the model."""
| ActionOpenPage |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 26410,
"end": 27201
} | class ____(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
super().__init__()
self.attention = FlavaSelfAttention(config)
self.output = FlavaSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
self_outputs = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
| FlavaAttention |
python | getsentry__sentry | tests/sentry/preprod/test_tasks.py | {
"start": 18996,
"end": 24166
} | class ____(BaseAssembleTest):
def setUp(self) -> None:
super().setUp()
self.preprod_artifact = PreprodArtifact.objects.create(
project=self.project, state=PreprodArtifact.ArtifactState.UPLOADED
)
def _run_task_and_verify_status(
self, content, checksum=None, chunks=None, artifact_id=None, org_id=None, project_id=None
):
checksum = checksum or sha1(content).hexdigest()
blob = FileBlob.from_file_with_organization(ContentFile(content), self.organization)
chunks = chunks or [blob.checksum]
assemble_preprod_artifact_size_analysis(
org_id=org_id or self.organization.id,
project_id=project_id or self.project.id,
checksum=checksum,
chunks=chunks,
artifact_id=artifact_id or self.preprod_artifact.id,
)
status, details = get_assemble_status(
AssembleTask.PREPROD_ARTIFACT_SIZE_ANALYSIS, project_id or self.project.id, checksum
)
delete_assemble_status(
AssembleTask.PREPROD_ARTIFACT_SIZE_ANALYSIS, project_id or self.project.id, checksum
)
return status, details
def test_assemble_preprod_artifact_size_analysis_success(self) -> None:
status, details = self._run_task_and_verify_status(
b'{"analysis_duration": 1.5, "download_size": 1000, "install_size": 2000, "treemap": null, "analysis_version": null}'
)
assert status == ChunkFileState.OK
assert details is None
# Verify size analysis file and size metrics creation
size_files = File.objects.filter(type="preprod.file")
assert len(size_files) == 1
assert size_files[0].name.startswith("preprod-file-")
# Verify PreprodArtifactSizeMetrics record was created
size_metrics = PreprodArtifactSizeMetrics.objects.filter(
preprod_artifact=self.preprod_artifact
)
assert len(size_metrics) == 1
assert size_metrics[0].analysis_file_id == size_files[0].id
assert size_metrics[0].state == PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED
assert (
size_metrics[0].metrics_artifact_type
== PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT
)
def test_assemble_preprod_artifact_size_analysis_update_existing(self) -> None:
# Create an existing size metrics record
existing_size_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=self.preprod_artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.PENDING,
)
status, details = self._run_task_and_verify_status(
b'{"analysis_duration": 1.5, "download_size": 1000, "install_size": 2000, "treemap": null, "analysis_version": null}'
)
assert status == ChunkFileState.OK
assert details is None
# Verify size analysis file was created
size_files = File.objects.filter(type="preprod.file")
assert len(size_files) == 1
assert size_files[0].name.startswith("preprod-file-")
# Verify existing PreprodArtifactSizeMetrics record was updated (not created new)
size_metrics = PreprodArtifactSizeMetrics.objects.filter(
preprod_artifact=self.preprod_artifact
)
assert len(size_metrics) == 1 # Should still be only 1 record
assert size_metrics[0].id == existing_size_metrics.id # Should be the same record
assert size_metrics[0].analysis_file_id == size_files[0].id
assert size_metrics[0].state == PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED
def test_assemble_preprod_artifact_size_analysis_error_cases(self) -> None:
# Test nonexistent artifact
status, details = self._run_task_and_verify_status(
b"nonexistent artifact", artifact_id=99999
)
assert status == ChunkFileState.ERROR
# Test checksum mismatch
status, details = self._run_task_and_verify_status(b"checksum mismatch", checksum="b" * 40)
assert status == ChunkFileState.ERROR
assert "checksum mismatch" in details
# Test missing chunks
status, details = self._run_task_and_verify_status(
b"missing chunks", chunks=["nonexistent" + "1" * 32]
)
assert status == ChunkFileState.ERROR
assert "Not all chunks available" in details
# Test nonexistent org
status, details = self._run_task_and_verify_status(b"nonexistent org", org_id=99999)
assert status == ChunkFileState.ERROR
# Test nonexistent project
status, details = self._run_task_and_verify_status(b"nonexistent project", project_id=99999)
assert status == ChunkFileState.ERROR
# Verify no size metrics were created for error cases
size_metrics = PreprodArtifactSizeMetrics.objects.filter(
preprod_artifact=self.preprod_artifact
)
assert len(size_metrics) == 0
| AssemblePreprodArtifactSizeAnalysisTest |
python | google__pytype | pytype/block_environment_test.py | {
"start": 669,
"end": 701
} | class ____:
id: int
| FakeVariable |
python | getsentry__sentry | src/sentry/search/events/datasets/discover.py | {
"start": 2858,
"end": 87923
} | class ____(DatasetConfig):
custom_threshold_columns = {
"apdex()",
"count_miserable(user)",
"user_misery()",
}
non_nullable_keys = {"event.type"}
nullable_context_keys = {"thread.id"}
use_entity_prefix_for_fields: bool = False
def __init__(self, builder: BaseQueryBuilder):
self.builder = builder
self.total_count: int | None = None
self.total_sum_transaction_duration: float | None = None
@property
def search_filter_converter(
self,
) -> Mapping[str, Callable[[SearchFilter], WhereType | None]]:
return {
"environment": self.builder._environment_filter_converter,
"message": self._message_filter_converter,
PROJECT_ALIAS: self._project_slug_filter_converter,
PROJECT_NAME_ALIAS: self._project_slug_filter_converter,
ISSUE_ALIAS: self._issue_filter_converter,
ISSUE_ID_ALIAS: self._issue_id_filter_converter,
RELEASE_ALIAS: self._release_filter_converter,
TRANSACTION_STATUS_ALIAS: self._transaction_status_filter_converter,
ERROR_HANDLED_ALIAS: self._error_handled_filter_converter,
ERROR_UNHANDLED_ALIAS: self._error_unhandled_filter_converter,
TEAM_KEY_TRANSACTION_ALIAS: self._key_transaction_filter_converter,
RELEASE_STAGE_ALIAS: self._release_stage_filter_converter,
SEMVER_ALIAS: self._semver_filter_converter,
SEMVER_PACKAGE_ALIAS: self._semver_package_filter_converter,
SEMVER_BUILD_ALIAS: self._semver_build_filter_converter,
TRACE_PARENT_SPAN_ALIAS: self._trace_parent_span_converter,
"performance.issue_ids": self._performance_issue_ids_filter_converter,
EVENT_TYPE_ALIAS: self._event_type_filter_converter,
"transaction": self._transaction_filter_converter,
}
@property
def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:
return {
PROJECT_ALIAS: self._resolve_project_slug_alias,
PROJECT_NAME_ALIAS: self._resolve_project_slug_alias,
# NOTE: `ISSUE_ALIAS` simply maps to the id, meaning that post processing
# is required to insert the true issue short id into the response.
ISSUE_ALIAS: self._resolve_issue_id_alias,
ISSUE_ID_ALIAS: self._resolve_issue_id_alias,
TIMESTAMP_TO_HOUR_ALIAS: self._resolve_timestamp_to_hour_alias,
TIMESTAMP_TO_DAY_ALIAS: self._resolve_timestamp_to_day_alias,
USER_DISPLAY_ALIAS: self._resolve_user_display_alias,
PROJECT_THRESHOLD_CONFIG_ALIAS: lambda _: self._resolve_project_threshold_config,
ERROR_HANDLED_ALIAS: self._resolve_error_handled_alias,
ERROR_UNHANDLED_ALIAS: self._resolve_error_unhandled_alias,
TEAM_KEY_TRANSACTION_ALIAS: self._resolve_team_key_transaction_alias,
MEASUREMENTS_FRAMES_SLOW_RATE: self._resolve_measurements_frames_slow_rate,
MEASUREMENTS_FRAMES_FROZEN_RATE: self._resolve_measurements_frames_frozen_rate,
MEASUREMENTS_STALL_PERCENTAGE: self._resolve_measurements_stall_percentage,
HTTP_STATUS_CODE_ALIAS: self._resolve_http_status_code,
TOTAL_COUNT_ALIAS: self._resolve_total_count,
TOTAL_TRANSACTION_DURATION_ALIAS: self._resolve_total_sum_transaction_duration,
DEVICE_CLASS_ALIAS: self._resolve_device_class,
PRECISE_FINISH_TS: lambda alias: field_aliases.resolve_precise_timestamp(
Column("finish_ts"), Column("finish_ms"), alias
),
PRECISE_START_TS: lambda alias: field_aliases.resolve_precise_timestamp(
Column("start_ts"), Column("start_ms"), alias
),
}
@property
def function_converter(self) -> Mapping[str, SnQLFunction]:
function_converter = {
function.name: function
for function in [
SnQLFunction(
"failure_count",
snql_aggregate=lambda _, alias: Function(
"countIf",
[
Function(
"notIn",
[
self.builder.column("transaction.status"),
[
SPAN_STATUS_NAME_TO_CODE[status]
for status in NON_FAILURE_STATUS
],
],
)
],
alias,
),
default_result_type="integer",
),
SnQLFunction(
"apdex",
optional_args=[NullableNumberRange("satisfaction", 0, None)],
snql_aggregate=self._resolve_apdex_function,
default_result_type="number",
),
SnQLFunction(
"count_miserable",
required_args=[ColumnTagArg("column")],
optional_args=[NullableNumberRange("satisfaction", 0, None)],
calculated_args=[
{
"name": "tolerated",
"fn": lambda args: (
args["satisfaction"] * 4.0
if args["satisfaction"] is not None
else None
),
}
],
snql_aggregate=self._resolve_count_miserable_function,
default_result_type="integer",
),
SnQLFunction(
"user_misery",
# To correct for sensitivity to low counts, User Misery is modeled as a Beta Distribution Function.
# With prior expectations, we have picked the expected mean user misery to be 0.05 and variance
# to be 0.0004. This allows us to calculate the alpha (5.8875) and beta (111.8625) parameters,
# with the user misery being adjusted for each fast/slow unique transaction. See:
# https://stats.stackexchange.com/questions/47771/what-is-the-intuition-behind-beta-distribution
# for an intuitive explanation of the Beta Distribution Function.
optional_args=[
NullableNumberRange("satisfaction", 0, None),
with_default(MISERY_ALPHA, NumberRange("alpha", 0, None)),
with_default(MISERY_BETA, NumberRange("beta", 0, None)),
],
calculated_args=[
{
"name": "tolerated",
"fn": lambda args: (
args["satisfaction"] * 4.0
if args["satisfaction"] is not None
else None
),
},
{"name": "parameter_sum", "fn": lambda args: args["alpha"] + args["beta"]},
],
snql_aggregate=self._resolve_user_misery_function,
default_result_type="number",
),
SnQLFunction(
"count",
optional_args=[NullColumn("column")],
snql_aggregate=lambda _, alias: Function(
"count",
[],
alias,
),
default_result_type="integer",
),
SnQLFunction(
"count_web_vitals",
required_args=[
NumericColumn("column"),
SnQLStringArg("quality", allowed_strings=["good", "meh", "poor", "any"]),
],
snql_aggregate=self._resolve_web_vital_function,
default_result_type="integer",
),
SnQLFunction(
"last_seen",
snql_aggregate=lambda _, alias: Function(
"max",
[self.builder.column("timestamp")],
alias,
),
default_result_type="date",
redundant_grouping=True,
),
SnQLFunction(
"latest_event",
snql_aggregate=lambda _, alias: Function(
"argMax",
[self.builder.column("id"), self.builder.column("timestamp")],
alias,
),
default_result_type="string",
),
SnQLFunction(
"failure_rate",
snql_aggregate=lambda _, alias: Function(
"failure_rate",
[],
alias,
),
default_result_type="percentage",
),
SnQLFunction(
"group_uniq_array",
required_args=[NumberRange("max_size", 0, 101), ColumnTagArg("column")],
snql_aggregate=lambda args, alias: CurriedFunction(
"groupUniqArray",
[int(args["max_size"])],
[args["column"]],
alias,
),
default_result_type="string", # TODO: support array type
private=True,
),
SnQLFunction(
"percentile",
required_args=[
NumericColumn("column"),
NumberRange("percentile", 0, 1),
],
snql_aggregate=self._resolve_percentile,
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
combinators=[
SnQLArrayCombinator("column", NumericColumn.numeric_array_columns)
],
),
SnQLFunction(
"p50",
optional_args=[
with_default("transaction.duration", NumericColumn("column")),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.5),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p75",
optional_args=[
with_default("transaction.duration", NumericColumn("column")),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.75),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p90",
optional_args=[
with_default("transaction.duration", NumericColumn("column")),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.90),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p95",
optional_args=[
with_default("transaction.duration", NumericColumn("column")),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.95),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p99",
optional_args=[
with_default("transaction.duration", NumericColumn("column")),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 0.99),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"p100",
optional_args=[
with_default("transaction.duration", NumericColumn("column")),
],
snql_aggregate=lambda args, alias: self._resolve_percentile(args, alias, 1),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"to_other",
required_args=[
ColumnArg(
"column",
allowed_columns=["release", "trace.parent_span", "id", "trace.span"],
),
SnQLStringArg("value", unquote=True, unescape_quotes=True),
],
optional_args=[
with_default("that", SnQLStringArg("that")),
with_default("this", SnQLStringArg("this")),
],
snql_column=lambda args, alias: Function(
"if",
[
Function("equals", [args["column"], args["value"]]),
args["this"],
args["that"],
],
alias,
),
),
SnQLFunction(
"percentile_range",
required_args=[
NumericColumn("column"),
NumberRange("percentile", 0, 1),
ConditionArg("condition"),
SnQLDateArg("middle"),
],
snql_aggregate=lambda args, alias: Function(
f"quantileIf({args['percentile']:.2f})",
[
args["column"],
# This condition is written in this seemingly backwards way because of limitations
# in the json query syntax.
# TODO(snql-migration): Once the trends endpoint is using snql, we should update it
# and flip these conditions back
Function(
args["condition"],
[
Function("toDateTime", [args["middle"]]),
self.builder.column("timestamp"),
],
),
],
alias,
),
default_result_type="duration",
),
SnQLFunction(
"random_number",
snql_aggregate=lambda args, alias: Function(
"rand",
[],
alias,
),
default_result_type="integer",
private=True,
),
SnQLFunction(
"modulo",
required_args=[SnQLStringArg("column"), NumberRange("factor", None, None)],
snql_aggregate=lambda args, alias: Function(
"modulo",
[Column(args["column"]), args["factor"]],
alias,
),
default_result_type="integer",
private=True,
),
SnQLFunction(
"avg_range",
required_args=[
NumericColumn("column"),
ConditionArg("condition"),
SnQLDateArg("middle"),
],
snql_aggregate=lambda args, alias: Function(
"avgIf",
[
args["column"],
# see `percentile_range` for why this condition feels backwards
Function(
args["condition"],
[
Function("toDateTime", [args["middle"]]),
self.builder.column("timestamp"),
],
),
],
alias,
),
default_result_type="duration",
),
SnQLFunction(
"variance_range",
required_args=[
NumericColumn("column"),
ConditionArg("condition"),
SnQLDateArg("middle"),
],
snql_aggregate=lambda args, alias: Function(
"varSampIf",
[
args["column"],
# see `percentile_range` for why this condition feels backwards
Function(
args["condition"],
[
Function("toDateTime", [args["middle"]]),
self.builder.column("timestamp"),
],
),
],
alias,
),
default_result_type="duration",
),
SnQLFunction(
"count_range",
required_args=[ConditionArg("condition"), SnQLDateArg("middle")],
snql_aggregate=lambda args, alias: Function(
"countIf",
[
# see `percentile_range` for why this condition feels backwards
Function(
args["condition"],
[
Function("toDateTime", [args["middle"]]),
self.builder.column("timestamp"),
],
),
],
alias,
),
default_result_type="integer",
),
SnQLFunction(
"count_if",
required_args=[
ColumnTagArg("column"),
ConditionArg("condition"),
SnQLStringArg(
"value", unquote=True, unescape_quotes=True, optional_unquote=True
),
],
calculated_args=[
{
"name": "typed_value",
"fn": normalize_count_if_value,
},
{
"name": "normalized_condition",
"fn": normalize_count_if_condition,
},
{
"name": "is_array_field",
"fn": lambda args: args["column"] in ARRAY_FIELDS,
},
],
snql_aggregate=self._resolve_count_if,
default_result_type="integer",
),
SnQLFunction(
"count_unique",
required_args=[ColumnTagArg("column")],
snql_aggregate=lambda args, alias: Function("uniq", [args["column"]], alias),
default_result_type="integer",
),
SnQLFunction(
"count_at_least",
required_args=[NumericColumn("column"), NumberRange("threshold", 0, None)],
snql_aggregate=lambda args, alias: Function(
"countIf",
[Function("greaterOrEquals", [args["column"], args["threshold"]])],
alias,
),
default_result_type="integer",
),
SnQLFunction(
"min",
required_args=[NumericColumn("column")],
snql_aggregate=lambda args, alias: Function("min", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"max",
required_args=[NumericColumn("column")],
snql_aggregate=lambda args, alias: Function("max", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
combinators=[
SnQLArrayCombinator("column", NumericColumn.numeric_array_columns)
],
),
SnQLFunction(
"avg",
required_args=[NumericColumn("column")],
snql_aggregate=lambda args, alias: Function("avg", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
redundant_grouping=True,
),
SnQLFunction(
"var",
required_args=[NumericColumn("column")],
snql_aggregate=lambda args, alias: Function("varSamp", [args["column"]], alias),
default_result_type="number",
redundant_grouping=True,
),
SnQLFunction(
"stddev",
required_args=[NumericColumn("column")],
snql_aggregate=lambda args, alias: Function(
"stddevSamp", [args["column"]], alias
),
default_result_type="number",
redundant_grouping=True,
),
SnQLFunction(
"cov",
required_args=[NumericColumn("column1"), NumericColumn("column2")],
snql_aggregate=lambda args, alias: Function(
"covarSamp", [args["column1"], args["column2"]], alias
),
default_result_type="number",
redundant_grouping=True,
),
SnQLFunction(
"corr",
required_args=[NumericColumn("column1"), NumericColumn("column2")],
snql_aggregate=lambda args, alias: Function(
"corr", [args["column1"], args["column2"]], alias
),
default_result_type="number",
redundant_grouping=True,
),
SnQLFunction(
"linear_regression",
required_args=[NumericColumn("column1"), NumericColumn("column2")],
snql_aggregate=lambda args, alias: Function(
"simpleLinearRegression", [args["column1"], args["column2"]], alias
),
default_result_type="number",
redundant_grouping=True,
),
SnQLFunction(
"sum",
required_args=[NumericColumn("column")],
snql_aggregate=lambda args, alias: Function("sum", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
combinators=[
SnQLArrayCombinator("column", NumericColumn.numeric_array_columns)
],
),
SnQLFunction(
"any",
required_args=[SnQLFieldColumn("column")],
# Not actually using `any` so that this function returns consistent results
snql_aggregate=lambda args, alias: Function("min", [args["column"]], alias),
result_type_fn=self.reflective_result_type(),
redundant_grouping=True,
),
SnQLFunction(
"eps",
snql_aggregate=lambda args, alias: function_aliases.resolve_eps(
args, alias, self.builder
),
optional_args=[IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
SnQLFunction(
"epm",
snql_aggregate=lambda args, alias: function_aliases.resolve_epm(
args, alias, self.builder
),
optional_args=[IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
SnQLFunction(
"compare_numeric_aggregate",
required_args=[
FunctionAliasArg("aggregate_alias"),
ConditionArg("condition"),
NumberRange("value", 0, None),
],
calculated_args=[
{
"name": "aggregate_function",
"fn": normalize_percentile_alias,
}
],
snql_aggregate=lambda args, alias: Function(
args["condition"],
[self.builder.resolve_function(args["aggregate_function"]), args["value"]],
alias,
),
default_result_type="number",
),
SnQLFunction(
"array_join",
required_args=[ColumnArg("column")],
snql_column=lambda args, alias: Function("arrayJoin", [args["column"]], alias),
default_result_type="string",
private=True,
),
SnQLFunction(
"absolute_correlation",
snql_aggregate=lambda _, alias: Function(
"abs",
[
Function(
"corr",
[
Function("toUnixTimestamp", [self.builder.column("timestamp")]),
self.builder.column("transaction.duration"),
],
),
],
alias,
),
default_result_type="number",
),
SnQLFunction(
"histogram",
required_args=[
NumericColumn("column", allow_array_value=True),
# the bucket_size and start_offset should already be adjusted
# using the multiplier before it is passed here
NumberRange("bucket_size", 0, None),
NumberRange("start_offset", 0, None),
NumberRange("multiplier", 1, None),
],
# floor((x * multiplier - start_offset) / bucket_size) * bucket_size + start_offset
snql_column=lambda args, alias: Function(
"plus",
[
Function(
"multiply",
[
Function(
"floor",
[
Function(
"divide",
[
Function(
"minus",
[
Function(
"multiply",
[
args["column"],
args["multiplier"],
],
),
args["start_offset"],
],
),
args["bucket_size"],
],
),
],
),
args["bucket_size"],
],
),
args["start_offset"],
],
alias,
),
default_result_type="number",
private=True,
),
SnQLFunction(
"spans_histogram",
required_args=[
SnQLStringArg("spans_op", True, True),
SnQLStringArg("spans_group"),
# the bucket_size and start_offset should already be adjusted
# using the multiplier before it is passed here
NumberRange("bucket_size", 0, None),
NumberRange("start_offset", 0, None),
NumberRange("multiplier", 1, None),
],
snql_column=lambda args, alias: Function(
"plus",
[
Function(
"multiply",
[
Function(
"floor",
[
Function(
"divide",
[
Function(
"minus",
[
Function(
"multiply",
[
Function(
"arrayJoin",
[
Function(
"arrayFilter",
[
Lambda(
[
"x",
"y",
"z",
],
Function(
"and",
[
Function(
"equals",
[
Identifier(
"y"
),
args[
"spans_op"
],
],
),
Function(
"equals",
[
Identifier(
"z",
),
args[
"spans_group"
],
],
),
],
),
),
Column(
"spans.exclusive_time"
),
Column(
"spans.op"
),
Column(
"spans.group"
),
],
)
],
),
args["multiplier"],
],
),
args["start_offset"],
],
),
args["bucket_size"],
],
),
],
),
args["bucket_size"],
],
),
args["start_offset"],
],
alias,
),
default_result_type="number",
private=True,
),
SnQLFunction(
"fn_span_count",
required_args=[
SnQLStringArg("spans_op", True, True),
SnQLStringArg("fn"),
],
snql_column=lambda args, alias: Function(
args["fn"],
[
Function(
"length",
[
Function(
"arrayFilter",
[
Lambda(
[
"x",
],
Function(
"equals",
[
Identifier("x"),
args["spans_op"],
],
),
),
Column("spans.op"),
],
)
],
"span_count",
)
],
alias,
),
),
SnQLFunction(
"floored_epm",
snql_aggregate=lambda args, alias: Function(
"pow",
[
10,
Function(
"floor",
[
Function(
"log10",
[
Function(
"divide",
[
Function("count", []),
Function("divide", [args["interval"], 60]),
],
)
],
)
],
),
],
alias,
),
optional_args=[IntervalDefault("interval", 1, None)],
default_result_type="number",
),
SnQLFunction(
"fn_span_exclusive_time",
required_args=[
SnQLStringArg("spans_op", True, True),
SnQLStringArg("spans_group"),
SnQLStringArg("fn"),
],
snql_column=lambda args, alias: Function(
args["fn"],
[
Function(
"arrayJoin",
[
Function(
"arrayFilter",
[
Lambda(
[
"x",
"y",
"z",
],
Function(
"and",
[
Function(
"equals",
[
Identifier("y"),
args["spans_op"],
],
),
Function(
"equals",
[
Identifier(
"z",
),
args["spans_group"],
],
),
],
),
),
Column("spans.exclusive_time"),
Column("spans.op"),
Column("spans.group"),
],
)
],
"exclusive_time",
)
],
alias,
),
default_result_type="number",
private=True,
),
SnQLFunction(
"performance_score",
required_args=[
NumericColumn("column"),
],
snql_aggregate=self._resolve_web_vital_score_function,
default_result_type="number",
),
SnQLFunction(
"opportunity_score",
required_args=[
NumericColumn("column"),
],
snql_aggregate=self._resolve_web_vital_opportunity_score_function,
default_result_type="number",
),
SnQLFunction(
"count_scores",
required_args=[
NumericColumn("column"),
],
snql_aggregate=self._resolve_count_scores_function,
default_result_type="integer",
),
SnQLFunction(
"examples",
required_args=[NumericColumn("column")],
optional_args=[with_default(1, NumberRange("count", 1, None))],
snql_aggregate=self._resolve_random_samples,
private=True,
),
SnQLFunction(
"rounded_timestamp",
required_args=[IntervalDefault("interval", 1, None)],
snql_column=lambda args, alias: function_aliases.resolve_rounded_timestamp(
args["interval"], alias
),
private=True,
),
SnQLFunction(
"column_hash",
# TODO: this supports only one column, but hash functions can support arbitrary parameters
required_args=[ColumnArg("column")],
snql_aggregate=lambda args, alias: Function(
"farmFingerprint64", # farmFingerprint64 aka farmHash64 is a newer, faster replacement for cityHash64
[args["column"]],
alias,
),
default_result_type="integer",
private=True,
),
SnQLFunction(
"upsampled_count",
required_args=[],
snql_aggregate=lambda args, alias: Function(
"toInt64",
[
Function(
"sum",
[
Function(
"ifNull",
[
Column(
"sample_weight",
entity=Entity("events", alias="events"),
),
1,
],
)
],
)
],
alias,
),
default_result_type="integer",
),
SnQLFunction(
"upsampled_eps",
snql_aggregate=lambda args, alias: function_aliases.resolve_upsampled_eps(
args, alias, self.builder
),
optional_args=[IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
SnQLFunction(
"upsampled_epm",
snql_aggregate=lambda args, alias: function_aliases.resolve_upsampled_epm(
args, alias, self.builder
),
optional_args=[IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
]
}
for alias, name in FUNCTION_ALIASES.items():
function_converter[alias] = function_converter[name].alias_as(alias)
return function_converter
@property
def orderby_converter(self) -> Mapping[str, Callable[[Direction], OrderBy]]:
return {
PROJECT_ALIAS: self._project_slug_orderby_converter,
PROJECT_NAME_ALIAS: self._project_slug_orderby_converter,
}
def _project_slug_orderby_converter(self, direction: Direction) -> OrderBy:
project_ids = {project_id for project_id in self.builder.params.project_ids}
# Try to reduce the size of the transform by using any existing conditions on projects
# Do not optimize projects list if conditions contain OR operator
if not self.builder.has_or_condition and len(self.builder.projects_to_filter) > 0:
project_ids &= self.builder.projects_to_filter
# Order by id so queries are consistent
projects = Project.objects.filter(id__in=project_ids).values("slug", "id").order_by("id")
return OrderBy(
Function(
"transform",
[
self.builder.column("project.id"),
[project["id"] for project in projects],
[project["slug"] for project in projects],
"",
],
),
direction,
)
# Field Aliases
def _resolve_project_slug_alias(self, alias: str) -> SelectType:
return field_aliases.resolve_project_slug_alias(self.builder, alias)
def _resolve_issue_id_alias(self, _: str) -> SelectType:
"""The state of having no issues is represented differently on transactions vs
other events. On the transactions table, it is represented by 0 whereas it is
represented by NULL everywhere else. We use coalesce here so we can treat this
consistently
"""
return Function("coalesce", [self.builder.column("issue.id"), 0], ISSUE_ID_ALIAS)
def _resolve_timestamp_to_hour_alias(self, _: str) -> SelectType:
return Function(
"toStartOfHour", [self.builder.column("timestamp")], TIMESTAMP_TO_HOUR_ALIAS
)
def _resolve_timestamp_to_day_alias(self, _: str) -> SelectType:
return Function("toStartOfDay", [self.builder.column("timestamp")], TIMESTAMP_TO_DAY_ALIAS)
def _resolve_user_display_alias(self, _: str) -> SelectType:
columns = ["user.email", "user.username", "user.id", "user.ip"]
return Function(
"coalesce", [self.builder.column(column) for column in columns], USER_DISPLAY_ALIAS
)
def _resolve_http_status_code(self, _: str) -> SelectType:
return Function(
"coalesce",
[
Function("nullif", [self.builder.column("http.status_code"), ""]),
self.builder.column("tags[http.status_code]"),
],
HTTP_STATUS_CODE_ALIAS,
)
@cached_property
def _resolve_project_threshold_config(self) -> SelectType:
project_thresholds = {}
project_threshold_config_keys = []
project_threshold_config_values = []
project_threshold_override_config_keys = []
project_threshold_override_config_values = []
org_id = self.builder.params.organization_id
project_ids = self.builder.params.project_ids
if org_id is not None:
project_threshold_configs = (
ProjectTransactionThreshold.objects.filter(
organization_id=org_id,
project_id__in=project_ids,
)
.order_by("project_id")
.values_list("project_id", "threshold", "metric")
)
transaction_threshold_configs = (
ProjectTransactionThresholdOverride.objects.filter(
organization_id=org_id,
project_id__in=project_ids,
)
.order_by("project_id")
.values_list("transaction", "project_id", "threshold", "metric")
)
num_project_thresholds = project_threshold_configs.count()
sentry_sdk.set_tag("project_threshold.count", num_project_thresholds)
sentry_sdk.set_tag(
"project_threshold.count.grouped",
format_grouped_length(num_project_thresholds, [10, 100, 250, 500]),
)
num_transaction_thresholds = transaction_threshold_configs.count()
sentry_sdk.set_tag("txn_threshold.count", num_transaction_thresholds)
sentry_sdk.set_tag(
"txn_threshold.count.grouped",
format_grouped_length(num_transaction_thresholds, [10, 100, 250, 500]),
)
if (
num_project_thresholds + num_transaction_thresholds
> MAX_QUERYABLE_TRANSACTION_THRESHOLDS
):
raise InvalidSearchQuery(
f"Exceeded {MAX_QUERYABLE_TRANSACTION_THRESHOLDS} configured transaction thresholds limit, try with fewer Projects."
)
# Arrays need to have toUint64 casting because clickhouse will define the type as the narrowest possible type
# that can store listed argument types, which means the comparison will fail because of mismatched types
for project_id, threshold, metric in project_threshold_configs:
metric_name = TRANSACTION_METRICS[metric]
if (
threshold == DEFAULT_PROJECT_THRESHOLD
and metric_name == DEFAULT_PROJECT_THRESHOLD_METRIC
):
# small optimization, if the configuration is equal to the default,
# we can skip it in the final query
continue
project_thresholds[project_id] = (metric_name, threshold)
project_threshold_config_keys.append(Function("toUInt64", [project_id]))
project_threshold_config_values.append((metric_name, threshold))
for transaction, project_id, threshold, metric in transaction_threshold_configs:
metric_name = TRANSACTION_METRICS[metric]
if (
project_id in project_thresholds
and threshold == project_thresholds[project_id][1]
and metric_name == project_thresholds[project_id][0]
):
# small optimization, if the configuration is equal to the project
# configs, we can skip it in the final query
continue
elif (
project_id not in project_thresholds
and threshold == DEFAULT_PROJECT_THRESHOLD
and metric_name == DEFAULT_PROJECT_THRESHOLD_METRIC
):
# small optimization, if the configuration is equal to the default
# and no project configs were set, we can skip it in the final query
continue
project_threshold_override_config_keys.append(
(Function("toUInt64", [project_id]), transaction)
)
project_threshold_override_config_values.append((metric_name, threshold))
project_threshold_config_index: SelectType = Function(
"indexOf",
[
project_threshold_config_keys,
self.builder.column("project_id"),
],
PROJECT_THRESHOLD_CONFIG_INDEX_ALIAS,
)
project_threshold_override_config_index: SelectType = Function(
"indexOf",
[
project_threshold_override_config_keys,
(self.builder.column("project_id"), self.builder.column("transaction")),
],
PROJECT_THRESHOLD_OVERRIDE_CONFIG_INDEX_ALIAS,
)
def _project_threshold_config(alias: str | None = None) -> SelectType:
if project_threshold_config_keys and project_threshold_config_values:
return Function(
"if",
[
Function(
"equals",
[
project_threshold_config_index,
0,
],
),
(DEFAULT_PROJECT_THRESHOLD_METRIC, DEFAULT_PROJECT_THRESHOLD),
Function(
"arrayElement",
[
project_threshold_config_values,
project_threshold_config_index,
],
),
],
alias,
)
return Function(
"tuple",
[DEFAULT_PROJECT_THRESHOLD_METRIC, DEFAULT_PROJECT_THRESHOLD],
alias,
)
if project_threshold_override_config_keys and project_threshold_override_config_values:
return Function(
"if",
[
Function(
"equals",
[
project_threshold_override_config_index,
0,
],
),
_project_threshold_config(),
Function(
"arrayElement",
[
project_threshold_override_config_values,
project_threshold_override_config_index,
],
),
],
PROJECT_THRESHOLD_CONFIG_ALIAS,
)
return _project_threshold_config(PROJECT_THRESHOLD_CONFIG_ALIAS)
def _resolve_team_key_transaction_alias(self, _: str) -> SelectType:
return field_aliases.resolve_team_key_transaction_alias(self.builder)
def _resolve_error_handled_alias(self, _: str) -> SelectType:
return Function("isHandled", [], ERROR_HANDLED_ALIAS)
def _resolve_error_unhandled_alias(self, _: str) -> SelectType:
return Function("notHandled", [], ERROR_UNHANDLED_ALIAS)
def _project_threshold_multi_if_function(self) -> SelectType:
"""Accessed by `_resolve_apdex_function` and `_resolve_count_miserable_function`,
this returns the right duration value (for example, lcp or duration) based
on project or transaction thresholds that have been configured by the user.
"""
return Function(
"multiIf",
[
Function(
"equals",
[
Function(
"tupleElement",
[self.builder.resolve_field_alias("project_threshold_config"), 1],
),
"lcp",
],
),
self.builder.column("measurements.lcp"),
self.builder.column("transaction.duration"),
],
)
def _resolve_aliased_division(self, dividend: str, divisor: str, alias: str) -> SelectType:
"""Given public aliases resolve division"""
return function_aliases.resolve_division(
self.builder.column(dividend), self.builder.column(divisor), alias
)
def _resolve_measurements_frames_slow_rate(self, _: str) -> SelectType:
return self._resolve_aliased_division(
"measurements.frames_slow", "measurements.frames_total", MEASUREMENTS_FRAMES_SLOW_RATE
)
def _resolve_measurements_frames_frozen_rate(self, _: str) -> SelectType:
return self._resolve_aliased_division(
"measurements.frames_frozen",
"measurements.frames_total",
MEASUREMENTS_FRAMES_FROZEN_RATE,
)
def _resolve_measurements_stall_percentage(self, _: str) -> SelectType:
return self._resolve_aliased_division(
"measurements.stall_total_time", "transaction.duration", MEASUREMENTS_STALL_PERCENTAGE
)
def _resolve_total_count(self, alias: str) -> SelectType:
"""This must be cached since it runs another query"""
self.builder.requires_other_aggregates = True
if self.total_count is not None:
return Function("toUInt64", [self.total_count], alias)
total_query = discover.DiscoverQueryBuilder(
dataset=self.builder.dataset,
params={},
snuba_params=self.builder.params,
selected_columns=["count()"],
)
total_query.columns += self.builder.resolve_groupby()
total_query.where = self.builder.where
total_results = total_query.run_query(Referrer.API_DISCOVER_TOTAL_COUNT_FIELD.value)
results = total_query.process_results(total_results)
if len(results["data"]) != 1:
self.total_count = 0
return Function("toUInt64", [0], alias)
self.total_count = results["data"][0]["count"]
return Function("toUInt64", [self.total_count], alias)
def _resolve_total_sum_transaction_duration(self, alias: str) -> SelectType:
    """Resolve the overall sum(transaction.duration) via a separate query.

    Cached on the instance since it requires an extra snuba query.
    """
    self.builder.requires_other_aggregates = True
    if self.total_sum_transaction_duration is not None:
        return Function("toFloat64", [self.total_sum_transaction_duration], alias)
    # TODO[Shruthi]: Figure out parametrization of the args to sum()
    total_query = discover.DiscoverQueryBuilder(
        dataset=self.builder.dataset,
        params={},
        snuba_params=self.builder.params,
        selected_columns=["sum(transaction.duration)"],
    )
    # Mirror the outer query's grouping and filtering so the total lines up.
    total_query.columns += self.builder.resolve_groupby()
    total_query.where = self.builder.where
    total_results = total_query.run_query(
        Referrer.API_DISCOVER_TOTAL_SUM_TRANSACTION_DURATION_FIELD.value
    )
    results = total_query.process_results(total_results)
    if len(results["data"]) != 1:
        self.total_sum_transaction_duration = 0
        return Function("toFloat64", [0], alias)
    self.total_sum_transaction_duration = results["data"][0]["sum_transaction_duration"]
    return Function("toFloat64", [self.total_sum_transaction_duration], alias)
def _resolve_device_class(self, _: str) -> SelectType:
    """Bucket tags[device.class] into low/medium/high via a multiIf, else NULL."""
    device_class = self.builder.column("tags[device.class]")
    branches: list = []
    # Order matters: multiIf evaluates condition/result pairs in sequence.
    for bucket in ("low", "medium", "high"):
        branches.append(Function("in", [device_class, list(DEVICE_CLASS[bucket])]))
        branches.append(bucket)
    branches.append(None)
    return Function("multiIf", branches, DEVICE_CLASS_ALIAS)
# Functions
def _resolve_apdex_function(self, args: Mapping[str, str], alias: str) -> SelectType:
    """Resolve apdex() as (satisfied + tolerable / 2) / total.

    Uses the fixed threshold passed to the function when given, otherwise the
    per-project threshold config (lcp- or duration-based).
    """
    if args["satisfaction"]:
        column = self.builder.column("transaction.duration")
        satisfaction = int(args["satisfaction"])
    else:
        column = self._project_threshold_multi_if_function()
        satisfaction = Function(
            "tupleElement",
            [self.builder.resolve_field_alias("project_threshold_config"), 2],
        )
    count_satisfaction = Function(  # countIf(column <= satisfaction)
        "countIf", [Function("lessOrEquals", [column, satisfaction])]
    )
    count_tolerable = Function(  # countIf(satisfaction < column <= satisfaction*4)
        "countIf",
        [
            Function(
                "and",
                [
                    Function("greater", [column, satisfaction]),
                    Function("lessOrEquals", [column, Function("multiply", [satisfaction, 4])]),
                ],
            )
        ],
    )
    count_tolerable_div_2 = Function("divide", [count_tolerable, 2])
    count_total = Function(  # Only count if the column exists (doing >=0 covers that)
        "countIf", [Function("greaterOrEquals", [column, 0])]
    )
    return function_aliases.resolve_division(  # (satisfied + tolerable/2)/(total)
        Function(
            "plus",
            [
                count_satisfaction,
                count_tolerable_div_2,
            ],
        ),
        count_total,
        alias,
        # TODO(zerofill): This behaviour is incorrect if we remove zerofilling
        # But need to do something reasonable here since we'll get a null row otherwise
        fallback=0,
    )
def _resolve_web_vital_function(
    self, args: Mapping[str, str | Column], alias: str
) -> SelectType:
    """Count events whose web-vital measurement falls in the requested quality bucket.

    Buckets (good/meh/poor/any) are bounded by the VITAL_THRESHOLDS entries for
    the vital in question.
    """
    column = args["column"]
    quality = args["quality"].lower()
    assert isinstance(column, Column), "first arg to count_web_vitals must be a column"
    if column.subscriptable != "measurements":
        raise InvalidSearchQuery("count_web_vitals only supports measurements")
    elif column.key not in VITAL_THRESHOLDS:
        raise InvalidSearchQuery(f"count_web_vitals doesn't support {column.key}")
    if quality == "good":
        # good: strictly below the "meh" threshold
        return Function(
            "countIf",
            [Function("less", [column, VITAL_THRESHOLDS[column.key]["meh"]])],
            alias,
        )
    elif quality == "meh":
        # meh: within [meh, poor)
        return Function(
            "countIf",
            [
                Function(
                    "and",
                    [
                        Function(
                            "greaterOrEquals", [column, VITAL_THRESHOLDS[column.key]["meh"]]
                        ),
                        Function("less", [column, VITAL_THRESHOLDS[column.key]["poor"]]),
                    ],
                )
            ],
            alias,
        )
    elif quality == "poor":
        # poor: at or above the poor threshold
        return Function(
            "countIf",
            [
                Function(
                    "greaterOrEquals",
                    [
                        column,
                        VITAL_THRESHOLDS[column.key]["poor"],
                    ],
                )
            ],
            alias,
        )
    elif quality == "any":
        # any: the measurement exists (>= 0 covers presence)
        return Function(
            "countIf",
            [
                Function(
                    "greaterOrEquals",
                    [
                        column,
                        0,
                    ],
                )
            ],
            alias,
        )
    # NOTE(review): unknown quality values fall through to None — presumably
    # validated upstream; confirm callers never pass other values.
    return None
def _resolve_count_miserable_function(self, args: Mapping[str, str], alias: str) -> SelectType:
    """Count unique ``column`` values on events slower than the miserable cutoff.

    The cutoff is the explicit ``tolerated`` argument when given, otherwise
    4x the configured project threshold.
    """
    if args["satisfaction"]:
        lhs = self.builder.column("transaction.duration")
        rhs = int(args["tolerated"])
    else:
        lhs = self._project_threshold_multi_if_function()
        # tupleElement index 2 is the configured threshold value; multiply by 4.
        rhs = Function(
            "multiply",
            [
                Function(
                    "tupleElement",
                    [self.builder.resolve_field_alias("project_threshold_config"), 2],
                ),
                4,
            ],
        )
    col = args["column"]
    return Function("uniqIf", [col, Function("greater", [lhs, rhs])], alias)
def _resolve_user_misery_function(self, args: Mapping[str, str], alias: str) -> SelectType:
    """Resolve user_misery as a smoothed ratio of miserable users to all users.

    Computes (count_miserable + alpha) / (unique_users + parameter_sum); the
    denominator's uniq count is nullified when 0 so the whole expression falls
    back to 0 via ifNull instead of dividing by zero.
    """
    if satisfaction := args["satisfaction"]:
        column = self.builder.column("transaction.duration")
        count_miserable_agg = self.builder.resolve_function(
            f"count_miserable(user,{satisfaction})"
        )
    else:
        column = self._project_threshold_multi_if_function()
        count_miserable_agg = self.builder.resolve_function("count_miserable(user)")
    return Function(
        "ifNull",
        [
            Function(
                "divide",
                [
                    Function(
                        "plus",
                        [
                            count_miserable_agg,
                            args["alpha"],
                        ],
                    ),
                    Function(
                        "plus",
                        [
                            Function(
                                "nullIf",
                                [
                                    Function(  # Only count if the column exists (doing >=0 covers that)
                                        "uniqIf",
                                        [
                                            self.builder.column("user"),
                                            Function("greater", [column, 0]),
                                        ],
                                    ),
                                    0,
                                ],
                            ),
                            args["parameter_sum"],
                        ],
                    ),
                ],
            ),
            0,
        ],
        alias,
    )
def _resolve_count_if(self, args: Mapping[str, str], alias: str) -> SelectType:
    """Resolve count_if(), using array membership (`has`) for array-typed fields."""
    condition = args["normalized_condition"]
    if args["is_array_field"]:
        membership = Function(
            "has",
            [
                args["column"],
                args["typed_value"],
            ],
        )
        if condition == "notEquals":
            # has(...) == 0 means the value is absent from the array.
            return Function(
                "countIf",
                [Function("equals", [membership, 0])],
                alias,
            )
        return Function(
            "countIf",
            [membership],
            alias,
        )
    comparison = Function(
        condition,
        [
            args["column"],
            args["typed_value"],
        ],
    )
    return Function("countIf", [comparison], alias)
def _resolve_percentile(
    self,
    args: Mapping[str, str | Column | SelectType | int | float],
    alias: str,
    fixed_percentile: float | None = None,
) -> SelectType:
    """Resolve a percentile aggregate; p100 degenerates to max()."""
    if fixed_percentile == 1:
        return Function("max", [args["column"]], alias)
    percentile = fixed_percentile if fixed_percentile is not None else args["percentile"]
    return Function(f"quantile({percentile})", [args["column"]], alias)
def _resolve_web_vital_score_function(
    self,
    args: Mapping[str, Column],
    alias: str,
) -> SelectType:
    """Resolve performance_score as sum(score) / sum(weight), clamped to [0, 1]."""
    column = args["column"]
    if column.key not in [
        "score.lcp",
        "score.fcp",
        "score.fid",
        "score.cls",
        "score.ttfb",
    ]:
        raise InvalidSearchQuery(
            "performance_score only supports performance score measurements"
        )
    # e.g. measurements.score.lcp -> measurements.score.weight.lcp
    weight_column = self.builder.column(
        "measurements." + column.key.replace("score", "score.weight")
    )
    # greatest(least(sum(score)/sum(weight), 1.0), 0.0) clamps the ratio.
    return Function(
        "greatest",
        [
            Function(
                "least",
                [
                    Function(
                        "divide",
                        [
                            Function(
                                "sum",
                                [column],
                            ),
                            Function(
                                "sum",
                                [weight_column],
                            ),
                        ],
                    ),
                    1.0,
                ],
            ),
            0.0,
        ],
        alias,
    )
def _resolve_web_vital_opportunity_score_function(
    self,
    args: Mapping[str, Column],
    alias: str,
) -> SelectType:
    """Resolve opportunity_score as sum(weight - least(1, score)).

    For score.total the weight is the constant 1.
    """
    column = args["column"]
    if column.key not in [
        "score.lcp",
        "score.fcp",
        "score.fid",
        "score.cls",
        "score.ttfb",
        "score.total",
    ]:
        raise InvalidSearchQuery(
            "opportunity_score only supports performance score measurements"
        )
    weight_column = (
        1
        if column.key == "score.total"
        else self.builder.column("measurements." + column.key.replace("score", "score.weight"))
    )
    return Function(
        "sum",
        [Function("minus", [weight_column, Function("least", [1, column])])],
        alias,
    )
def _resolve_count_scores_function(self, args: Mapping[str, Column], alias: str) -> SelectType:
    """Count rows where the given performance score measurement is present."""
    column = args["column"]
    supported_keys = (
        "score.total",
        "score.lcp",
        "score.fcp",
        "score.fid",
        "score.cls",
        "score.ttfb",
    )
    if column.key not in supported_keys:
        raise InvalidSearchQuery("count_scores only supports performance score measurements")
    present = Function("isNotNull", [column])
    return Function("countIf", [present], alias)
def _resolve_random_samples(
    self,
    args: Mapping[str, str | Column | SelectType | int | float],
    alias: str,
) -> SelectType:
    """Resolve a random-sample aggregate over (timestamp, span_id, column) tuples."""
    # Pagination of the outer query is forwarded to the sampler.
    offset = 0 if self.builder.offset is None else self.builder.offset.offset
    limit = 0 if self.builder.limit is None else self.builder.limit.limit
    return function_aliases.resolve_random_samples(
        [
            # DO NOT change the order of these columns as it
            # changes the order of the tuple in the response
            # which WILL cause errors where it assumes this
            # order
            self.builder.resolve_column("timestamp"),
            self.builder.resolve_column("span_id"),
            args["column"],
        ],
        alias,
        offset,
        limit,
        size=int(args["count"]),
    )
# Query Filters
# Thin wrappers delegating shared filter handling to ``filter_aliases``.
def _project_slug_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    return filter_aliases.project_slug_converter(self.builder, search_filter)

def _release_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    return filter_aliases.release_filter_converter(self.builder, search_filter)

def _release_stage_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    return filter_aliases.release_stage_filter_converter(self.builder, search_filter)

def _semver_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    return filter_aliases.semver_filter_converter(self.builder, search_filter)

def _semver_package_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    return filter_aliases.semver_package_filter_converter(self.builder, search_filter)

def _semver_build_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    return filter_aliases.semver_build_filter_converter(self.builder, search_filter)
def _issue_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    """Translate issue short-id filters into an equivalent issue.id condition."""
    if self.builder.builder_config.skip_field_validation_for_entity_subscription_deletion:
        return None
    operator = search_filter.operator
    value = to_list(search_filter.value.value)
    # `unknown` is a special value for when there is no issue associated with the event
    group_short_ids = [v for v in value if v and v != "unknown"]
    general_group_filter_values = [0 for v in value if not v or v == "unknown"]
    if group_short_ids and self.builder.params.organization is not None:
        try:
            groups = Group.objects.by_qualified_short_id_bulk(
                self.builder.params.organization.id,
                group_short_ids,
            )
        except Exception:
            # NOTE(review): broad catch — presumably guards lookup failures on
            # malformed short ids; confirm the expected exception type.
            raise InvalidSearchQuery(f"Invalid value '{group_short_ids}' for 'issue:' filter")
        else:
            general_group_filter_values.extend(sorted([group.id for group in groups]))
    if general_group_filter_values:
        return self.builder.convert_search_filter_to_condition(
            SearchFilter(
                SearchKey("issue.id"),
                operator,
                SearchValue(
                    general_group_filter_values
                    if search_filter.is_in_filter
                    else general_group_filter_values[0]
                ),
            )
        )
    return None
def _message_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    # Shared free-text message filter handling lives in filter_aliases.
    return filter_aliases.message_filter_converter(self.builder, search_filter)
def _trace_parent_span_converter(self, search_filter: SearchFilter) -> WhereType | None:
    """Answer has / !has on trace.parent_span from the contexts key array."""
    is_existence_check = (
        search_filter.operator in ("=", "!=") and search_filter.value.value == ""
    )
    if not is_existence_check:
        return self.builder.default_filter_converter(search_filter)
    # "= ''" means !has (the context key must be absent), hence the inverted op.
    op = Op.NEQ if search_filter.operator == "=" else Op.EQ
    return Condition(
        Function("has", [Column("contexts.key"), TRACE_PARENT_SPAN_CONTEXT]),
        op,
        1,
    )
def _transaction_status_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    # transaction.status shares the span-status value mapping.
    return filter_aliases.span_status_filter_converter(self.builder, search_filter)
def _performance_issue_ids_filter_converter(
    self, search_filter: SearchFilter
) -> WhereType | None:
    """Filter on the array-valued performance issue ids column via has/hasAny."""
    name = search_filter.key.name
    operator = search_filter.operator
    value = to_list(search_filter.value.value)
    value_list_as_ints = []
    lhs = self.builder.column(name)
    for v in value:
        if isinstance(v, str) and v.isdigit():
            value_list_as_ints.append(int(v))
        elif isinstance(v, int):
            value_list_as_ints.append(v)
        elif isinstance(v, str) and not v:
            # Empty string stands for "no issue"; encoded as id 0.
            value_list_as_ints.append(0)
        else:
            raise InvalidSearchQuery("performance.issue_ids should be a number")
    if search_filter.is_in_filter:
        # IN / NOT IN: match if any of the ids is present in the array.
        return Condition(
            Function("hasAny", [lhs, value_list_as_ints]),
            Op.EQ if operator == "IN" else Op.NEQ,
            1,
        )
    elif search_filter.value.raw_value == "":
        # has / !has: check whether the array is (non-)empty.
        return Condition(
            Function("notEmpty", [lhs]),
            Op.EQ if operator == "!=" else Op.NEQ,
            1,
        )
    else:
        return Condition(
            Function("has", [lhs, value_list_as_ints[0]]),
            Op(search_filter.operator),
            1,
        )
def _issue_id_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    """Convert issue.id filters; empty values are normalized to id 0."""
    lhs = self.builder.column(search_filter.key.name)
    value = search_filter.value.value
    # "has" style queries: either the raw value is empty, or an IN list
    # contains at least one falsy entry.
    is_has_query = search_filter.value.raw_value == "" or (
        search_filter.is_in_filter and any(not v for v in value)
    )
    if is_has_query:
        rhs = [v or 0 for v in value] if search_filter.is_in_filter else 0
    else:
        rhs = value
    # Skip isNull check on group_id value as we want to
    # allow snuba's prewhere optimizer to find this condition.
    return Condition(lhs, Op(search_filter.operator), rhs)
def _error_unhandled_filter_converter(
    self,
    search_filter: SearchFilter,
) -> WhereType | None:
    """Convert error.unhandled filters; accepted values are 1 and 0."""
    value = search_filter.value.value
    # Treat has filter as equivalent to handled
    if search_filter.value.raw_value == "":
        expected = 0 if search_filter.operator == "!=" else 1
        return Condition(Function("isHandled", []), Op.EQ, expected)
    if value in ("1", 1):
        handled_fn = "notHandled"
    elif value in ("0", 0):
        handled_fn = "isHandled"
    else:
        raise InvalidSearchQuery(
            "Invalid value for error.unhandled condition. Accepted values are 1, 0"
        )
    return Condition(Function(handled_fn, []), Op.EQ, 1)
def _error_handled_filter_converter(
    self,
    search_filter: SearchFilter,
) -> WhereType | None:
    """Convert error.handled filters; accepted values are 1 and 0."""
    value = search_filter.value.value
    # Treat has filter as equivalent to handled
    if search_filter.value.raw_value == "":
        expected = 1 if search_filter.operator == "!=" else 0
        return Condition(Function("isHandled", []), Op.EQ, expected)
    if value in ("1", 1):
        handled_fn = "isHandled"
    elif value in ("0", 0):
        handled_fn = "notHandled"
    else:
        raise InvalidSearchQuery(
            "Invalid value for error.handled condition. Accepted values are 1, 0"
        )
    return Condition(Function(handled_fn, []), Op.EQ, 1)
def _key_transaction_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    # Delegates team key-transaction filtering, which needs builder params for scoping.
    return filter_aliases.team_key_transaction_filter(self.builder, search_filter)
def _event_type_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    """event.type:transaction is a no-op on the transactions dataset."""
    if self.builder.dataset == Dataset.Transactions:
        redundant_values = [
            "transaction",
            ["transaction"],
        ]
        if (
            search_filter.operator in ["=", "IN"]
            and search_filter.value.value in redundant_values
        ):
            return None
    return self.builder.default_filter_converter(search_filter)
def _transaction_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
    """On the transactions dataset, transaction is never null so has-filters collapse."""
    if self.builder.dataset == Dataset.Transactions:
        operator = search_filter.operator
        value = search_filter.value.value
        if operator in ("=", "!=") and value == "":
            # !has:transaction
            if operator == "=":
                raise InvalidSearchQuery(
                    "All events have a transaction so this query wouldn't return anything"
                )
            else:
                # All events have a "transaction" since we map null -> unparam so no need to filter
                return None
    return self.builder.default_filter_converter(search_filter)
| DiscoverDatasetConfig |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/_stubs.py | {
"start": 587,
"end": 673
} | class ____(TypedDict):
DisplayName: Optional[str]
ID: Optional[str]
| OwnerTypeDef |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/context.py | {
"start": 4214,
"end": 6984
} | class ____(NamedTuple):
"""
Context of parsing for the Dag.
If these values are not None, they will contain the specific Dag and Task ID that Airflow is requesting to
execute. You can use these for optimizing dynamically generated Dag files.
You can obtain the current values via :py:func:`.get_parsing_context`.
"""
dag_id: str | None
task_id: str | None
# Environment variable names read by get_parsing_context() below.
_AIRFLOW_PARSING_CONTEXT_DAG_ID = "_AIRFLOW_PARSING_CONTEXT_DAG_ID"
_AIRFLOW_PARSING_CONTEXT_TASK_ID = "_AIRFLOW_PARSING_CONTEXT_TASK_ID"
def get_parsing_context() -> AirflowParsingContext:
    """Return the current (Dag) parsing context info.

    Both fields are None when the corresponding environment variables are unset.
    """
    dag_id = os.environ.get(_AIRFLOW_PARSING_CONTEXT_DAG_ID)
    task_id = os.environ.get(_AIRFLOW_PARSING_CONTEXT_TASK_ID)
    return AirflowParsingContext(dag_id=dag_id, task_id=task_id)
# The 'template' argument is typed as Any because the jinja2.Template is too
# dynamic to be effectively type-checked.
def render_template(template: Any, context: MutableMapping[str, Any], *, native: bool) -> Any:
    """
    Render a Jinja2 template with given Airflow context.

    The default implementation of ``jinja2.Template.render()`` converts the
    input context into dict eagerly many times, which triggers deprecation
    messages in our custom context class. This takes the implementation apart
    and retain the context mapping without resolving instead.

    :param template: A Jinja2 template to render.
    :param context: The Airflow task context to render the template with.
    :param native: If set to *True*, render the template into a native type. A
        Dag can enable this with ``render_template_as_native_obj=True``.
    :returns: The render result.
    """
    # Shallow-copy so the caller's mapping is not mutated by the globals merge.
    context = copy.copy(context)
    env = template.environment
    if template.globals:
        # Template globals only fill keys the context does not already define.
        context.update((k, v) for k, v in template.globals.items() if k not in context)
    try:
        # Drive the template's root render function directly so the context
        # mapping is passed through without being resolved into a dict.
        nodes = template.root_render_func(env.context_class(env, context, template.name, template.blocks))
    except Exception:
        env.handle_exception()  # Rewrite traceback to point to the template.
    if native:
        import jinja2.nativetypes

        return jinja2.nativetypes.native_concat(nodes)
    return "".join(nodes)
def render_template_as_native(template: jinja2.Template, context: Context) -> Any:
    """Shorthand to ``render_template(native=True)`` with better typing support."""
    # The cast is typing-only; Context already behaves as a mutable mapping.
    return render_template(template, cast("MutableMapping[str, Any]", context), native=True)


def render_template_to_string(template: jinja2.Template, context: Context) -> str:
    """Shorthand to ``render_template(native=False)`` with better typing support."""
    return render_template(template, cast("MutableMapping[str, Any]", context), native=False)
| AirflowParsingContext |
python | dask__dask | dask/dataframe/dask_expr/_collection.py | {
"start": 95143,
"end": 148963
} | class ____(FrameBase):
"""DataFrame-like Expr Collection.
The constructor takes the expression that represents the query as input. The class
is not meant to be instantiated directly. Instead, use one of the IO connectors from
Dask.
"""
# Names of registered accessor namespaces — TODO confirm registration site.
_accessors: ClassVar[set[str]] = set()
# Backing pandas type for each partition.
_partition_type = pd.DataFrame
@property
def shape(self):
    """Return (nrows, ncols); nrows stays lazy via ``self.size``."""
    ncols = len(self.columns)
    # max(..., 1) guards the division for a frame with zero columns.
    return self.size // max(ncols, 1), ncols
@property
def ndim(self):
    """Return dimensionality"""
    # Always 2 for a DataFrame-like collection.
    return 2
@property
def empty(self):
    """Unsupported: checking for rows would require computing the collection."""
    # __getattr__ will be called after we raise this, so we'll raise it again from there
    raise AttributeNotImplementedError(
        "Checking whether a Dask DataFrame has any rows may be expensive. "
        "However, checking the number of columns is fast. "
        "Depending on which of these results you need, use either "
        "`len(df.index) == 0` or `len(df.columns) == 0`"
    )
@derived_from(pd.DataFrame)
def items(self):
    # Yield (column name, column as a lazy Series) pairs, as in pandas.
    for i, name in enumerate(self.columns):
        yield (name, self.iloc[:, i])
@property
def axes(self):
    # [row index, column index], matching pandas DataFrame.axes.
    return [self.index, self.columns]
def __contains__(self, key):
    # Membership tests against the metadata (column labels), as in pandas.
    return key in self._meta

def __iter__(self):
    # Iterating a DataFrame yields column names, as in pandas.
    return iter(self._meta)

def __dataframe__(self, *args, **kwargs):
    # DataFrame interchange protocol entry point.
    from dask.dataframe.dask_expr._interchange import DaskDataFrameInterchange

    return DaskDataFrameInterchange(self)
@derived_from(pd.DataFrame)
def iterrows(self):
    # Optimize once up front, then compute one partition at a time.
    frame = self.optimize()
    # NOTE(review): iterates self.npartitions but indexes into the optimized
    # frame — assumes optimization preserves the partition count; confirm.
    for i in range(self.npartitions):
        df = frame.get_partition(i).compute()
        yield from df.iterrows()

@derived_from(pd.DataFrame)
def itertuples(self, index=True, name="Pandas"):
    frame = self.optimize()
    for i in range(self.npartitions):
        df = frame.get_partition(i).compute()
        yield from df.itertuples(index=index, name=name)
@property
def _elemwise(self):
    # Hook used by shared machinery to apply elementwise operations.
    return elemwise
def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
    """NumPy ufunc protocol hook: defer unsupported cases, apply the rest elementwise."""
    out = kwargs.get("out", ())
    for x in inputs + out:
        # ufuncs work with 0-dimensional NumPy ndarrays
        # so we don't want to raise NotImplemented
        if isinstance(x, np.ndarray) and x.shape == ():
            continue
        elif not isinstance(
            x, (Number, Scalar, FrameBase, Array, pd.DataFrame, pd.Series, pd.Index)
        ):
            return NotImplemented

    if method == "__call__":
        if numpy_ufunc.signature is not None:
            # generalized ufuncs (with a core signature) are not supported
            return NotImplemented
        if numpy_ufunc.nout > 1:
            # ufuncs with multiple output values
            # are not yet supported for frames
            return NotImplemented
        else:
            return elemwise(numpy_ufunc, *inputs, **kwargs)
    else:
        # ufunc methods are not yet supported for frames
        return NotImplemented
def __array_wrap__(self, array, context=None):
    """Wrap a NumPy result back into a pandas frame, recovering the index from context."""
    if isinstance(context, tuple) and len(context) > 0:
        if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
            # A 0-d input carries no index information.
            index = None
        else:
            index = context[1][0].index
    else:
        # Without a ufunc context the index cannot be recovered; name the caller
        # in the error for easier debugging.
        try:
            import inspect

            method_name = f"`{inspect.stack()[3][3]}`"
        except IndexError:
            method_name = "This method"
        raise NotImplementedError(
            f"{method_name} is not implemented for `dask.dataframe.DataFrame`."
        )

    return meta_frame_constructor(self)(array, index=index, columns=self.columns)
def _ipython_key_completions_(self):
    # Offer column names for df["<tab>"] completion in IPython.
    return methods.tolist(self.columns)

def _repr_html_(self):
    return self.to_html()
@derived_from(pd.DataFrame)
def assign(self, **pairs):
    """Assign columns, batching consecutive simple assignments into one Assign node."""
    result = self
    args = []
    for k, v in pairs.items():
        v = _maybe_from_pandas([v])[0]
        if not isinstance(k, str):
            raise TypeError(f"Column name cannot be type {type(k)}")
        if callable(v):
            # Flush pending assignments first so the callable observes them.
            result = new_collection(expr.Assign(result, *args))
            args = []
            result = new_collection(expr.Assign(result, k, v(result)))
            continue
        elif isinstance(v, (Scalar, Series)):
            if isinstance(v, Series):
                if not expr.are_co_aligned(result.expr, v.expr):
                    # Misaligned Series need the aligning variant; flush first.
                    if len(args) > 0:
                        result = expr.Assign(result, *args)
                        args = []
                    result = new_collection(expr.AssignAlign(result, k, v.expr))
                    continue
        elif not isinstance(v, FrameBase) and isinstance(v, Hashable):
            # Plain scalars fall through and are batched below.
            pass
        elif isinstance(v, Array):
            if len(v.shape) > 1:
                raise ValueError("Array assignment only supports 1-D arrays")
            if v.npartitions != result.npartitions:
                raise ValueError(
                    "Number of partitions do not match "
                    f"({v.npartitions} != {result.npartitions})"
                )
            v = from_dask_array(v, index=result.index, meta=result._meta)
        else:
            raise TypeError(f"Column assignment doesn't support type {type(v)}")
        args.extend([k, v])

    if len(args) > 0:
        result = new_collection(expr.Assign(result, *args))

    return result
@derived_from(pd.DataFrame)
def clip(self, lower=None, upper=None, axis=None, **kwargs):
    # Column-wise clip is an expression; row-wise clip runs per partition.
    checked_axis = self._validate_axis(axis)
    if checked_axis == 1:
        return self.map_partitions(M.clip, lower, upper, axis=checked_axis)
    return new_collection(self.expr.clip(lower, upper, checked_axis))
def merge(
    self,
    right,
    how="inner",
    on=None,
    left_on=None,
    right_on=None,
    left_index=False,
    right_index=False,
    suffixes=("_x", "_y"),
    indicator=False,
    shuffle_method=None,
    npartitions=None,
    broadcast=None,
):
    """Merge the DataFrame with another DataFrame

    This will merge the two datasets, either on the indices, a certain column
    in each dataset or the index in one dataset and the column in another.

    Parameters
    ----------
    right: dask.dataframe.DataFrame
    how : {'left', 'right', 'outer', 'inner', 'leftsemi'}, default: 'inner'
        How to handle the operation of the two objects:

        - left: use calling frame's index (or column if on is specified)
        - right: use other frame's index
        - outer: form union of calling frame's index (or column if on is
          specified) with other frame's index, and sort it
          lexicographically
        - inner: form intersection of calling frame's index (or column if
          on is specified) with other frame's index, preserving the order
          of the calling's one
        - leftsemi: Choose all rows in left where the join keys can be found
          in right. Won't duplicate rows if the keys are duplicated in right.
          Drops all columns from right.
    on : label or list
        Column or index level names to join on. These must be found in both
        DataFrames. If on is None and not merging on indexes then this
        defaults to the intersection of the columns in both DataFrames.
    left_on : label or list, or array-like
        Column to join on in the left DataFrame. Other than in pandas
        arrays and lists are only support if their length is 1.
    right_on : label or list, or array-like
        Column to join on in the right DataFrame. Other than in pandas
        arrays and lists are only support if their length is 1.
    left_index : boolean, default False
        Use the index from the left DataFrame as the join key.
    right_index : boolean, default False
        Use the index from the right DataFrame as the join key.
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and
        right side, respectively
    indicator : boolean or string, default False
        If True, adds a column to output DataFrame called "_merge" with
        information on the source of each row. If string, column with
        information on source of each row will be added to output DataFrame,
        and column will be named value of string. Information column is
        Categorical-type and takes on a value of "left_only" for observations
        whose merge key only appears in `left` DataFrame, "right_only" for
        observations whose merge key only appears in `right` DataFrame,
        and "both" if the observation’s merge key is found in both.
    npartitions: int or None, optional
        The ideal number of output partitions. This is only utilised when
        performing a hash_join (merging on columns only). If ``None`` then
        ``npartitions = max(lhs.npartitions, rhs.npartitions)``.
        Default is ``None``.
    shuffle_method: {'disk', 'tasks', 'p2p'}, optional
        Either ``'disk'`` for single-node operation or ``'tasks'`` and
        ``'p2p'``` for distributed operation. Will be inferred by your
        current scheduler.
    broadcast: boolean or float, optional
        Whether to use a broadcast-based join in lieu of a shuffle-based
        join for supported cases. By default, a simple heuristic will be
        used to select the underlying algorithm. If a floating-point value
        is specified, that number will be used as the ``broadcast_bias``
        within the simple heuristic (a large number makes Dask more likely
        to choose the ``broacast_join`` code path). See ``broadcast_join``
        for more information.

    Notes
    -----
    There are three ways to join dataframes:

    1. Joining on indices. In this case the divisions are
       aligned using the function ``dask.dataframe.multi.align_partitions``.
       Afterwards, each partition is merged with the pandas merge function.

    2. Joining one on index and one on column. In this case the divisions of
       dataframe merged by index (:math:`d_i`) are used to divide the column
       merged dataframe (:math:`d_c`) one using
       ``dask.dataframe.multi.rearrange_by_divisions``. In this case the
       merged dataframe (:math:`d_m`) has the exact same divisions
       as (:math:`d_i`). This can lead to issues if you merge multiple rows from
       (:math:`d_c`) to one row in (:math:`d_i`).

    3. Joining both on columns. In this case a hash join is performed using
       ``dask.dataframe.multi.hash_join``.

    In some cases, you may see a ``MemoryError`` if the ``merge`` operation requires
    an internal ``shuffle``, because shuffling places all rows that have the same
    index in the same partition. To avoid this error, make sure all rows with the
    same ``on``-column value can fit on a single partition.
    """
    # Delegate to the module-level merge, which dispatches on the join keys.
    return merge(
        self,
        right,
        how,
        on,
        left_on,
        right_on,
        left_index,
        right_index,
        suffixes,
        indicator,
        shuffle_method,
        npartitions=npartitions,
        broadcast=broadcast,
    )
@derived_from(pd.DataFrame)
def join(
    self,
    other,
    on=None,
    how="left",
    lsuffix="",
    rsuffix="",
    shuffle_method=None,
    npartitions=None,
):
    # Promote a plain pandas object to a single-partition collection.
    if not isinstance(other, list) and not is_dask_collection(other):
        other = from_pandas(other, npartitions=1)
    if (
        not isinstance(other, list)
        and not is_dataframe_like(other._meta)
        and hasattr(other._meta, "name")
    ):
        # A named Series joins as a one-column frame.
        other = new_collection(expr.ToFrame(other))

    if not isinstance(other, FrameBase):
        # List input: recursively join multiple frames.
        if not isinstance(other, list) or not all(
            isinstance(o, FrameBase) for o in other
        ):
            raise ValueError("other must be DataFrame or list of DataFrames")
        if how not in ("outer", "left"):
            raise ValueError("merge_multi only supports left or outer joins")
        other = [
            from_pandas(o, npartitions=1) if not is_dask_collection(o) else o
            for o in other
        ]
        return new_collection(
            JoinRecursive([self.expr] + [o.expr for o in other], how=how)
        )

    return self.merge(
        right=other,
        left_index=on is None,
        right_index=True,
        left_on=on,
        how=how,
        suffixes=(lsuffix, rsuffix),
        shuffle_method=shuffle_method,
        npartitions=npartitions,
    )
@derived_from(pd.DataFrame)
def groupby(
    self, by, group_keys=True, sort=None, observed=None, dropna=None, **kwargs
):
    from dask.dataframe.dask_expr._groupby import GroupBy

    # Grouping by a DataFrame is ambiguous; only Series / column labels work.
    if isinstance(by, FrameBase) and not isinstance(by, Series):
        raise ValueError(
            f"`by` must be a column name or list of columns, got {by}."
        )

    return GroupBy(
        self,
        by,
        group_keys=group_keys,
        sort=sort,
        observed=observed,
        dropna=dropna,
        **kwargs,
    )
def __setitem__(self, key, value):
    """Column assignment; rebinds ``self._expr`` to the updated expression in place."""
    if isinstance(key, (tuple, list)) and isinstance(value, DataFrame):
        # Multi-column assignment from another DataFrame, matched positionally.
        out = self.assign(**{k: value[c] for k, c in zip(key, value.columns)})

    elif isinstance(key, pd.Index) and not isinstance(value, DataFrame):
        out = self.assign(**dict.fromkeys(list(key), value))
    elif (
        is_dataframe_like(key)
        or is_series_like(key)
        or isinstance(key, (DataFrame, Series))
    ):
        # Boolean-mask assignment: df[mask] = value.
        out = self.where(~key, value)
    elif not isinstance(key, str):
        raise NotImplementedError(f"Item assignment with {type(key)} not supported")
    else:
        out = self.assign(**{key: value})

    self._expr = out._expr
def __delitem__(self, key):
    """Drop ``key`` in place by re-selecting every other column."""
    remaining = [col for col in self.columns if col != key]
    self._expr = self[remaining]._expr
def __setattr__(self, key, value):
    """Route ``df.col = x`` to column assignment when ``col`` is an existing column."""
    try:
        columns = object.__getattribute__(self, "_expr").columns
    except AttributeError:
        # During construction _expr may not exist yet.
        columns = ()

    # exclude protected attributes from setitem
    if key in columns and key not in [
        "divisions",
        "dask",
        "_name",
        "_meta",
        "_expr",
    ]:
        self[key] = value
    else:
        object.__setattr__(self, key, value)
def __getattr__(self, key):
    """Fall back to column access (``df.col``) when no regular attribute matches."""
    try:
        # Prioritize `DataFrame` attributes
        return object.__getattribute__(self, key)
    except AttributeError as err:
        try:
            # Check if key is in columns if key
            # is not a normal attribute
            if key in self.expr._meta.columns:
                return new_collection(self.expr[key])
            raise err
        except AttributeError:
            # Fall back to `BaseFrame.__getattr__`
            return super().__getattr__(key)
def __dir__(self):
    """Include column names (valid identifiers only) for tab completion."""
    o = set(dir(type(self)))
    o.update(self.__dict__)
    o.update(set(dir(expr.Expr)))
    o.update(c for c in self.columns if (isinstance(c, str) and c.isidentifier()))
    return list(o)
def map(self, func, na_action=None, meta=None):
    """Elementwise map over the frame; requires pandas>=2.1.

    When ``meta`` is not supplied it is inferred by emulating the call, and a
    warning is emitted suggesting an explicit ``meta``.
    """
    if not PANDAS_GE_210:
        raise NotImplementedError(
            f"DataFrame.map requires pandas>=2.1.0, but pandas={PANDAS_VERSION} is "
            "installed."
        )
    if meta is None:
        meta = expr.emulate(M.map, self, func, na_action=na_action, udf=True)
        warnings.warn(meta_warning(meta, method="map"))
    return new_collection(expr.Map(self, arg=func, na_action=na_action, meta=meta))
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
    # split_every is forwarded to control the reduction — see NLargest.
    return new_collection(
        NLargest(self, n=n, _columns=columns, split_every=split_every)
    )

@derived_from(pd.DataFrame)
def nsmallest(self, n=5, columns=None, split_every=None):
    return new_collection(
        NSmallest(self, n=n, _columns=columns, split_every=split_every)
    )

@derived_from(pd.DataFrame)
def memory_usage(self, deep=False, index=True):
    return new_collection(MemoryUsageFrame(self, deep=deep, _index=index))
@derived_from(pd.DataFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
    other = self._create_alignable_frame(other, "outer")
    if not expr.are_co_aligned(self.expr, other.expr):
        # Misaligned inputs go through the aligning variant of the expression.
        return new_collection(
            expr.CombineFrameAlign(self, other, func, fill_value, overwrite)
        )
    return new_collection(
        expr.CombineFrame(self, other, func, fill_value, overwrite)
    )
@derived_from(
    pd.DataFrame,
    inconsistencies="keep=False will raise a ``NotImplementedError``",
)
def drop_duplicates(
    self,
    subset=None,
    split_every=None,
    split_out=True,
    shuffle_method=None,
    ignore_index=False,
    keep="first",
):
    shuffle_method = _get_shuffle_preferring_order(shuffle_method)
    if keep is False:
        raise NotImplementedError("drop_duplicates with keep=False")
    # Fail early if subset is not valid, e.g. missing columns
    subset = _convert_to_list(subset)
    # Running drop_duplicates on non-empty meta validates subset/keep up front.
    meta_nonempty(self._meta).drop_duplicates(subset=subset, keep=keep)
    return new_collection(
        DropDuplicates(
            self,
            subset=subset,
            ignore_index=ignore_index,
            split_out=split_out,
            split_every=split_every,
            shuffle_method=shuffle_method,
            keep=keep,
        )
    )
@insert_meta_param_description(pad=12)
def apply(self, function, *args, meta=no_default, axis=0, **kwargs):
"""Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Apply a function to row-wise passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
DataFrame.map_partitions
"""
axis = self._validate_axis(axis)
if axis == 0:
msg = (
"Dask DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)"
)
raise NotImplementedError(msg)
if meta is no_default:
meta = expr.emulate(
M.apply, self, function, args=args, udf=True, axis=axis, **kwargs
)
warnings.warn(meta_warning(meta))
return new_collection(
self.expr.apply(function, *args, meta=meta, axis=axis, **kwargs)
)
@derived_from(pd.DataFrame)
def dropna(self, how=no_default, subset=None, thresh=no_default):
if how is not no_default and thresh is not no_default:
raise TypeError(
"You cannot set both the how and thresh arguments at the same time."
)
subset = _convert_to_list(subset)
return new_collection(
expr.DropnaFrame(self, how=how, subset=subset, thresh=thresh)
)
@classmethod
def _validate_axis(cls, axis=0, numeric_axis: bool = True) -> None | Literal[0, 1]:
if axis not in (0, 1, "index", "columns", None):
raise ValueError(f"No axis named {axis}")
if numeric_axis:
num_axis: dict[str | None, Literal[0, 1]] = {"index": 0, "columns": 1}
return num_axis.get(axis, axis)
else:
return axis
@derived_from(pd.DataFrame, ua_args=["index"])
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
return new_collection(expr.RenameFrame(self, columns=columns))
@derived_from(pd.DataFrame)
def squeeze(self, axis=None):
if axis in [None, 1]:
if len(self.columns) == 1:
return self[self.columns[0]]
else:
return self
elif axis == 0:
raise NotImplementedError(
f"{type(self)} does not support squeeze along axis 0"
)
else:
raise ValueError(f"No axis {axis} for object type {type(self)}")
@derived_from(pd.DataFrame)
def explode(self, column):
column = _convert_to_list(column)
return new_collection(expr.ExplodeFrame(self, column=column))
@derived_from(pd.DataFrame)
def drop(self, labels=None, axis=0, columns=None, errors="raise"):
if columns is None and labels is None:
raise TypeError("must either specify 'columns' or 'labels'")
axis = _validate_axis(axis)
if axis == 1:
columns = labels or columns
elif axis == 0 and columns is None:
raise NotImplementedError(
"Drop currently only works for axis=1 or when columns is not None"
)
return new_collection(expr.Drop(self, columns=columns, errors=errors))
def to_parquet(self, path, **kwargs):
from dask.dataframe.dask_expr.io.parquet import to_parquet
return to_parquet(self, path, **kwargs)
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
columns = list(
self._meta.select_dtypes(include=include, exclude=exclude).columns
)
return new_collection(self.expr[columns])
@derived_from(pd.DataFrame)
def eval(self, expr, **kwargs):
if "inplace" in kwargs:
raise NotImplementedError("inplace is not supported for eval")
return new_collection(Eval(self, _expr=expr, expr_kwargs=kwargs))
def set_index(
self,
other,
drop=True,
sorted=False,
npartitions: int | None = None,
divisions=None,
sort: bool = True,
shuffle_method=None,
upsample: float = 1.0,
partition_size: float = 128e6,
append: bool = False,
**options,
):
"""Set the DataFrame index (row labels) using an existing column.
If ``sort=False``, this function operates exactly like ``pandas.set_index``
and sets the index on the DataFrame. If ``sort=True`` (default),
this function also sorts the DataFrame by the new index. This can have a
significant impact on performance, because joins, groupbys, lookups, etc.
are all much faster on that column. However, this performance increase
comes with a cost, sorting a parallel dataset requires expensive shuffles.
Often we ``set_index`` once directly after data ingest and filtering and
then perform many cheap computations off of the sorted dataset.
With ``sort=True``, this function is much more expensive. Under normal
operation this function does an initial pass over the index column to
compute approximate quantiles to serve as future divisions. It then passes
over the data a second time, splitting up each input partition into several
pieces and sharing those pieces to all of the output partitions now in
sorted order.
In some cases we can alleviate those costs, for example if your dataset is
sorted already then we can avoid making many small pieces or if you know
good values to split the new index column then we can avoid the initial
pass over the data. For example if your new index is a datetime index and
your data is already sorted by day then this entire operation can be done
for free. You can control these options with the following parameters.
Parameters
----------
other: string or Dask Series
Column to use as index.
drop: boolean, default True
Delete column to be used as the new index.
sorted: bool, optional
If the index column is already sorted in increasing order.
Defaults to False
npartitions: int, None, or 'auto'
The ideal number of output partitions. If None, use the same as
the input. If 'auto' then decide by memory use.
Only used when ``divisions`` is not given. If ``divisions`` is given,
the number of output partitions will be ``len(divisions) - 1``.
divisions: list, optional
The "dividing lines" used to split the new index into partitions.
For ``divisions=[0, 10, 50, 100]``, there would be three output partitions,
where the new index contained [0, 10), [10, 50), and [50, 100), respectively.
See https://docs.dask.org/en/latest/dataframe-design.html#partitions.
If not given (default), good divisions are calculated by immediately computing
the data and looking at the distribution of its values. For large datasets,
this can be expensive.
Note that if ``sorted=True``, specified divisions are assumed to match
the existing partitions in the data; if this is untrue you should
leave divisions empty and call ``repartition`` after ``set_index``.
inplace: bool, optional
Modifying the DataFrame in place is not supported by Dask.
Defaults to False.
sort: bool, optional
If ``True``, sort the DataFrame by the new index. Otherwise
set the index on the individual existing partitions.
Defaults to ``True``.
shuffle_method: {'disk', 'tasks', 'p2p'}, optional
Either ``'disk'`` for single-node operation or ``'tasks'`` and
``'p2p'`` for distributed operation. Will be inferred by your
current scheduler.
compute: bool, default False
Whether or not to trigger an immediate computation. Defaults to False.
Note, that even if you set ``compute=False``, an immediate computation
will still be triggered if ``divisions`` is ``None``.
partition_size: int, optional
Desired size of each partitions in bytes.
Only used when ``npartitions='auto'``
Examples
--------
>>> import dask
>>> ddf = dask.datasets.timeseries(start="2021-01-01", end="2021-01-07", freq="1h").reset_index()
>>> ddf2 = ddf.set_index("x")
>>> ddf2 = ddf.set_index(ddf.x)
>>> ddf2 = ddf.set_index(ddf.timestamp, sorted=True)
A common case is when we have a datetime column that we know to be
sorted and is cleanly divided by day. We can set this index for free
by specifying both that the column is pre-sorted and the particular
divisions along which is is separated
>>> import pandas as pd
>>> divisions = pd.date_range(start="2021-01-01", end="2021-01-07", freq='1D')
>>> divisions
DatetimeIndex(['2021-01-01', '2021-01-02', '2021-01-03', '2021-01-04',
'2021-01-05', '2021-01-06', '2021-01-07'],
dtype='datetime64[ns]', freq='D')
Note that ``len(divisions)`` is equal to ``npartitions + 1``. This is because ``divisions``
represents the upper and lower bounds of each partition. The first item is the
lower bound of the first partition, the second item is the lower bound of the
second partition and the upper bound of the first partition, and so on.
The second-to-last item is the lower bound of the last partition, and the last
(extra) item is the upper bound of the last partition.
>>> ddf2 = ddf.set_index("timestamp", sorted=True, divisions=divisions.tolist())
If you'll be running `set_index` on the same (or similar) datasets repeatedly,
you could save time by letting Dask calculate good divisions once, then copy-pasting
them to reuse. This is especially helpful running in a Jupyter notebook:
>>> ddf2 = ddf.set_index("name") # slow, calculates data distribution
>>> ddf2.divisions # doctest: +SKIP
["Alice", "Laura", "Ursula", "Zelda"]
>>> # ^ Now copy-paste this and edit the line above to:
>>> # ddf2 = ddf.set_index("name", divisions=["Alice", "Laura", "Ursula", "Zelda"])
"""
if isinstance(other, list) and len(other) == 1:
other = other[0]
if isinstance(other, list):
if any(isinstance(c, FrameBase) for c in other):
raise TypeError("List[FrameBase] not supported by set_index")
else:
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
f"You tried to index with this index: {other}\n"
"Indexes must be single columns only."
)
if isinstance(other, DataFrame):
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
f"You tried to index with a frame with these columns: {list(other.columns)}\n"
"Indexes must be single columns only."
)
if isinstance(other, Series):
if other._name == self.index._name:
return self
elif other == self.index.name:
return self
if divisions is not None:
check_divisions(divisions)
if (sorted or not sort) and npartitions is not None:
raise ValueError(
"Specifying npartitions with sort=False or sorted=True is not "
"supported. Call `repartition` afterwards."
)
if sorted:
if divisions is not None and len(divisions) - 1 != self.npartitions:
msg = (
"When doing `df.set_index(col, sorted=True, divisions=...)`, "
"divisions indicates known splits in the index column. In this "
"case divisions must be the same length as the existing "
"divisions in `df`\n\n"
"If the intent is to repartition into new divisions after "
"setting the index, you probably want:\n\n"
"`df.set_index(col, sorted=True).repartition(divisions=divisions)`"
)
raise ValueError(msg)
result = new_collection(
SetIndexBlockwise(
self, other, drop, new_divisions=divisions, append=append
)
)
return result.compute_current_divisions(set_divisions=True)
elif not sort:
return new_collection(
SetIndexBlockwise(self, other, drop, None, append=append)
)
return new_collection(
SetIndex(
self,
other,
drop,
user_divisions=divisions,
npartitions=npartitions,
upsample=upsample,
partition_size=partition_size,
shuffle_method=get_specified_shuffle(shuffle_method),
append=append,
options=options,
)
)
def sort_values(
self,
by: str | list[str],
npartitions: int | None = None,
ascending: bool | list[bool] = True,
na_position: Literal["first", "last"] = "last",
partition_size: float = 128e6,
sort_function: Callable[[pd.DataFrame], pd.DataFrame] | None = None,
sort_function_kwargs: Mapping[str, Any] | None = None,
upsample: float = 1.0,
ignore_index: bool | None = False,
shuffle_method: str | None = None,
**options,
):
"""Sort the dataset by a single column.
Sorting a parallel dataset requires expensive shuffles and is generally
not recommended. See ``set_index`` for implementation details.
Parameters
----------
by: str or list[str]
Column(s) to sort by.
npartitions: int, None, or 'auto'
The ideal number of output partitions. If None, use the same as
the input. If 'auto' then decide by memory use.
ascending: bool, optional
Sort ascending vs. descending.
Defaults to True.
na_position: {'last', 'first'}, optional
Puts NaNs at the beginning if 'first', puts NaN at the end if 'last'.
Defaults to 'last'.
sort_function: function, optional
Sorting function to use when sorting underlying partitions.
If None, defaults to ``M.sort_values`` (the partition library's
implementation of ``sort_values``).
sort_function_kwargs: dict, optional
Additional keyword arguments to pass to the partition sorting function.
By default, ``by``, ``ascending``, and ``na_position`` are provided.
Examples
--------
>>> df2 = df.sort_values('x') # doctest: +SKIP
"""
if na_position not in ("first", "last"):
raise ValueError("na_position must be either 'first' or 'last'")
if not isinstance(by, list):
by = [by]
if any(not isinstance(b, str) for b in by):
raise NotImplementedError(
"Dataframes only support sorting by named columns which must be passed as a "
"string or a list of strings.\n"
f"You passed {by}"
)
if not isinstance(ascending, bool) and not len(ascending) == len(by):
raise ValueError(f"Length of {ascending=} != length of {by=}")
return new_collection(
SortValues(
self,
by,
ascending,
na_position,
npartitions,
partition_size,
sort_function,
sort_function_kwargs,
upsample,
ignore_index,
get_specified_shuffle(shuffle_method),
options=options,
)
)
def query(self, expr, **kwargs):
"""Filter dataframe with complex expression
Blocked version of pd.DataFrame.query
Parameters
----------
expr: str
The query string to evaluate.
You can refer to column names that are not valid Python variable names
by surrounding them in backticks.
Dask does not fully support referring to variables using the '@' character,
use f-strings or the ``local_dict`` keyword argument instead.
Notes
-----
This is like the sequential version except that this will also happen
in many threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set ``numexpr`` to use a
single thread:
.. code-block:: python
import numexpr
numexpr.set_num_threads(1)
See also
--------
pandas.DataFrame.query
pandas.eval
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 1, 2],
... 'y': [1, 2, 3, 4],
... 'z z': [4, 3, 2, 1]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Refer to column names directly:
>>> ddf.query('y > x').compute()
x y z z
2 1 3 2
3 2 4 1
Refer to column name using backticks:
>>> ddf.query('`z z` > x').compute()
x y z z
0 1 1 4
1 2 2 3
2 1 3 2
Refer to variable name using f-strings:
>>> value = 1
>>> ddf.query(f'x == {value}').compute()
x y z z
0 1 1 4
2 1 3 2
Refer to variable name using ``local_dict``:
>>> ddf.query('x == @value', local_dict={"value": value}).compute()
x y z z
0 1 1 4
2 1 3 2
"""
return new_collection(Query(self, expr, kwargs))
@derived_from(pd.DataFrame)
def mode(self, dropna=True, split_every=False, numeric_only=False):
modes = []
for _, col in self.items():
if numeric_only and not pd.api.types.is_numeric_dtype(col.dtype):
continue
modes.append(col.mode(dropna=dropna, split_every=split_every))
return concat(modes, axis=1)
@derived_from(pd.DataFrame)
def add_prefix(self, prefix):
return new_collection(expr.AddPrefix(self, prefix))
@derived_from(pd.DataFrame)
def add_suffix(self, suffix):
return new_collection(expr.AddSuffix(self, suffix))
def pivot_table(self, index, columns, values, aggfunc="mean"):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.
Parameters
----------
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
"""
return pivot_table(self, index, columns, values, aggfunc)
@property
def iloc(self):
"""Purely integer-location based indexing for selection by position.
Only indexing the column positions is supported. Trying to select
row positions will raise a ValueError.
See :ref:`dataframe.indexing` for more.
Examples
--------
>>> df.iloc[:, [2, 0, 1]] # doctest: +SKIP
"""
from dask.dataframe.dask_expr._indexing import ILocIndexer
return ILocIndexer(self)
def _comparison_op(self, expr_cls, other, level, axis):
if level is not None:
raise NotImplementedError("level must be None")
axis = self._validate_axis(axis)
return new_collection(expr_cls(self, other, axis))
def lt(self, other, level=None, axis=0):
return self._comparison_op(expr.LTFrame, other, level, axis)
def le(self, other, level=None, axis=0):
return self._comparison_op(expr.LEFrame, other, level, axis)
def gt(self, other, level=None, axis=0):
return self._comparison_op(expr.GTFrame, other, level, axis)
def ge(self, other, level=None, axis=0):
return self._comparison_op(expr.GEFrame, other, level, axis)
def ne(self, other, level=None, axis=0):
return self._comparison_op(expr.NEFrame, other, level, axis)
def eq(self, other, level=None, axis=0):
return self._comparison_op(expr.EQFrame, other, level, axis)
def categorize(self, columns=None, index=None, split_every=None, **kwargs):
"""Convert columns of the DataFrame to category dtype.
.. warning:: This method eagerly computes the categories of the chosen columns.
Parameters
----------
columns : list, optional
A list of column names to convert to categoricals. By default any
column with an object dtype is converted to a categorical, and any
unknown categoricals are made known.
index : bool, optional
Whether to categorize the index. By default, object indices are
converted to categorical, and unknown categorical indices are made
known. Set True to always categorize the index, False to never.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used.
kwargs
Keyword arguments are passed on to compute.
"""
df = self
meta = df._meta
if columns is None:
columns = list(meta.select_dtypes(["object", "string", "category"]).columns)
elif is_scalar(columns):
columns = [columns]
# Filter out known categorical columns
columns = [
c
for c in columns
if not (is_categorical_dtype(meta[c]) and has_known_categories(meta[c]))
]
if index is not False:
if is_categorical_dtype(meta.index):
index = not has_known_categories(meta.index)
elif index is None:
index = str(meta.index.dtype) in ("object", "string")
# Nothing to do
if not len(columns) and index is False:
return df
from dask.dataframe.dask_expr._collection import new_collection
# Eagerly compute the categories
categories, index = new_collection(
GetCategories(self, columns=columns, index=index, split_every=split_every)
).compute()
# Some operations like get_dummies() rely on the order of categories
categories = {k: v.sort_values() for k, v in categories.items()}
# Categorize each partition
return new_collection(Categorize(self, categories, index))
@derived_from(pd.DataFrame)
def nunique(self, axis=0, dropna=True, split_every=False):
if axis == 1:
return new_collection(expr.NUniqueColumns(self, axis=axis, dropna=dropna))
else:
return concat(
[
col.nunique(dropna=dropna, split_every=split_every).to_series(name)
for name, col in self.items()
]
)
def quantile(self, q=0.5, axis=0, numeric_only=False, method="default"):
"""Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use dask's internal custom
algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest
for floats and ints and fallback to the ``'dask'`` otherwise.
"""
allowed_methods = ["default", "dask", "tdigest"]
if method not in allowed_methods:
raise ValueError("method can only be 'default', 'dask' or 'tdigest'")
meta = make_meta(
meta_nonempty(self._meta).quantile(
q=q, numeric_only=numeric_only, axis=axis
)
)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
return self.map_partitions(
M.quantile,
q,
axis,
enforce_metadata=False,
meta=meta,
numeric_only=numeric_only,
)
if numeric_only:
frame = self.select_dtypes(
"number", exclude=[np.timedelta64, np.datetime64]
)
else:
frame = self
collections = []
for _, col in frame.items():
collections.append(col.quantile(q=q, method=method))
if len(collections) > 0 and isinstance(collections[0], Scalar):
return _from_scalars(collections, meta, frame.expr.columns)
return concat(collections, axis=1)
@derived_from(pd.DataFrame)
def median(self, axis=0, numeric_only=False):
if axis == 1 or self.npartitions == 1:
return self.median_approximate(axis=axis, numeric_only=numeric_only)
raise NotImplementedError(
"Dask doesn't implement an exact median in all cases as this is hard to do in parallel. "
"See the `median_approximate` method instead, which uses an approximate algorithm."
)
def median_approximate(self, axis=0, method="default", numeric_only=False):
"""Return the approximate median of the values over the requested axis.
Parameters
----------
axis : {0, 1, "index", "columns"} (default 0)
0 or ``"index"`` for row-wise, 1 or ``"columns"`` for column-wise
method : {'default', 'tdigest', 'dask'}, optional
What method to use. By default will use Dask's internal custom
algorithm (``"dask"``). If set to ``"tdigest"`` will use tdigest
for floats and ints and fallback to the ``"dask"`` otherwise.
"""
return self.quantile(
axis=axis, method=method, numeric_only=numeric_only
).rename(None)
@derived_from(pd.DataFrame)
def describe(
self,
split_every=False,
percentiles=None,
percentiles_method="default",
include=None,
exclude=None,
):
# TODO: duplicated columns
if include is None and exclude is None:
_include = [np.number, np.timedelta64, np.datetime64]
columns = self._meta.select_dtypes(include=_include).columns
if len(columns) == 0:
columns = self._meta.columns
elif include == "all":
if exclude is not None:
raise ValueError("exclude must be None when include is 'all'")
columns = self._meta.columns
else:
columns = self._meta.select_dtypes(include=include, exclude=exclude).columns
stats = [
self[col].describe(
split_every=split_every,
percentiles=percentiles,
percentiles_method=percentiles_method,
)
for col in columns
]
return concat(stats, axis=1)
@derived_from(pd.DataFrame)
def pop(self, item):
out = self[item]
self._expr = expr.Drop(self, columns=[item])
return out
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise summary of a Dask DataFrame
"""
if buf is None:
import sys
buf = sys.stdout
lines = [str(type(self)).replace("._collection", "")]
if len(self.columns) == 0:
lines.append(f"{type(self.index._meta).__name__}: 0 entries")
lines.append(f"Empty {type(self).__name__}")
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.update({"index": self.index, "count": self.count()})
if memory_usage:
computations["memory_usage"] = self.memory_usage(deep=True, index=True)
computations = dict(zip(computations.keys(), compute(*computations.values())))
if verbose:
import textwrap
index = computations["index"]
counts = computations["count"]
lines.append(index_summary(index))
lines.append(f"Data columns (total {len(self.columns)} columns):")
from pandas.io.formats.printing import pprint_thing
space = max(len(pprint_thing(k)) for k in self.columns) + 1
column_width = max(space, 7)
header = (
textwrap.dedent(
"""\
# {{column:<{column_width}}} Non-Null Count Dtype
--- {{underl:<{column_width}}} -------------- -----"""
)
.format(column_width=column_width)
.format(column="Column", underl="------")
)
column_template = textwrap.dedent(
f"""\
{{i:^3}} {{name:<{column_width}}} {{count}} non-null {{dtype}}"""
)
column_info = [
column_template.format(
i=pprint_thing(i),
name=pprint_thing(name),
count=pprint_thing(count),
dtype=pprint_thing(dtype),
)
for i, (name, count, dtype) in enumerate(
# NOTE: Use `counts.values` for cudf support
zip(self.columns, counts.values, self.dtypes)
)
]
lines.extend(header.split("\n"))
else:
column_info = [index_summary(self.columns, name="Columns")]
lines.extend(column_info)
dtype_counts = [
f"{value}({count})"
for value, count in sorted(self.dtypes.value_counts().items(), key=str)
]
lines.append("dtypes: {}".format(", ".join(dtype_counts)))
if memory_usage:
memory_int = computations["memory_usage"].sum()
lines.append(f"memory usage: {memory_repr(memory_int)}\n")
put_lines(buf, lines)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None, numeric_only=False, split_every=False):
return self._cov(min_periods, numeric_only, split_every)
@derived_from(pd.DataFrame)
def corr(
self,
method="pearson",
min_periods=None,
numeric_only=False,
split_every=False,
):
return self._corr(method, min_periods, numeric_only, split_every)
@derived_from(pd.DataFrame)
def to_string(self, max_rows=5):
# option_context doesn't affect
return self._repr_data().to_string(max_rows=max_rows, show_dimensions=False)
@derived_from(pd.DataFrame)
def to_html(self, max_rows=5):
# pd.Series doesn't have html repr
data = self._repr_data().to_html(max_rows=max_rows, show_dimensions=False)
n_expr = len({e._name for e in self.walk()})
return get_template("dataframe.html.j2").render(
data=data,
name=self._name,
layers=maybe_pluralize(n_expr, "expression"),
)
@derived_from(pd.DataFrame)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
def _repr_data(self):
meta = self._meta
index = self._repr_divisions
cols = meta.columns
if len(cols) == 0:
series_df = pd.DataFrame([[]] * len(index), columns=cols, index=index)
else:
series_df = pd.concat(
[_repr_data_series(s, index=index) for _, s in meta.items()], axis=1
)
return series_df
| DataFrame |
python | google__pytype | pytype/metrics_test.py | {
"start": 3206,
"end": 3934
} | class ____(unittest.TestCase):
"""Tests for StopWatch."""
def setUp(self):
super().setUp()
metrics._prepare_for_test()
def test_stopwatch(self):
c = metrics.StopWatch("foo")
with c:
pass
self.assertGreaterEqual(c._total, 0)
def test_merge(self):
c1 = metrics.StopWatch("foo")
c2 = metrics.StopWatch("bar")
with c1:
pass
with c2:
pass
t1 = c1._total
t2 = c2._total
c1._merge(c2)
t3 = c1._total
self.assertGreaterEqual(t3, t1)
self.assertGreaterEqual(t3, t2)
def test_summary(self):
c1 = metrics.StopWatch("foo")
with c1:
pass
self.assertIsInstance(c1._summary(), str)
self.assertIsInstance(str(c1), str)
| StopWatchTest |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 32906,
"end": 44072
} | class ____:
"""Some tests to show that fisher_exact() works correctly.
Note that in SciPy 0.9.0 this was not working well for large numbers due to
inaccuracy of the hypergeom distribution (see #1218). Fixed now.
Also note that R and SciPy have different argument formats for their
hypergeometric distribution functions.
R:
> phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
[1] 1.701815e-09
"""
def test_basic(self):
fisher_exact = stats.fisher_exact
res = fisher_exact([[14500, 20000], [30000, 40000]])[1]
assert_approx_equal(res, 0.01106, significant=4)
res = fisher_exact([[100, 2], [1000, 5]])[1]
assert_approx_equal(res, 0.1301, significant=4)
res = fisher_exact([[2, 7], [8, 2]])[1]
assert_approx_equal(res, 0.0230141, significant=6)
res = fisher_exact([[5, 1], [10, 10]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 15], [20, 20]])[1]
assert_approx_equal(res, 0.0958044, significant=6)
res = fisher_exact([[5, 16], [20, 25]])[1]
assert_approx_equal(res, 0.1725862, significant=6)
res = fisher_exact([[10, 5], [10, 1]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 0], [1, 4]])[1]
assert_approx_equal(res, 0.04761904, significant=6)
res = fisher_exact([[0, 1], [3, 2]])[1]
assert_approx_equal(res, 1.0)
res = fisher_exact([[0, 2], [6, 4]])[1]
assert_approx_equal(res, 0.4545454545)
res = fisher_exact([[2, 7], [8, 2]])
assert_approx_equal(res[1], 0.0230141, significant=6)
assert_approx_equal(res[0], 4.0 / 56)
def test_precise(self):
# results from R
#
# R defines oddsratio differently (see Notes section of fisher_exact
# docstring), so those will not match. We leave them in anyway, in
# case they will be useful later on. We test only the p-value.
tablist = [
([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),
([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),
([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),
([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),
([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),
([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),
([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),
([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),
([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))
]
for table, res_r in tablist:
res = stats.fisher_exact(np.asarray(table))
np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
verbose=True)
def test_gh4130(self):
# Previously, a fudge factor used to distinguish between theoretically
# and numerically different probability masses was 1e-4; it has been
# tightened to fix gh4130. Accuracy checked against R fisher.test.
# options(digits=16)
# table <- matrix(c(6, 108, 37, 200), nrow = 2)
# fisher.test(table, alternative = "t")
x = [[6, 37], [108, 200]]
res = stats.fisher_exact(x)
assert_allclose(res[1], 0.005092697748126)
# case from https://github.com/brentp/fishers_exact_test/issues/27
# That package has an (absolute?) fudge factor of 1e-6; too big
x = [[22, 0], [0, 102]]
res = stats.fisher_exact(x)
assert_allclose(res[1], 7.175066786244549e-25)
# case from https://github.com/brentp/fishers_exact_test/issues/1
x = [[94, 48], [3577, 16988]]
res = stats.fisher_exact(x)
assert_allclose(res[1], 2.069356340993818e-37)
def test_gh9231(self):
# Previously, fisher_exact was extremely slow for this table
# As reported in gh-9231, the p-value should be very nearly zero
x = [[5829225, 5692693], [5760959, 5760959]]
res = stats.fisher_exact(x)
assert_allclose(res[1], 0, atol=1e-170)
@pytest.mark.slow
def test_large_numbers(self):
# Test with some large numbers. Regression test for #1401
pvals = [5.56e-11, 2.666e-11, 1.363e-11] # from R
for pval, num in zip(pvals, [75, 76, 77]):
res = stats.fisher_exact([[17704, 496], [1065, num]])[1]
assert_approx_equal(res, pval, significant=4)
res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]
assert_approx_equal(res, 0.2751, significant=4)
def test_raises(self):
# test we raise an error for wrong number of dimensions.
message = "The input `table` must have two dimensions."
with pytest.raises(ValueError, match=message):
stats.fisher_exact(np.arange(6))
def test_row_or_col_zero(self):
tables = ([[0, 0], [5, 10]],
[[5, 10], [0, 0]],
[[0, 5], [0, 10]],
[[5, 0], [10, 0]])
for table in tables:
oddsratio, pval = stats.fisher_exact(table)
assert_equal(pval, 1.0)
assert_equal(oddsratio, np.nan)
def test_less_greater(self):
tables = (
# Some tables to compare with R:
[[2, 7], [8, 2]],
[[200, 7], [8, 300]],
[[28, 21], [6, 1957]],
[[190, 800], [200, 900]],
# Some tables with simple exact values
# (includes regression test for ticket #1568):
[[0, 2], [3, 0]],
[[1, 1], [2, 1]],
[[2, 0], [1, 2]],
[[0, 1], [2, 3]],
[[1, 0], [1, 4]],
)
pvals = (
# from R:
[0.018521725952066501, 0.9990149169715733],
[1.0, 2.0056578803889148e-122],
[1.0, 5.7284374608319831e-44],
[0.7416227, 0.2959826],
# Exact:
[0.1, 1.0],
[0.7, 0.9],
[1.0, 0.3],
[2./3, 1.0],
[1.0, 1./3],
)
for table, pval in zip(tables, pvals):
res = []
res.append(stats.fisher_exact(table, alternative="less")[1])
res.append(stats.fisher_exact(table, alternative="greater")[1])
assert_allclose(res, pval, atol=0, rtol=1e-7)
def test_gh3014(self):
# check if issue #3014 has been fixed.
# before, this would have risen a ValueError
odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])
@pytest.mark.parametrize("alternative", ['two-sided', 'less', 'greater'])
def test_result(self, alternative):
table = np.array([[14500, 20000], [30000, 40000]])
res = stats.fisher_exact(table, alternative=alternative)
assert_equal((res.statistic, res.pvalue), res)
def test_input_validation_edge_cases_rxc(self):
rng = np.random.default_rng(2345783457834572345)
table = np.asarray([[2, 7], [8, 2]])
message = r"`alternative` must be the default \(None\) unless..."
with pytest.raises(ValueError, match=message):
method = stats.PermutationMethod(rng=rng)
stats.fisher_exact(table, method=method, alternative='less')
message = "...not recognized; if provided, `method` must be an..."
with pytest.raises(ValueError, match=message):
method = stats.BootstrapMethod(rng=rng)
stats.fisher_exact(table, method=method)
message = "If the `method` argument of `fisher_exact` is an..."
with pytest.raises(ValueError, match=message):
method = stats.MonteCarloMethod(rvs=stats.norm.rvs)
stats.fisher_exact(table, method=method)
message = "`table` must have at least one row and one column."
with pytest.raises(ValueError, match=message):
stats.fisher_exact(np.zeros((0, 1)))
# Specical case: when there is only one table with given marginals, the
# PMF of that case is 1.0, so the p-value is 1.0
np.testing.assert_equal(stats.fisher_exact([[1, 2, 3]]), (1, 1))
np.testing.assert_equal(stats.fisher_exact([[1], [2], [3]]), (1, 1))
np.testing.assert_equal(stats.fisher_exact(np.zeros((2, 3))), (1, 1))
@pytest.mark.fail_slow(10)
@pytest.mark.slow()
def test_resampling_2x2(self):
rng = np.random.default_rng(2345783457834572345)
table = np.asarray([[2, 7], [8, 2]])
ref = stats.fisher_exact(table)
ref_pvalue = ref.pvalue
ref_stat = stats.random_table(table.sum(axis=1), table.sum(axis=0)).pmf(table)
method = stats.MonteCarloMethod(rng=rng)
res = stats.fisher_exact(table, method=method)
assert_allclose(res.pvalue, ref_pvalue, atol=0.0025)
assert_equal(res.statistic, ref_stat)
method = stats.PermutationMethod(rng=rng)
res = stats.fisher_exact(table, method=method)
assert_allclose(res.pvalue, ref.pvalue, atol=0.0025)
assert_equal(res.statistic, ref_stat)
@pytest.mark.fail_slow(10)
@pytest.mark.slow()
def test_resampling_rxc(self):
# Compare against R fisher.exact
# options(digits=16)
# MP6 < - rbind(
# c(1, 2, 2, 1, 1, 0, 1),
# c(2, 0, 0, 2, 3, 0, 0),
# c(0, 1, 1, 1, 2, 7, 3),
# c(1, 1, 2, 0, 0, 0, 1),
# c(0, 1, 1, 1, 1, 0, 0))
# fisher.test(MP6)
table = [[1, 2, 2, 1, 1, 0, 1],
[2, 0, 0, 2, 3, 0, 0],
[0, 1, 1, 1, 2, 7, 3],
[1, 1, 2, 0, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0]]
table = np.asarray(table)
ref_pvalue = 0.03928964365533
rng = np.random.default_rng(3928964365533)
method = stats.PermutationMethod(rng=rng)
res = stats.fisher_exact(table, method=method)
assert_allclose(res.pvalue, ref_pvalue, atol=5e-4)
method = stats.MonteCarloMethod(rng=rng, n_resamples=99999)
res = stats.fisher_exact(table, method=method)
assert_allclose(res.pvalue, ref_pvalue, atol=5e-4)
@pytest.mark.xslow()
def test_resampling_exact_2x2(self):
# Test that exact permutation p-value matches result of `fisher_exact`
rng = np.random.default_rng(2345783457834572345)
method = stats.PermutationMethod(rng=rng)
for a in range(1, 3):
for b in range(1, 3):
for c in range(1, 3):
for d in range(1, 4):
table = np.asarray([[a, b], [c, d]])
ref = stats.fisher_exact(table)
res = stats.fisher_exact(table, method=method)
assert_allclose(res.pvalue, ref.pvalue, atol=1e-14)
| TestFisherExact |
python | kamyu104__LeetCode-Solutions | Python/maximum-increasing-triplet-value.py | {
"start": 758,
"end": 1297
} | class ____(object):
def maximumTripletValue(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left = SortedList()
right = SortedList(nums[i] for i in xrange(1, len(nums)))
result = 0
for i in xrange(1, len(nums)-1):
left.add(nums[i-1])
right.remove(nums[i])
j = left.bisect_left(nums[i])
if j-1 >= 0 and right[-1] > nums[i]:
result = max(result, left[j-1]-nums[i]+right[-1])
return result
| Solution2 |
python | doocs__leetcode | solution/1200-1299/1266.Minimum Time Visiting All Points/Solution.py | {
"start": 0,
"end": 204
} | class ____:
def minTimeToVisitAllPoints(self, points: List[List[int]]) -> int:
return sum(
max(abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) for p1, p2 in pairwise(points)
)
| Solution |
python | getsentry__sentry | src/sentry/monitors/serializers.py | {
"start": 4598,
"end": 4935
} | class ____(TypedDict):
schedule_type: Literal["crontab", "interval"]
schedule: str | tuple[int, IntervalNames]
checkin_margin: int | None
max_runtime: int | None
timezone: str | None
failure_issue_threshold: int | None
recovery_threshold: int | None
alert_rule_id: int | None
| MonitorConfigSerializerResponse |
python | run-llama__llama_index | llama-index-core/llama_index/core/evaluation/retrieval/metrics.py | {
"start": 3010,
"end": 6010
} | class ____(BaseRetrievalMetric):
"""
MRR (Mean Reciprocal Rank) metric with two calculation options.
- The default method calculates the reciprocal rank of the first relevant retrieved document.
- The more granular method sums the reciprocal ranks of all relevant retrieved documents and divides by the count of relevant documents.
Attributes:
metric_name (str): The name of the metric.
use_granular_mrr (bool): Determines whether to use the granular method for calculation.
"""
metric_name: ClassVar[str] = "mrr"
use_granular_mrr: bool = False
def compute(
self,
query: Optional[str] = None,
expected_ids: Optional[List[str]] = None,
retrieved_ids: Optional[List[str]] = None,
expected_texts: Optional[List[str]] = None,
retrieved_texts: Optional[List[str]] = None,
**kwargs: Any,
) -> RetrievalMetricResult:
"""
Compute MRR based on the provided inputs and selected method.
Parameters
----------
query (Optional[str]): The query string (not used in the current implementation).
expected_ids (Optional[List[str]]): Expected document IDs.
retrieved_ids (Optional[List[str]]): Retrieved document IDs.
expected_texts (Optional[List[str]]): Expected texts (not used in the current implementation).
retrieved_texts (Optional[List[str]]): Retrieved texts (not used in the current implementation).
Raises
------
ValueError: If the necessary IDs are not provided.
Returns
-------
RetrievalMetricResult: The result with the computed MRR score.
"""
# Checking for the required arguments
if (
retrieved_ids is None
or expected_ids is None
or not retrieved_ids
or not expected_ids
):
raise ValueError("Retrieved ids and expected ids must be provided")
if self.use_granular_mrr:
# Granular MRR calculation: All relevant retrieved docs have their reciprocal ranks summed and averaged
expected_set = set(expected_ids)
reciprocal_rank_sum = 0.0
relevant_docs_count = 0
for index, doc_id in enumerate(retrieved_ids):
if doc_id in expected_set:
relevant_docs_count += 1
reciprocal_rank_sum += 1.0 / (index + 1)
mrr_score = (
reciprocal_rank_sum / relevant_docs_count
if relevant_docs_count > 0
else 0.0
)
else:
# Default MRR calculation: Reciprocal rank of the first relevant document retrieved
for i, id in enumerate(retrieved_ids):
if id in expected_ids:
return RetrievalMetricResult(score=1.0 / (i + 1))
mrr_score = 0.0
return RetrievalMetricResult(score=mrr_score)
| MRR |
python | getsentry__sentry | tests/sentry/explore/endpoints/test_explore_saved_query_starred_order.py | {
"start": 175,
"end": 2672
} | class ____(APITestCase, SnubaTestCase):
feature_name = "organizations:visibility-explore-view"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.project_ids = [
self.create_project(organization=self.org).id,
self.create_project(organization=self.org).id,
]
query = {"query": [{"fields": ["span.op"], "mode": "samples"}]}
self.model_a = ExploreSavedQuery.objects.create(
organization=self.org, created_by_id=self.user.id, name="Test query A", query=query
)
self.model_b = ExploreSavedQuery.objects.create(
organization=self.org, created_by_id=self.user.id, name="Test query B", query=query
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query_id=self.model_a.id,
position=1,
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query_id=self.model_b.id,
position=2,
)
self.url = reverse("sentry-api-0-explore-saved-query-starred-order", args=[self.org.slug])
def test_put(self) -> None:
with self.feature(self.feature_name):
ids = (
ExploreSavedQueryStarred.objects.filter(organization=self.org, user_id=self.user.id)
.order_by("position")
.values_list("explore_saved_query_id", flat=True)
)
assert list(ids) == [self.model_a.id, self.model_b.id]
response = self.client.put(
self.url, data={"query_ids": [self.model_b.id, self.model_a.id]}
)
assert response.status_code == 204
ids = (
ExploreSavedQueryStarred.objects.filter(organization=self.org, user_id=self.user.id)
.order_by("position")
.values_list("explore_saved_query_id", flat=True)
)
assert list(ids) == [self.model_b.id, self.model_a.id]
def test_put_invalid_query_ids(self) -> None:
with self.feature(self.feature_name):
response = self.client.put(
self.url, data={"query_ids": [self.model_a.id, self.model_a.id]}
)
assert response.status_code == 400
| ExploreSavedQueryStarredOrderTest |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_details.py | {
"start": 7381,
"end": 7638
} | class ____(serializers.Serializer[User]):
organizations = serializers.ListField(
child=serializers.CharField(required=False), required=True
)
hardDelete = serializers.BooleanField(required=False)
@control_silo_endpoint
| DeleteUserSerializer |
python | pypa__warehouse | tests/unit/accounts/test_views.py | {
"start": 47380,
"end": 55259
} | class ____:
def test_webauthn_get_options_already_authenticated(self):
request = pretend.stub(user=pretend.stub(), _=lambda a: a)
result = views.webauthn_authentication_options(request)
assert result == {"fail": {"errors": ["Already authenticated"]}}
def test_webauthn_get_options_invalid_token(self, monkeypatch, pyramid_request):
_get_two_factor_data = pretend.raiser(TokenException)
monkeypatch.setattr(views, "_get_two_factor_data", _get_two_factor_data)
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.webauthn_authentication_options(pyramid_request)
assert pyramid_request.session.flash.calls == [
pretend.call("Invalid or expired two factor login.", queue="error")
]
assert result == {"fail": {"errors": ["Invalid or expired two factor login."]}}
def test_webauthn_get_options(self, monkeypatch):
_get_two_factor_data = pretend.call_recorder(
lambda r: {"redirect_to": "foobar", "userid": 1}
)
monkeypatch.setattr(views, "_get_two_factor_data", _get_two_factor_data)
user_service = pretend.stub(
get_webauthn_assertion_options=lambda *a, **kw: {"not": "real"}
)
request = pretend.stub(
session=pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None),
get_webauthn_challenge=pretend.call_recorder(lambda: "not_real"),
),
registry=pretend.stub(settings=pretend.stub(get=lambda *a: pretend.stub())),
domain=pretend.stub(),
user=None,
find_service=lambda interface, **kwargs: user_service,
)
result = views.webauthn_authentication_options(request)
assert _get_two_factor_data.calls == [pretend.call(request)]
assert result == {"not": "real"}
def test_webauthn_validate_already_authenticated(self):
# TODO: Determine why we can't use `request.user` here.
request = pretend.stub(identity=pretend.stub())
result = views.webauthn_authentication_validate(request)
assert result == {"fail": {"errors": ["Already authenticated"]}}
def test_webauthn_validate_invalid_token(self, monkeypatch, pyramid_request):
_get_two_factor_data = pretend.raiser(TokenException)
monkeypatch.setattr(views, "_get_two_factor_data", _get_two_factor_data)
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.webauthn_authentication_validate(pyramid_request)
assert pyramid_request.session.flash.calls == [
pretend.call("Invalid or expired two factor login.", queue="error")
]
assert result == {"fail": {"errors": ["Invalid or expired two factor login."]}}
def test_webauthn_validate_invalid_form(self, monkeypatch):
_get_two_factor_data = pretend.call_recorder(
lambda r: {"redirect_to": "foobar", "userid": 1}
)
monkeypatch.setattr(views, "_get_two_factor_data", _get_two_factor_data)
request = pretend.stub(
# TODO: Determine why we can't use `request.user` here.
identity=None,
POST={},
session=pretend.stub(
get_webauthn_challenge=pretend.call_recorder(lambda: "not_real"),
clear_webauthn_challenge=pretend.call_recorder(lambda: pretend.stub()),
),
find_service=lambda *a, **kw: pretend.stub(),
host_url=pretend.stub(),
registry=pretend.stub(settings=pretend.stub(get=lambda *a: pretend.stub())),
rp_id=pretend.stub(),
domain=pretend.stub(),
)
form_obj = pretend.stub(
validate=pretend.call_recorder(lambda: False),
credential=pretend.stub(errors=["Fake validation failure"]),
)
form_class = pretend.call_recorder(lambda *a, **kw: form_obj)
monkeypatch.setattr(views, "WebAuthnAuthenticationForm", form_class)
result = views.webauthn_authentication_validate(request)
assert _get_two_factor_data.calls == [pretend.call(request)]
assert request.session.get_webauthn_challenge.calls == [pretend.call()]
assert request.session.clear_webauthn_challenge.calls == [pretend.call()]
assert result == {"fail": {"errors": ["Fake validation failure"]}}
@pytest.mark.parametrize("has_recovery_codes", [True, False])
@pytest.mark.parametrize("remember_device", [True, False])
def test_webauthn_validate(
self, monkeypatch, pyramid_request, has_recovery_codes, remember_device
):
_get_two_factor_data = pretend.call_recorder(
lambda r: {"redirect_to": "foobar", "userid": 1}
)
monkeypatch.setattr(views, "_get_two_factor_data", _get_two_factor_data)
_login_user = pretend.call_recorder(lambda *a, **kw: pretend.stub())
monkeypatch.setattr(views, "_login_user", _login_user)
_remember_device = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(views, "_remember_device", _remember_device)
user = pretend.stub(
webauthn=pretend.stub(sign_count=pretend.stub()),
has_recovery_codes=has_recovery_codes,
)
user_service = pretend.stub(
get_user=pretend.call_recorder(lambda uid: user),
get_webauthn_by_credential_id=pretend.call_recorder(
lambda *a: pretend.stub(label="webauthn_label")
),
)
pyramid_request.session = pretend.stub(
get_webauthn_challenge=pretend.call_recorder(lambda: "not_real"),
clear_webauthn_challenge=pretend.call_recorder(lambda: pretend.stub()),
)
pyramid_request.find_service = lambda *a, **kw: user_service
pyramid_request.user = user
form_obj = pretend.stub(
validate=pretend.call_recorder(lambda: True),
credential=pretend.stub(errors=["Fake validation failure"]),
validated_credential=VerifiedAuthentication(
credential_id=b"",
new_sign_count=1,
credential_device_type="single_device",
credential_backed_up=False,
user_verified=False,
),
remember_device=pretend.stub(data=remember_device),
)
form_class = pretend.call_recorder(lambda *a, **kw: form_obj)
monkeypatch.setattr(views, "WebAuthnAuthenticationForm", form_class)
send_email = pretend.call_recorder(lambda *a: None)
monkeypatch.setattr(views, "send_recovery_code_reminder_email", send_email)
result = views.webauthn_authentication_validate(pyramid_request)
assert _get_two_factor_data.calls == [pretend.call(pyramid_request)]
assert _login_user.calls == [
pretend.call(
pyramid_request,
1,
"webauthn",
two_factor_label="webauthn_label",
)
]
assert pyramid_request.session.get_webauthn_challenge.calls == [pretend.call()]
assert pyramid_request.session.clear_webauthn_challenge.calls == [
pretend.call()
]
assert send_email.calls == (
[] if has_recovery_codes else [pretend.call(pyramid_request, user)]
)
assert _remember_device.calls == (
[]
if not remember_device
else [
pretend.call(pyramid_request, pyramid_request.response, 1, "webauthn")
]
)
assert result == {
"success": "Successful WebAuthn assertion",
"redirect_to": "foobar",
}
| TestWebAuthn |
python | redis__redis-py | tests/test_asyncio/test_cluster.py | {
"start": 1612,
"end": 9736
} | class ____:
"""A class to proxy a node connection to a different port"""
def __init__(self, addr, redis_addr):
self.addr = addr
self.redis_addr = redis_addr
self.server = None
self.task = None
self.n_connections = 0
async def start(self):
# test that we can connect to redis
async with async_timeout(2):
_, redis_writer = await asyncio.open_connection(*self.redis_addr)
redis_writer.close()
self.server = await asyncio.start_server(
self.handle, *self.addr, reuse_address=True
)
self.task = asyncio.create_task(self.server.serve_forever())
async def handle(self, reader, writer):
# establish connection to redis
redis_reader, redis_writer = await asyncio.open_connection(*self.redis_addr)
try:
self.n_connections += 1
pipe1 = asyncio.create_task(self.pipe(reader, redis_writer))
pipe2 = asyncio.create_task(self.pipe(redis_reader, writer))
await asyncio.gather(pipe1, pipe2)
finally:
redis_writer.close()
await self.redis_writer.wait_closed()
writer.close()
await writer.wait_closed()
async def aclose(self):
try:
self.task.cancel()
await asyncio.wait_for(self.task, timeout=1)
self.server.close()
await self.server.wait_closed()
except asyncio.TimeoutError:
pass
except asyncio.CancelledError:
pass
async def pipe(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
):
while True:
data = await reader.read(1000)
if not data:
break
writer.write(data)
await writer.drain()
@pytest_asyncio.fixture()
async def slowlog(r: RedisCluster) -> None:
"""
Set the slowlog threshold to 0, and the
max length to 128. This will force every
command into the slowlog and allow us
to test it
"""
# Save old values
current_config = await r.config_get(target_nodes=r.get_primaries()[0])
old_slower_than_value = current_config["slowlog-log-slower-than"]
old_max_length_value = current_config["slowlog-max-len"]
# Set the new values
await r.config_set("slowlog-log-slower-than", 0)
await r.config_set("slowlog-max-len", 128)
yield
await r.config_set("slowlog-log-slower-than", old_slower_than_value)
await r.config_set("slowlog-max-len", old_max_length_value)
async def get_mocked_redis_client(
cluster_slots_raise_error=False, *args, **kwargs
) -> RedisCluster:
"""
Return a stable RedisCluster object that have deterministic
nodes and slots setup to remove the problem of different IP addresses
on different installations and machines.
"""
cluster_slots = kwargs.pop("cluster_slots", default_cluster_slots)
coverage_res = kwargs.pop("coverage_result", "yes")
cluster_enabled = kwargs.pop("cluster_enabled", True)
with mock.patch.object(ClusterNode, "execute_command") as execute_command_mock:
async def execute_command(*_args, **_kwargs):
if _args[0] == "CLUSTER SLOTS":
if cluster_slots_raise_error:
raise ResponseError()
else:
mock_cluster_slots = cluster_slots
return mock_cluster_slots
elif _args[0] == "COMMAND":
return {"get": [], "set": []}
elif _args[0] == "INFO":
return {"cluster_enabled": cluster_enabled}
elif len(_args) > 1 and _args[1] == "cluster-require-full-coverage":
return {"cluster-require-full-coverage": coverage_res}
else:
return await execute_command_mock(*_args, **_kwargs)
execute_command_mock.side_effect = execute_command
with mock.patch.object(
AsyncCommandsParser, "initialize", autospec=True
) as cmd_parser_initialize:
def cmd_init_mock(self, r: ClusterNode) -> None:
self.commands = {
"get": {
"name": "get",
"arity": 2,
"flags": ["readonly", "fast"],
"first_key_pos": 1,
"last_key_pos": 1,
"step_count": 1,
}
}
cmd_parser_initialize.side_effect = cmd_init_mock
# Create a subclass of RedisCluster that overrides __del__
class MockedRedisCluster(RedisCluster):
def __del__(self):
# Override to prevent connection cleanup attempts
pass
@property
def connection_pool(self):
# Required abstract property implementation
return self.nodes_manager.get_default_node().redis_connection.connection_pool
return await MockedRedisCluster(*args, **kwargs)
def mock_node_resp(node: ClusterNode, response: Any) -> ClusterNode:
connection = mock.AsyncMock(spec=Connection)
connection.is_connected = True
connection.read_response.return_value = response
while node._free:
node._free.pop()
node._free.append(connection)
return node
def mock_node_resp_exc(node: ClusterNode, exc: Exception) -> ClusterNode:
connection = mock.AsyncMock(spec=Connection)
connection.is_connected = True
connection.read_response.side_effect = exc
while node._free:
node._free.pop()
node._free.append(connection)
return node
def mock_all_nodes_resp(rc: RedisCluster, response: Any) -> RedisCluster:
for node in rc.get_nodes():
mock_node_resp(node, response)
return rc
async def moved_redirection_helper(
create_redis: Callable[..., RedisCluster], failover: bool = False
) -> None:
"""
Test that the client handles MOVED response after a failover.
Redirection after a failover means that the redirection address is of a
replica that was promoted to a primary.
At first call it should return a MOVED ResponseError that will point
the client to the next server it should talk to.
Verify that:
1. it tries to talk to the redirected node
2. it updates the slot's primary to the redirected node
For a failover, also verify:
3. the redirected node's server type updated to 'primary'
4. the server type of the previous slot owner updated to 'replica'
"""
rc = await create_redis(cls=RedisCluster, flushdb=False)
slot = 12182
redirect_node = None
# Get the current primary that holds this slot
prev_primary = rc.nodes_manager.get_node_from_slot(slot)
if failover:
if len(rc.nodes_manager.slots_cache[slot]) < 2:
warnings.warn("Skipping this test since it requires to have a replica")
return
redirect_node = rc.nodes_manager.slots_cache[slot][1]
else:
# Use one of the primaries to be the redirected node
redirect_node = rc.get_primaries()[0]
r_host = redirect_node.host
r_port = redirect_node.port
with mock.patch.object(
ClusterNode, "execute_command", autospec=True
) as execute_command:
def moved_redirect_effect(self, *args, **options):
def ok_response(self, *args, **options):
assert self.host == r_host
assert self.port == r_port
return "MOCK_OK"
execute_command.side_effect = ok_response
raise MovedError(f"{slot} {r_host}:{r_port}")
execute_command.side_effect = moved_redirect_effect
assert await rc.execute_command("SET", "foo", "bar") == "MOCK_OK"
slot_primary = rc.nodes_manager.slots_cache[slot][0]
assert slot_primary == redirect_node
if failover:
assert rc.get_node(host=r_host, port=r_port).server_type == PRIMARY
assert prev_primary.server_type == REPLICA
| NodeProxy |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/bigquery.py | {
"start": 37558,
"end": 50236
} | class ____(GoogleCloudBaseOperator, _BigQueryOperatorsEncryptionConfigurationMixin):
"""
Fetch data and return it, either from a BigQuery table, or results of a query job.
Data could be narrowed down by specific columns or retrieved as a whole.
It is returned in either of the following two formats, based on "as_dict" value:
1. False (Default) - A Python list of lists, with the number of nested lists equal to the number of rows
fetched. Each nested list represents a row, where the elements within it correspond to the column values
for that particular row.
**Example Result**: ``[['Tony', 10], ['Mike', 20]``
2. True - A Python list of dictionaries, where each dictionary represents a row. In each dictionary,
the keys are the column names and the values are the corresponding values for those columns.
**Example Result**: ``[{'name': 'Tony', 'age': 10}, {'name': 'Mike', 'age': 20}]``
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryGetDataOperator`
.. note::
If you pass fields to ``selected_fields`` which are in different order than the
order of columns already in
BQ table/job, the data will still be in the order of BQ table.
For example if the BQ table has 3 columns as
``[A,B,C]`` and you pass 'B,A' in the ``selected_fields``
the data would still be of the form ``'A,B'``.
.. note::
When utilizing job id not in deferrable mode, the job should be in DONE state.
**Example - Retrieve data from BigQuery using table**::
get_data = BigQueryGetDataOperator(
task_id="get_data_from_bq",
dataset_id="test_dataset",
table_id="Transaction_partitions",
table_project_id="internal-gcp-project",
max_results=100,
selected_fields="DATE",
gcp_conn_id="airflow-conn-id",
)
**Example - Retrieve data from BigQuery using a job id**::
get_data = BigQueryGetDataOperator(
job_id="airflow_8999918812727394_86a1cecc69c5e3028d28247affd7563",
job_project_id="internal-gcp-project",
max_results=100,
selected_fields="DATE",
gcp_conn_id="airflow-conn-id",
)
:param dataset_id: The dataset ID of the requested table. (templated)
:param table_id: The table ID of the requested table. Mutually exclusive with job_id. (templated)
:param table_project_id: (Optional) The project ID of the requested table.
If None, it will be derived from the hook's project ID. (templated)
:param job_id: The job ID from which query results are retrieved.
Mutually exclusive with table_id. (templated)
:param job_project_id: (Optional) Google Cloud Project where the job is running.
If None, it will be derived from the hook's project ID. (templated)
:param project_id: (Deprecated) (Optional) The name of the project where the data
will be returned from. If None, it will be derived from the hook's project ID. (templated)
:param max_results: The maximum number of records (rows) to be fetched
from the table. (templated)
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param encryption_configuration: (Optional) Custom encryption configuration (e.g., Cloud KMS keys).
.. code-block:: python
encryption_configuration = {
"kmsKeyName": "projects/PROJECT/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY",
}
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param location: The location used for the operation.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode
:param poll_interval: (Deferrable mode only) polling period in seconds to check for the status of job.
Defaults to 4 seconds.
:param as_dict: if True returns the result as a list of dictionaries, otherwise as list of lists
(default: False).
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
"""
template_fields: Sequence[str] = (
"dataset_id",
"table_id",
"table_project_id",
"job_id",
"job_project_id",
"project_id",
"max_results",
"selected_fields",
"gcp_conn_id",
"impersonation_chain",
)
ui_color = BigQueryUIColors.QUERY.value
def __init__(
self,
*,
dataset_id: str | None = None,
table_id: str | None = None,
table_project_id: str | None = None,
job_id: str | None = None,
job_project_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
max_results: int = 100,
selected_fields: str | None = None,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
encryption_configuration: dict | None = None,
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: float = 4.0,
as_dict: bool = False,
use_legacy_sql: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.table_project_id = table_project_id
self.dataset_id = dataset_id
self.table_id = table_id
self.job_project_id = job_project_id
self.job_id = job_id
self.max_results = max_results
self.selected_fields = selected_fields
self.gcp_conn_id = gcp_conn_id
self.location = location
self.impersonation_chain = impersonation_chain
self.encryption_configuration = encryption_configuration
self.project_id = project_id
self.deferrable = deferrable
self.poll_interval = poll_interval
self.as_dict = as_dict
self.use_legacy_sql = use_legacy_sql
def _submit_job(
self,
hook: BigQueryHook,
job_id: str,
) -> BigQueryJob:
get_query = self.generate_query(hook=hook)
configuration = {"query": {"query": get_query, "useLegacySql": self.use_legacy_sql}}
self.include_encryption_configuration(configuration, "query")
"""Submit a new job and get the job id for polling the status using Triggerer."""
return hook.insert_job(
configuration=configuration,
location=self.location,
project_id=self.job_project_id or hook.project_id,
job_id=job_id,
nowait=True,
)
def generate_query(self, hook: BigQueryHook) -> str:
"""Generate a SELECT query if for the given dataset and table ID."""
query = "select "
if self.selected_fields:
query += self.selected_fields
else:
query += "*"
query += (
f" from `{self.table_project_id or hook.project_id}.{self.dataset_id}"
f".{self.table_id}` limit {self.max_results}"
)
return query
def execute(self, context: Context):
    """Fetch rows from a BigQuery table or query job; run inline or defer to a trigger."""
    # Legacy parameter shim: ``project_id`` historically named the *table*
    # project; ``table_project_id`` supersedes it.
    if self.project_id:
        self.log.warning(
            "The project_id parameter is deprecated, and will be removed in a future release."
            " Please use table_project_id instead.",
        )
        if not self.table_project_id:
            self.table_project_id = self.project_id
        else:
            self.log.info("Ignoring 'project_id' parameter, as 'table_project_id' is found.")

    # Exactly one data source must be given: a table to list, or a job whose
    # results to fetch.
    if not exactly_one(self.job_id, self.table_id):
        raise AirflowException(
            "'job_id' and 'table_id' parameters are mutually exclusive, "
            "ensure that exactly one of them is specified"
        )

    hook = BigQueryHook(
        gcp_conn_id=self.gcp_conn_id,
        impersonation_chain=self.impersonation_chain,
        use_legacy_sql=self.use_legacy_sql,
    )

    if not self.deferrable:
        # Synchronous path: read the rows in-process and return them.
        if not self.job_id:
            self.log.info(
                "Fetching Data from %s.%s.%s max results: %s",
                self.table_project_id or hook.project_id,
                self.dataset_id,
                self.table_id,
                self.max_results,
            )
            if not self.selected_fields:
                # No explicit projection: fall back to every field in the
                # table schema.
                schema: dict[str, list] = hook.get_schema(
                    dataset_id=self.dataset_id,
                    table_id=self.table_id,
                    project_id=self.table_project_id or hook.project_id,
                )
                if "fields" in schema:
                    self.selected_fields = ",".join([field["name"] for field in schema["fields"]])
            rows: list[Row] | RowIterator | list[dict[str, Any]] = hook.list_rows(
                dataset_id=self.dataset_id,
                table_id=self.table_id,
                max_results=self.max_results,
                selected_fields=self.selected_fields,
                location=self.location,
                project_id=self.table_project_id or hook.project_id,
            )
        else:
            self.log.info(
                "Fetching data from job '%s:%s.%s' max results: %s",
                self.job_project_id or hook.project_id,
                self.location,
                self.job_id,
                self.max_results,
            )
            rows = hook.get_query_results(
                job_id=self.job_id,
                location=self.location,
                selected_fields=self.selected_fields,
                max_results=self.max_results,
                project_id=self.job_project_id or hook.project_id,
            )
        if isinstance(rows, RowIterator):
            raise TypeError(
                "BigQueryHook.list_rows() returns iterator when return_iterator is False (default)"
            )
        self.log.info("Total extracted rows: %s", len(rows))

        # Serialize each row either as a dict keyed by column name or as a
        # plain list of values.
        table_data: list[dict[str, Any]] | list[Any]
        if self.as_dict:
            table_data = [dict(row) for row in rows]
        else:
            table_data = [row.values() if isinstance(row, Row) else list(row.values()) for row in rows]

        return table_data

    # Deferrable path: make sure there is a job to poll (submitting one from
    # the generated query if needed), then hand off to the trigger and resume
    # in execute_complete().
    if not self.job_id:
        job: BigQueryJob | UnknownJob = self._submit_job(hook, job_id="")
    else:
        job = hook.get_job(
            job_id=self.job_id, project_id=self.job_project_id or hook.project_id, location=self.location
        )
    # Expose the job id so downstream tasks / sensors can reference it.
    context["ti"].xcom_push(key="job_id", value=job.job_id)
    self.defer(
        timeout=self.execution_timeout,
        trigger=BigQueryGetDataTrigger(
            conn_id=self.gcp_conn_id,
            job_id=job.job_id,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            project_id=self.job_project_id or hook.project_id,
            location=self.location or hook.location,
            poll_interval=self.poll_interval,
            as_dict=self.as_dict,
            impersonation_chain=self.impersonation_chain,
            selected_fields=self.selected_fields,
        ),
        method_name="execute_complete",
    )
def execute_complete(self, context: Context, event: dict[str, Any]) -> Any:
    """
    Callback invoked when the deferred trigger fires.

    Returns the fetched records immediately; any failure reported by the
    trigger is surfaced as an AirflowException.
    """
    if event["status"] == "error":
        raise AirflowException(event["message"])
    records = event["records"]
    self.log.info("Total extracted rows: %s", len(records))
    return records
| BigQueryGetDataOperator |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 119199,
"end": 120585
} | class ____(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
amplitude : float, optional
    Amplitude (scale factor) of the model.
tau : float, optional
    E-folding scale of the exponent; must be nonzero.
See Also
--------
Logarithmic1D, Gaussian1D
"""
# Model parameters: ``amplitude`` scales the curve, ``tau`` sets the
# e-folding scale of the exponent (validated nonzero elsewhere in the class).
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
"""Derivative with respect to parameters."""
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
    """Logarithmic1D with amplitude and tau swapped, inverting this model."""
    return Logarithmic1D(amplitude=self.tau, tau=self.amplitude)
def _tau_validator(self, val):
    """tau cannot be 0."""
    # np.all covers both scalar and array-valued tau; a zero tau would
    # divide by zero in evaluate()/fit_deriv() (both compute x / tau).
    if np.all(val == 0):
        raise ValueError("0 is not an allowed value for tau")

# Register the validator on the class-level Parameter descriptor so it runs
# whenever tau is assigned.
tau._validator = _tau_validator
@property
def input_units(self):
    """Units of the model input, derived from the ``tau`` parameter."""
    tau_unit = self.tau.input_unit
    return None if tau_unit is None else {self.inputs[0]: tau_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
    """Match ``tau`` to the input (x) units and ``amplitude`` to the output units."""
    x_unit = inputs_unit[self.inputs[0]]
    y_unit = outputs_unit[self.outputs[0]]
    return {"tau": x_unit, "amplitude": y_unit}
| Exponential1D |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.