language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/git/modeling_git.py | {
"start": 33652,
"end": 35601
} | class ____(GitPreTrainedModel):
config: GitVisionConfig
main_input_name = "pixel_values"
input_modalities = ("image",)
# Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP->Git
def __init__(self, config: GitVisionConfig):
super().__init__(config)
self.vision_model = GitVisionTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GitVisionModel
>>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
>>> model = GitVisionModel.from_pretrained("microsoft/git-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
return self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
| GitVisionModel |
python | openai__openai-python | src/openai/lib/streaming/chat/_events.py | {
"start": 437,
"end": 665
} | class ____(BaseModel):
"""This event is yielded for every chunk with `choice.delta.content` data."""
type: Literal["content.delta"]
delta: str
snapshot: str
parsed: Optional[object] = None
| ContentDeltaEvent |
python | django__django | tests/select_related_onetoone/models.py | {
"start": 1581,
"end": 1817
} | class ____(models.Model):
name = models.CharField(max_length=50)
previous_item = models.OneToOneField(
"self",
models.CASCADE,
related_name="next_item",
blank=True,
null=True,
)
| LinkedList |
python | realpython__materials | fastapi-url-shortener/source_code_final/shortener_app/config.py | {
"start": 69,
"end": 408
} | class ____(BaseSettings):
env_name: str = "Local"
base_url: str = "http://localhost:8000"
db_url: str = "sqlite:///./shortener.db"
class Config:
env_file = ".env"
@lru_cache
def get_settings() -> Settings:
settings = Settings()
print(f"Loading settings for: {settings.env_name}")
return settings
| Settings |
python | aio-libs__aiohttp | aiohttp/abc.py | {
"start": 2628,
"end": 3012
} | class ____(ABC):
"""Abstract class based view."""
def __init__(self, request: Request) -> None:
self._request = request
@property
def request(self) -> Request:
"""Request instance."""
return self._request
@abstractmethod
def __await__(self) -> Generator[None, None, StreamResponse]:
"""Execute the view handler."""
| AbstractView |
python | scrapy__scrapy | scrapy/spidermiddlewares/base.py | {
"start": 396,
"end": 3994
} | class ____:
"""Optional base class for spider middlewares.
.. versionadded:: 2.13
This class provides helper methods for asynchronous
``process_spider_output()`` and ``process_start()`` methods. Middlewares
that don't have either of these methods don't need to use this class.
You can override the
:meth:`~scrapy.spidermiddlewares.base.BaseSpiderMiddleware.get_processed_request`
method to add processing code for requests and the
:meth:`~scrapy.spidermiddlewares.base.BaseSpiderMiddleware.get_processed_item`
method to add processing code for items. These methods take a single
request or item from the spider output iterable and return a request or
item (the same or a new one), or ``None`` to remove this request or item
from the processing.
"""
def __init__(self, crawler: Crawler):
self.crawler: Crawler = crawler
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls(crawler)
def process_start_requests(
self, start: Iterable[Any], spider: Spider
) -> Iterable[Any]:
for o in start:
if (o := self._get_processed(o, None)) is not None:
yield o
async def process_start(self, start: AsyncIterator[Any]) -> AsyncIterator[Any]:
async for o in start:
if (o := self._get_processed(o, None)) is not None:
yield o
@_warn_spider_arg
def process_spider_output(
self, response: Response, result: Iterable[Any], spider: Spider | None = None
) -> Iterable[Any]:
for o in result:
if (o := self._get_processed(o, response)) is not None:
yield o
@_warn_spider_arg
async def process_spider_output_async(
self,
response: Response,
result: AsyncIterator[Any],
spider: Spider | None = None,
) -> AsyncIterator[Any]:
async for o in result:
if (o := self._get_processed(o, response)) is not None:
yield o
def _get_processed(self, o: Any, response: Response | None) -> Any:
if isinstance(o, Request):
return self.get_processed_request(o, response)
return self.get_processed_item(o, response)
def get_processed_request(
self, request: Request, response: Response | None
) -> Request | None:
"""Return a processed request from the spider output.
This method is called with a single request from the start seeds or the
spider output. It should return the same or a different request, or
``None`` to ignore it.
:param request: the input request
:type request: :class:`~scrapy.Request` object
:param response: the response being processed
:type response: :class:`~scrapy.http.Response` object or ``None`` for
start seeds
:return: the processed request or ``None``
"""
return request
def get_processed_item(self, item: Any, response: Response | None) -> Any:
"""Return a processed item from the spider output.
This method is called with a single item from the start seeds or the
spider output. It should return the same or a different item, or
``None`` to ignore it.
:param item: the input item
:type item: item object
:param response: the response being processed
:type response: :class:`~scrapy.http.Response` object or ``None`` for
start seeds
:return: the processed item or ``None``
"""
return item
| BaseSpiderMiddleware |
python | numba__numba | numba/cuda/tests/cudapy/test_record_dtype.py | {
"start": 8725,
"end": 9106
} | class ____(TestRecordDtype):
'''
Same as TestRecordDtype, but using structured arrays instead of recarrays.
'''
def _createSampleArrays(self):
self.sample1d = np.zeros(3, dtype=recordtype)
self.samplerec1darr = np.zeros(1, dtype=recordwitharray)[0]
self.samplerec2darr = np.zeros(1, dtype=recordwith2darray)[0]
| TestRecordDtypeWithStructArrays |
python | numba__numba | numba/core/types/containers.py | {
"start": 14594,
"end": 14754
} | class ____(BaseContainerPayload):
"""
Internal type class for the dynamically-allocated payload of a list.
"""
container_class = List
| ListPayload |
python | realpython__materials | duck-typing-python/shapes.py | {
"start": 50,
"end": 147
} | class ____(Protocol):
def area(self) -> float: ...
def perimeter(self) -> float: ...
| Shape |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict19.py | {
"start": 317,
"end": 832
} | class ____(TypedDict):
x: Required[str]
def func1(td: TD1 | TD2):
# This should generate an error because "x" is not required in TD1.
v1 = td["x"]
def func2(td: TD1 | TD2):
td["x"] = "hi"
v1 = td["x"]
def func3(td: TD1 | TD2, opt: bool):
if opt:
td["x"] = "hi"
# This should generate an error because "x" is not required in TD1.
v1 = td["x"]
def func4(td: TD1 | TD2, opt: bool):
if opt:
td["x"] = "hi"
else:
td["x"] = "hi"
v1 = td["x"]
| TD2 |
python | getsentry__sentry | src/sentry/sentry_metrics/use_case_id_registry.py | {
"start": 305,
"end": 2559
} | class ____(Enum):
SPANS = "spans"
TRANSACTIONS = "transactions"
SESSIONS = "sessions"
ESCALATING_ISSUES = "escalating_issues"
PROFILES = "profiles"
METRIC_STATS = "metric_stats"
USE_CASE_ID_API_ACCESSES: Mapping[UseCaseID, UseCaseIDAPIAccess] = {
UseCaseID.SPANS: UseCaseIDAPIAccess.PUBLIC,
UseCaseID.TRANSACTIONS: UseCaseIDAPIAccess.PUBLIC,
UseCaseID.SESSIONS: UseCaseIDAPIAccess.PUBLIC,
UseCaseID.ESCALATING_ISSUES: UseCaseIDAPIAccess.PRIVATE,
UseCaseID.PROFILES: UseCaseIDAPIAccess.PRIVATE,
UseCaseID.METRIC_STATS: UseCaseIDAPIAccess.PRIVATE,
}
# UseCaseKey will be renamed to MetricPathKey
METRIC_PATH_MAPPING: Mapping[UseCaseID, UseCaseKey] = {
UseCaseID.SPANS: UseCaseKey.PERFORMANCE,
UseCaseID.TRANSACTIONS: UseCaseKey.PERFORMANCE,
UseCaseID.SESSIONS: UseCaseKey.RELEASE_HEALTH,
UseCaseID.ESCALATING_ISSUES: UseCaseKey.PERFORMANCE,
UseCaseID.PROFILES: UseCaseKey.PERFORMANCE,
UseCaseID.METRIC_STATS: UseCaseKey.PERFORMANCE,
}
# TODO: Remove this as soon as the entire indexer system is use case aware
# as this is temporary and eventually UseCaseKey will have a 1:N relationship
# with UseCaseID
REVERSE_METRIC_PATH_MAPPING: Mapping[UseCaseKey, UseCaseID] = {
UseCaseKey.RELEASE_HEALTH: UseCaseID.SESSIONS,
UseCaseKey.PERFORMANCE: UseCaseID.TRANSACTIONS,
}
# Temporary allowlist until all use cases have cardinality limit options
CARDINALITY_LIMIT_USE_CASES = (
UseCaseID.TRANSACTIONS,
UseCaseID.SESSIONS,
UseCaseID.SPANS,
)
USE_CASE_ID_WRITES_LIMIT_QUOTA_OPTIONS = {
UseCaseID.SPANS: "sentry-metrics.writes-limiter.limits.spans",
UseCaseID.TRANSACTIONS: "sentry-metrics.writes-limiter.limits.performance",
UseCaseID.SESSIONS: "sentry-metrics.writes-limiter.limits.releasehealth",
}
def get_use_case_id_api_access(use_case_id: UseCaseID) -> UseCaseIDAPIAccess:
"""
Returns the api access visibility of a use case and defaults to private in case no api access is provided.
The rationale for defaulting to private visibility is that we do not want to leak by mistake any internal metrics
that users should not have access to.
"""
return USE_CASE_ID_API_ACCESSES.get(use_case_id, UseCaseIDAPIAccess.PRIVATE)
| UseCaseID |
python | getsentry__sentry | src/sentry/api/serializers/models/event.py | {
"start": 1773,
"end": 4933
} | class ____(EventTagOptional):
key: str
value: str
def get_crash_files(events):
event_ids = [x.event_id for x in events if x.platform == "native"]
if event_ids:
return [
ea
for ea in EventAttachment.objects.filter(event_id__in=event_ids)
if ea.type in CRASH_FILE_TYPES
]
return []
def get_tags_with_meta(event) -> tuple[list[EventTag], dict[str, Any] | None]:
meta = get_path(event.data, "_meta", "tags") or {}
# If we have meta, we need to get the tags in their original order
# from the raw event body as the indexes need to line up with the
# metadata indexes. In other cases we can use event.tags
if meta:
raw_tags = event.data.get("tags") or []
else:
raw_tags = event.tags
tags_and_meta: list[tuple[EventTag, dict[str, str] | None]] = sorted(
(
(
{
"key": kv[0] and kv[0].split("sentry:", 1)[-1],
"value": kv[1],
},
prune_empty_keys(
{
"key": get_path(meta, str(i), "0"),
"value": get_path(meta, str(i), "1"),
}
)
or None,
)
for i, kv in enumerate(raw_tags)
if kv is not None
),
key=lambda x: x[0]["key"] if x[0]["key"] is not None else "",
)
# Add 'query' for each tag to tell the UI what to use as query
# params for this tag.
tags = [tag for tag, _ in tags_and_meta]
for tag in tags:
query = convert_user_tag_to_query(tag["key"], tag["value"])
if query:
tag["query"] = query
map_device_class_tags(tags)
tags_meta = prune_empty_keys({str(i): meta for i, (_, meta) in enumerate(tags_and_meta)})
return tags, meta_with_chunks(tags, tags_meta)
def get_entries(event: Event | GroupEvent, user: User, is_public: bool = False):
# XXX(dcramer): These are called entries for future-proofing
platform = event.platform
meta = event.data.get("_meta") or {}
interface_list = []
for key, interface in event.interfaces.items():
# we treat user as a special contextual item
if key in RESERVED_KEYS:
continue
data = interface.get_api_context(is_public=is_public, platform=platform)
# data might not be returned for e.g. a public HTTP repr
# However, spans can be an empty list and should still be included.
if not data and interface.path != "spans":
continue
entry = {"data": data, "type": interface.external_type}
api_meta = None
if meta.get(key):
api_meta = interface.get_api_meta(meta[key], is_public=is_public, platform=platform)
api_meta = meta_with_chunks(data, api_meta)
interface_list.append((interface, entry, api_meta))
interface_list.sort(key=lambda x: x[0].get_display_score(), reverse=True)
return (
[i[1] for i in interface_list],
{k: {"data": i[2]} for k, i in enumerate(interface_list) if i[2]},
)
| EventTag |
python | donnemartin__system-design-primer | solutions/system_design/query_cache/query_cache_snippets.py | {
"start": 26,
"end": 704
} | class ____(object):
def __init__(self, memory_cache, reverse_index_cluster):
self.memory_cache = memory_cache
self.reverse_index_cluster = reverse_index_cluster
def parse_query(self, query):
"""Remove markup, break text into terms, deal with typos,
normalize capitalization, convert to use boolean operations.
"""
...
def process_query(self, query):
query = self.parse_query(query)
results = self.memory_cache.get(query)
if results is None:
results = self.reverse_index_cluster.process_search(query)
self.memory_cache.set(query, results)
return results
| QueryApi |
python | agronholm__apscheduler | src/apscheduler/_enums.py | {
"start": 475,
"end": 921
} | class ____(Enum):
"""
Used to track the running state of schedulers.
.. attribute:: starting
not running yet, but in the process of starting
.. attribute:: started
running
.. attribute:: stopping
still running but in the process of shutting down
.. attribute:: stopped
not running
"""
starting = auto()
started = auto()
stopping = auto()
stopped = auto()
| RunState |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 1220,
"end": 1626
} | class ____:
username = wtforms.StringField(
validators=[wtforms.validators.InputRequired(message="Specify username")]
)
def validate_username(self, field):
userid = self.user_service.find_userid(field.data)
if userid is None:
raise wtforms.validators.ValidationError(
"No user found with that username. Try again."
)
| UsernameMixin |
python | zarr-developers__zarr-python | src/zarr/testing/buffer.py | {
"start": 544,
"end": 658
} | class ____(cpu.Buffer):
"""Example of a custom Buffer that handles ArrayLike"""
__test__ = False
| TestBuffer |
python | walkccc__LeetCode | solutions/1986. Minimum Number of Work Sessions to Finish the Tasks/1986.py | {
"start": 0,
"end": 967
} | class ____:
def minSessions(self, tasks: list[int], sessionTime: int) -> int:
# Returns True if we can assign tasks[s..n) to `sessions`. Note that `sessions`
# may be occupied by some tasks.
def dfs(s: int, sessions: list[int]) -> bool:
if s == len(tasks):
return True
for i, session in enumerate(sessions):
# Can't assign the tasks[s] to this session.
if session + tasks[s] > sessionTime:
continue
# Assign the tasks[s] to this session.
sessions[i] += tasks[s]
if dfs(s + 1, sessions):
return True
# Backtracking.
sessions[i] -= tasks[s]
# If it's the first time we assign the tasks[s] to this session, then future
# `session`s can't satisfy either.
if sessions[i] == 0:
return False
return False
for numSessions in range(1, len(tasks) + 1):
if dfs(0, [0] * numSessions):
return numSessions
| Solution |
python | pytorch__pytorch | torch/utils/benchmark/utils/compare.py | {
"start": 417,
"end": 584
} | class ____(enum.Enum):
NONE = "none"
COLUMNWISE = "columnwise"
ROWWISE = "rowwise"
# Classes to separate internal bookkeeping from what is rendered.
| Colorize |
python | tensorflow__tensorflow | tensorflow/python/ops/math_grad_test.py | {
"start": 1462,
"end": 2477
} | class ____(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(None, None, None, None),
(None, [], None, None),
([], [], [], []),
([], [None], [0], []),
([None], [None], None, None),
([None, 1], [None], [1], [0]),
([None, 1], [1, None], [1], [0]),
([None, 1], [2, None], None, None),
([2], [1], [], [0]),
([2], [2], [], []),
([3, 1, 5, 1, 7], [2, 1, 4, 7], [1, 3], [0, 2]),
)
def testShapes(self, x_shape, y_shape, expected_x_axes, expected_y_axes):
x_axes1, y_axes1 = math_grad._InferGradientReductionAxes(
tensor_shape.TensorShape(x_shape), tensor_shape.TensorShape(y_shape))
y_axes2, x_axes2 = math_grad._InferGradientReductionAxes(
tensor_shape.TensorShape(y_shape), tensor_shape.TensorShape(x_shape))
self.assertEqual(x_axes1, x_axes2)
self.assertEqual(y_axes1, y_axes2)
self.assertEqual(expected_x_axes, x_axes1)
self.assertEqual(expected_y_axes, y_axes1)
| InferGradientReductionAxes |
python | pytorch__pytorch | test/distributed/test_multi_threaded_pg.py | {
"start": 4865,
"end": 12674
} | class ____(MultiThreadedTestCase):
@property
def world_size(self):
return 4
def setUp(self):
os.environ["TORCH_DIST_INIT_BARRIER"] = "1"
super().setUp()
self._spawn_threads()
def tearDown(self):
super().tearDown()
os.environ["TORCH_DIST_INIT_BARRIER"] = "0"
def test_allgather(self):
input_tensor = torch.ones(3, 3) * dist.get_rank()
output_tensors = [
torch.empty_like(input_tensor) for _ in range(self.world_size)
]
dist.all_gather(output_tensors, input_tensor)
for rank, out_tensor in enumerate(output_tensors):
self.assertEqual(out_tensor, torch.ones(3, 3) * rank)
def test_broadcast(self):
input_tensor = torch.ones(3, 3) * dist.get_rank()
for rank in range(self.world_size):
cloned_input = input_tensor.clone()
dist.broadcast(cloned_input, src=rank)
self.assertEqual(cloned_input, torch.ones(3, 3) * rank)
def test_scatter(self):
if dist.get_rank() == 0:
scatter_list = [torch.ones(3, 3) * rank for rank in range(self.world_size)]
else:
scatter_list = None
output_tensor = torch.empty(3, 3)
dist.scatter(output_tensor, scatter_list)
self.assertEqual(output_tensor, torch.ones(3, 3) * dist.get_rank())
def test_reduce_scatter(self):
to_reduce_scatter = [torch.ones(3, 3) * rank for rank in range(self.world_size)]
output_tensor = torch.empty(3, 3)
dist.reduce_scatter(output_tensor, to_reduce_scatter)
expected_tensor = torch.ones(3, 3) * dist.get_rank() * self.world_size
self.assertEqual(output_tensor, expected_tensor)
output_tensor = torch.empty(3, 3)
dist.reduce_scatter(output_tensor, to_reduce_scatter, op=dist.ReduceOp.AVG)
expected_tensor = torch.ones(3, 3) * dist.get_rank()
self.assertEqual(output_tensor, expected_tensor)
def test_broadcast_object_list(self):
val = 99 if dist.get_rank() == 0 else None
object_list = [val] * dist.get_world_size()
print(f"{dist.get_rank()} -> {dist.get_world_size()}")
dist.broadcast_object_list(object_list=object_list)
self.assertEqual(99, object_list[0])
def test_all_reduce(self):
output = torch.ones(3, 3) * dist.get_rank()
dist.all_reduce(output)
res_num = ((0 + self.world_size - 1) * self.world_size) / 2
self.assertEqual(output, torch.ones(3, 3) * res_num)
def test_all_to_all(self):
rank = self.rank
world_size = self.world_size
input_tensor_list = [
torch.ones(3, 3) * x
for x in range(rank * world_size, (rank + 1) * world_size)
]
output_tensor_list = [torch.empty_like(tensor) for tensor in input_tensor_list]
dist.all_to_all(output_tensor_list, input_tensor_list)
expected_tensor_list = [
torch.ones(3, 3) * x
for x in range(rank, world_size * world_size, world_size)
]
self.assertEqual(expected_tensor_list, output_tensor_list)
def test_all_reduce_ops(self):
tensor = torch.tensor([dist.get_rank() + 1])
dist.all_reduce(tensor, op=ReduceOp.PRODUCT)
expected = reduce(operator.mul, range(1, self.world_size + 1))
self.assertEqual(expected, tensor.item())
tensor = torch.tensor([dist.get_rank() + 1])
dist.all_reduce(tensor, op=ReduceOp.MIN)
self.assertEqual(1, tensor.item())
tensor = torch.tensor([dist.get_rank() + 1])
dist.all_reduce(tensor, op=ReduceOp.MAX)
self.assertEqual(self.world_size, tensor.item())
tensor = torch.tensor([dist.get_rank() + 1])
dist.all_reduce(tensor, op=ReduceOp.BAND)
expected = reduce(operator.and_, range(1, self.world_size + 1))
self.assertEqual(expected, tensor.item())
tensor = torch.tensor([dist.get_rank() + 1])
dist.all_reduce(tensor, op=ReduceOp.BOR)
expected = reduce(operator.or_, range(1, self.world_size + 1))
self.assertEqual(expected, tensor.item())
tensor = torch.tensor([dist.get_rank() + 1])
dist.all_reduce(tensor, op=ReduceOp.BXOR)
expected = reduce(operator.xor, range(1, self.world_size + 1))
self.assertEqual(expected, tensor.item())
def test_assert_equal_on_rank(self):
# RNG is shared across threads. So instead of asserting on all threads
# we only assert on rank 0
self_tensor = torch.rand(3, 3)
rank_0_tensor = self_tensor.clone()
dist.broadcast(rank_0_tensor, src=0)
self.assertEqualOnRank(rank_0_tensor, self_tensor, rank=0)
self.assertNotEqualOnRank(rank_0_tensor, self_tensor, rank=1)
def test_subpg(self):
subpg0 = dist.new_group([0, 1])
subpg1 = dist.new_group([2, 3])
current_rank = dist.get_rank()
output = torch.ones(3, 3) * current_rank
# call all_reduce on subpg0 and subpg1 concurrently
if current_rank in [0, 1]:
dist.all_reduce(output, group=subpg0)
else:
dist.all_reduce(output, group=subpg1)
if current_rank in [0, 1]:
self.assertEqual(output, torch.ones(3, 3) * 1)
else:
self.assertEqual(output, torch.ones(3, 3) * 5)
def test_using_pg_from_another_thread(self):
def stuff_in_other_thread(pg):
x = torch.rand(4, requires_grad=True)
dist.all_reduce(x, group=pg)
t = threading.Thread(target=stuff_in_other_thread, args=(dist.group.WORLD,))
t.start()
t.join()
def test_gather(self):
if dist.get_rank() == 0:
gather_list = [torch.empty(3, 3) for _ in range(self.world_size)]
else:
gather_list = None
input_tensor = torch.ones(3, 3) * dist.get_rank()
dist.gather(input_tensor, gather_list)
if dist.get_rank() == 0:
for i in range(self.world_size):
self.assertEqual(gather_list[i], torch.ones(3, 3) * i)
def test_all_reduce_coalesced(self):
t0 = torch.ones(3, 3) * dist.get_rank()
t1 = torch.ones(3, 3) * dist.get_rank() * 2
dist.all_reduce_coalesced([t0, t1])
res_num = ((0 + self.world_size - 1) * self.world_size) / 2
self.assertEqual(t0, torch.ones(3, 3) * res_num)
self.assertEqual(t1, torch.ones(3, 3) * (res_num * 2))
@skip_if_lt_x_gpu(1)
def test_bwd_sees_fwd_pg(self):
fwd_tid = threading.current_thread().ident
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, rank):
result = rank * 2
ctx.save_for_backward(result, rank)
assert int(rank.item()) == dist.get_rank()
return result
@staticmethod
def backward(ctx, grad_output):
result, rank = ctx.saved_tensors
bwd_tid = threading.current_thread().ident
self.assertEqual(
fwd_tid,
bwd_tid,
f"bwd not running in the same thread a fwd for rank {rank.item()}",
)
self.assertTrue(dist.is_initialized())
self.assertEqual(int(rank.item()), dist.get_rank())
dist.all_reduce(result)
self.assertEqual(int(result.item()), 12) # (0 + 1 + 2 + 3) * 2
return grad_output * result
x = torch.tensor(
[dist.get_rank()], dtype=torch.float, device=device_type, requires_grad=True
)
x = MyFunc.apply(x)
x.sum().backward()
if __name__ == "__main__":
run_tests()
| TestCollectivesWithBaseClass |
python | coleifer__peewee | tests/base_models.py | {
"start": 1278,
"end": 1402
} | class ____(TestModel):
sample = ForeignKeyField(Sample, backref='metadata')
value = FloatField(default=0.0)
| SampleMeta |
python | realpython__materials | tic-tac-toe-ai-python/source_code_step_2/tic-tac-toe/library/src/tic_tac_toe/game/players.py | {
"start": 1268,
"end": 1507
} | class ____(ComputerPlayer):
def get_computer_move(self, game_state: GameState) -> Move | None:
try:
return random.choice(game_state.possible_moves)
except IndexError:
return None
| RandomComputerPlayer |
python | google__jax | jax/_src/core.py | {
"start": 106459,
"end": 108081
} | class ____:
__slots__ = ()
def __repr__(self): return "[dynamic]"
def replace_tracer_for_error_message(obj):
# TODO(mattjj): Many ideas for improving this. Crawl the stack and see if
# there are user variables whose value is == to this object? Or search
# parameters of functions being transformed, at least? Or at least assign
# short unique ids to them?
if isinstance(obj, Tracer):
return SomeTracer()
else:
return obj
def evaluate_shape(shape: Shape, dim_vars: Sequence[str],
*dim_values: Array) -> Sequence[Array]:
"""Evaluates a shape possibly containing non-constants.
Args:
shape: the shape to evaluate.
dim_vars: the dimension variables names that may appear in `shape`.
dim_values: the dimension values corresponding to `dim_vars`.
Returns:
a tuple of JAX values corresponding to `shape`, of type
`dim_value_dtype`.
"""
env = dict(zip(dim_vars, dim_values))
def eval_one_dim(d: DimSize):
try:
return operator.index(d)
except:
# Is a _DimExpr
return d._evaluate(env) # type: ignore
return tuple(eval_one_dim(d) for d in shape)
def dim_value_dtype():
"""The dtype to be used for dimension values."""
return dtypes.default_int_dtype()
def dim_constant(ct: int):
dtype = dim_value_dtype()
assert dtype in (np.int32, np.int64)
if dtype == np.int32:
return np.int32(ct)
elif dtype == np.int64:
return np.int64(ct)
def dim_value_aval() -> AbstractValue:
return ShapedArray((), dim_value_dtype(), weak_type=True, sharding=None)
# ------------------- Call -------------------
| SomeTracer |
python | numba__numba | numba/cpython/builtins.py | {
"start": 16546,
"end": 36165
} | class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
assert len(args) == 1
if isinstance(args[0], (types.DType, types.NumberClass)):
return signature(args[0].dtype, *args)
@lower_builtin(get_type_min_value, types.NumberClass)
@lower_builtin(get_type_min_value, types.DType)
def lower_get_type_min_value(context, builder, sig, args):
typ = sig.args[0].dtype
if isinstance(typ, types.Integer):
bw = typ.bitwidth
lty = ir.IntType(bw)
val = typ.minval
res = ir.Constant(lty, val)
elif isinstance(typ, types.Float):
bw = typ.bitwidth
if bw == 32:
lty = ir.FloatType()
elif bw == 64:
lty = ir.DoubleType()
else:
raise NotImplementedError("llvmlite only supports 32 and 64 bit floats")
npty = getattr(np, 'float{}'.format(bw))
res = ir.Constant(lty, -np.inf)
elif isinstance(typ, (types.NPDatetime, types.NPTimedelta)):
bw = 64
lty = ir.IntType(bw)
val = types.int64.minval + 1 # minval is NaT, so minval + 1 is the smallest value
res = ir.Constant(lty, val)
return impl_ret_untracked(context, builder, lty, res)
@lower_builtin(get_type_max_value, types.NumberClass)
@lower_builtin(get_type_max_value, types.DType)
def lower_get_type_max_value(context, builder, sig, args):
typ = sig.args[0].dtype
if isinstance(typ, types.Integer):
bw = typ.bitwidth
lty = ir.IntType(bw)
val = typ.maxval
res = ir.Constant(lty, val)
elif isinstance(typ, types.Float):
bw = typ.bitwidth
if bw == 32:
lty = ir.FloatType()
elif bw == 64:
lty = ir.DoubleType()
else:
raise NotImplementedError("llvmlite only supports 32 and 64 bit floats")
npty = getattr(np, 'float{}'.format(bw))
res = ir.Constant(lty, np.inf)
elif isinstance(typ, (types.NPDatetime, types.NPTimedelta)):
bw = 64
lty = ir.IntType(bw)
val = types.int64.maxval
res = ir.Constant(lty, val)
return impl_ret_untracked(context, builder, lty, res)
# -----------------------------------------------------------------------------
from numba.core.typing.builtins import IndexValue, IndexValueType
from numba.extending import overload, register_jitable
@lower_builtin(IndexValue, types.intp, types.Type)
@lower_builtin(IndexValue, types.uintp, types.Type)
def impl_index_value(context, builder, sig, args):
typ = sig.return_type
index, value = args
index_value = cgutils.create_struct_proxy(typ)(context, builder)
index_value.index = index
index_value.value = value
return index_value._getvalue()
@overload(min)
def indval_min(indval1, indval2):
if isinstance(indval1, IndexValueType) and \
isinstance(indval2, IndexValueType):
def min_impl(indval1, indval2):
if np.isnan(indval1.value):
if np.isnan(indval2.value):
# both indval1 and indval2 are nans so order by index
if indval1.index < indval2.index:
return indval1
else:
return indval2
else:
# comparing against one nan always considered less
return indval1
elif np.isnan(indval2.value):
# indval1 not a nan but indval2 is so consider indval2 less
return indval2
elif indval1.value > indval2.value:
return indval2
elif indval1.value == indval2.value:
if indval1.index < indval2.index:
return indval1
else:
return indval2
return indval1
return min_impl
@overload(min)
def boolval_min(val1, val2):
if isinstance(val1, types.Boolean) and \
isinstance(val2, types.Boolean):
def bool_min_impl(val1, val2):
return val1 and val2
return bool_min_impl
@overload(max)
def indval_max(indval1, indval2):
if isinstance(indval1, IndexValueType) and \
isinstance(indval2, IndexValueType):
def max_impl(indval1, indval2):
if np.isnan(indval1.value):
if np.isnan(indval2.value):
# both indval1 and indval2 are nans so order by index
if indval1.index < indval2.index:
return indval1
else:
return indval2
else:
# comparing against one nan always considered larger
return indval1
elif np.isnan(indval2.value):
# indval1 not a nan but indval2 is so consider indval2 larger
return indval2
elif indval2.value > indval1.value:
return indval2
elif indval1.value == indval2.value:
if indval1.index < indval2.index:
return indval1
else:
return indval2
return indval1
return max_impl
@overload(max)
def boolval_max(val1, val2):
if isinstance(val1, types.Boolean) and \
isinstance(val2, types.Boolean):
def bool_max_impl(val1, val2):
return val1 or val2
return bool_max_impl
greater_than = register_jitable(lambda a, b: a > b)
less_than = register_jitable(lambda a, b: a < b)
@register_jitable
def min_max_impl(iterable, op):
if isinstance(iterable, types.IterableType):
def impl(iterable):
it = iter(iterable)
return_val = next(it)
for val in it:
if op(val, return_val):
return_val = val
return return_val
return impl
@overload(min)
def iterable_min(iterable):
return min_max_impl(iterable, less_than)
@overload(max)
def iterable_max(iterable):
return min_max_impl(iterable, greater_than)
@lower_builtin(types.TypeRef, types.VarArg(types.Any))
def redirect_type_ctor(context, builder, sig, args):
"""Redirect constructor implementation to `numba_typeref_ctor(cls, *args)`,
which should be overloaded by the type's implementation.
For example:
d = Dict()
`d` will be typed as `TypeRef[DictType]()`. Thus, it will call into this
implementation. We need to redirect the lowering to a function
named ``numba_typeref_ctor``.
"""
cls = sig.return_type
def call_ctor(cls, *args):
return numba_typeref_ctor(cls, *args)
# Pack arguments into a tuple for `*args`
ctor_args = types.Tuple.from_types(sig.args)
# Make signature T(TypeRef[T], *args) where T is cls
sig = typing.signature(cls, types.TypeRef(cls), ctor_args)
if len(ctor_args) > 0:
args = (context.get_dummy_value(), # Type object has no runtime repr.
context.make_tuple(builder, ctor_args, args))
else:
args = (context.get_dummy_value(), # Type object has no runtime repr.
context.make_tuple(builder, ctor_args, ()))
return context.compile_internal(builder, call_ctor, sig, args)
@overload(sum)
def ol_sum(iterable, start=0):
    """Typing-time overload of the builtin ``sum``.

    Rejects text/bytes-like ``start`` values (mirroring CPython), then picks
    an iteration strategy based on the container kind of ``iterable``.
    """
    # Cpython explicitly rejects strings, bytes and bytearrays
    # https://github.com/python/cpython/blob/3.9/Python/bltinmodule.c#L2310-L2329 # noqa: E501
    error = None
    if isinstance(start, types.UnicodeType):
        error = ('strings', '')
    elif isinstance(start, types.Bytes):
        error = ('bytes', 'b')
    elif isinstance(start, types.ByteArray):
        error = ('bytearray', 'b')

    if error is not None:
        msg = "sum() can't sum {} [use {}''.join(seq) instead]".format(*error)
        raise TypingError(msg)

    # if the container is homogeneous then it's relatively easy to handle.
    if isinstance(iterable, (types.containers._HomogeneousTuple, types.List,
                             types.ListType, types.Array, types.RangeType)):
        iterator = iter
    elif isinstance(iterable, (types.containers._HeterogeneousTuple)):
        # if container is heterogeneous then literal unroll and hope for the
        # best.
        iterator = literal_unroll
    else:
        # Unsupported container kind: returning None rejects this overload
        # candidate so typing can fail (or match elsewhere) cleanly.
        return None

    def impl(iterable, start=0):
        acc = start
        for x in iterator(iterable):
            # This most likely widens the type, this is expected Numba behaviour
            acc = acc + x
        return acc
    return impl
# ------------------------------------------------------------------------------
# map, filter, reduce
@overload(map)
def ol_map(func, iterable, *args):
    """Implementation of the builtin ``map`` as a lazy generator."""
    def impl(func, iterable, *args):
        # Walk all input iterables in lockstep; iteration stops at the
        # shortest one, matching CPython's map semantics.
        zipped = zip(iterable, *args)
        for packed in zipped:
            yield func(*packed)
    return impl
@overload(filter)
def ol_filter(func, iterable):
    """Implementation of the builtin ``filter`` as a lazy generator."""
    # ``filter(None, it)`` keeps the truthy elements; the predicate slot may
    # be a literal None or a value typed as NoneType.
    if func is None or isinstance(func, types.NoneType):
        def impl(func, iterable):
            for item in iterable:
                if item:
                    yield item
        return impl

    # General case: keep the elements the predicate accepts.
    def impl(func, iterable):
        for item in iterable:
            if func(item):
                yield item
    return impl
@overload(isinstance)
def ol_isinstance(var, typs):
    """Typing-time overload of the builtin ``isinstance``.

    The answer is fully resolved at compile time: depending on the static
    types of ``var`` and ``typs`` this returns an implementation that is a
    constant ``True`` or constant ``False``.
    """
    # Constant-result implementations; one of these is returned.
    def true_impl(var, typs):
        return True

    def false_impl(var, typs):
        return False

    var_ty = as_numba_type(var)

    # Optional types would need a runtime None-check, which this purely
    # static scheme cannot express.
    if isinstance(var_ty, types.Optional):
        msg = f'isinstance cannot handle optional types. Found: "{var_ty}"'
        raise NumbaTypeError(msg)

    # NOTE: The current implementation of `isinstance` restricts the type of the
    # instance variable to types that are well known and in common use. The
    # danger of unrestricted type comparison is that a "default" of `False` is
    # required and this means that if there is a bug in the logic of the
    # comparison tree `isinstance` returns False! It's therefore safer to just
    # reject the compilation as untypable!
    supported_var_ty = (types.Number, types.Bytes, types.RangeType,
                        types.DictType, types.LiteralStrKeyDict, types.List,
                        types.ListType, types.Tuple, types.UniTuple, types.Set,
                        types.Function, types.ClassType, types.UnicodeType,
                        types.ClassInstanceType, types.NoneType, types.Array,
                        types.Boolean, types.Float, types.UnicodeCharSeq,
                        types.Complex, types.NPDatetime, types.NPTimedelta,)
    if not isinstance(var_ty, supported_var_ty):
        msg = f'isinstance() does not support variables of type "{var_ty}".'
        raise NumbaTypeError(msg)

    t_typs = typs

    # Check the types that the var can be an instance of, it'll be a scalar,
    # a unituple or a tuple.
    if isinstance(t_typs, types.UniTuple):
        # corner case - all types in isinstance are the same
        t_typs = (t_typs.key[0])
    if not isinstance(t_typs, types.Tuple):
        t_typs = (t_typs, )

    # Return True on the first candidate that matches; fall through to
    # false_impl only after every candidate has been ruled out.
    for typ in t_typs:
        if isinstance(typ, types.Function):
            key = typ.key[0] # functions like int(..), float(..), str(..)
        elif isinstance(typ, types.ClassType):
            key = typ # jitclasses
        else:
            key = typ.key

        # corner cases for bytes, range, ...
        # avoid registering those types on `as_numba_type`
        types_not_registered = {
            bytes: types.Bytes,
            range: types.RangeType,
            dict: (types.DictType, types.LiteralStrKeyDict),
            list: types.List,
            tuple: types.BaseTuple,
            set: types.Set,
        }
        if key in types_not_registered:
            if isinstance(var_ty, types_not_registered[key]):
                return true_impl
            continue

        if isinstance(typ, types.TypeRef):
            # Use of Numba type classes is in general not supported as they do
            # not work when the jit is disabled.
            if key not in (types.ListType, types.DictType):
                msg = ("Numba type classes (except numba.typed.* container "
                       "types) are not supported.")
                raise NumbaTypeError(msg)
            # Case for TypeRef (i.e. isinstance(var, typed.List))
            # var_ty == ListType[int64] (instance)
            # typ == types.ListType (class)
            return true_impl if type(var_ty) is key else false_impl
        else:
            numba_typ = as_numba_type(key)
            if var_ty == numba_typ:
                return true_impl
            elif isinstance(numba_typ, (types.NPDatetime, types.NPTimedelta)):
                # Datetime/timedelta compare by class, ignoring the unit.
                if isinstance(var_ty, type(numba_typ)):
                    return true_impl
            elif isinstance(numba_typ, types.ClassType) and \
                    isinstance(var_ty, types.ClassInstanceType) and \
                    var_ty.key == numba_typ.instance_type.key:
                # check for jitclasses
                return true_impl
            elif isinstance(numba_typ, types.Container) and \
                    numba_typ.key[0] == types.undefined:
                # check for containers (list, tuple, set, ...)
                if isinstance(var_ty, numba_typ.__class__) or \
                        (isinstance(var_ty, types.BaseTuple) and \
                         isinstance(numba_typ, types.BaseTuple)):
                    return true_impl
    return false_impl
# -- getattr implementation
def _getattr_raise_attr_exc(obj, name):
    """Stub whose jitted overload (below) raises AttributeError.

    Never executed in pure Python; it exists only so the @overload machinery
    has a target through which resolve_getattr can wire in the raise.
    """
    # Dummy function for the purpose of creating an overloadable stub from
    # which to raise an AttributeError as needed
    pass
@overload(_getattr_raise_attr_exc)
def ol__getattr_raise_attr_exc(obj, name):
    """Overload of the raising stub: bake the error message in at typing time."""
    if not isinstance(name, types.StringLiteral):
        raise RequireLiteralValue("argument 'name' must be a literal string")
    # obj and name are *types* here; freeze the message as a compile-time
    # constant so the jitted body only has to raise it.
    attr = name.literal_value
    message = f"'{obj}' has no attribute '{attr}'"

    def impl(obj, name):
        raise AttributeError(message)

    return impl
@intrinsic
def resolve_getattr(tyctx, obj, name, default):
    """Intrinsic backing the getattr() overloads.

    Resolves the attribute at typing time and returns a ``(signature,
    codegen)`` pair: depending on whether the attribute exists and whether a
    real default was supplied, the codegen either returns the default, raises
    AttributeError, or hands back the resolved attribute/bound method.
    """
    if not isinstance(name, types.StringLiteral):
        raise RequireLiteralValue("argument 'name' must be a literal string")
    lname = name.literal_value

    fn = tyctx.resolve_getattr(obj, lname)
    # Cannot handle things like `getattr(np, 'cos')` as the return type is
    # types.Function.
    if isinstance(fn, types.Function):
        msg = ("Returning function objects is not implemented. "
               f"getattr() was requested to return {fn} from attribute "
               f"'{lname}' of {obj}.")
        raise TypingError(msg)

    if fn is None: # No attribute
        # if default is not _getattr_default then return the default
        if not (isinstance(default, types.NamedTuple) and
                default.instance_class == _getattr_default_type):
            # it's not the marker default value, so return it
            sig = default(obj, name, default)

            def impl(cgctx, builder, sig, llargs):
                # The default is the last argument; incref so the caller
                # owns a reference to the returned value.
                tmp = llargs[-1]
                cgctx.nrt.incref(builder, default, tmp)
                return tmp
        else:
            # else wire in raising an AttributeError
            fnty = tyctx.resolve_value_type(_getattr_raise_attr_exc)
            raise_sig = fnty.get_call_type(tyctx, (obj, name), {})
            sig = types.none(obj, name, default)

            def impl(cgctx, builder, sig, llargs):
                # Call the raising stub with (obj, name); drop the marker
                # default argument.
                native_impl = cgctx.get_function(fnty, raise_sig)
                return native_impl(builder, llargs[:-1])
    else: # Attribute present, wire in handing it back to the overload(getattr)
        sig = fn(obj, name, default)
        if isinstance(fn, types.BoundFunction):
            # It's a method on an object
            def impl(cgctx, builder, sig, ll_args):
                cast_type = fn.this
                casted = cgctx.cast(builder, ll_args[0], obj, cast_type)
                res = cgctx.get_bound_function(builder, casted, cast_type)
                cgctx.nrt.incref(builder, fn, res)
                return res
        else:
            # Else it's some other type of attribute.
            # Ensure typing calls occur at typing time, not at lowering
            attrty = tyctx.resolve_getattr(obj, lname)

            def impl(cgctx, builder, sig, ll_args):
                attr_impl = cgctx.get_getattr(obj, lname)
                res = attr_impl(cgctx, builder, obj, ll_args[0], lname)
                casted = cgctx.cast(builder, res, attrty, fn)
                cgctx.nrt.incref(builder, fn, casted)
                return casted
    return sig, impl
# These are marker objects to indicate "no default has been provided" in a call
# to getattr().  A (field-less) namedtuple is used rather than a bare object()
# so the sentinel maps onto a Numba NamedTuple type, which resolve_getattr
# recognises at typing time via its `instance_class`.
_getattr_default_type = namedtuple('_getattr_default_type', '')
_getattr_default = _getattr_default_type()
# getattr with no default arg, obj is an open type and name is forced as a
# literal string. The _getattr_default marker is used to indicate "no default
# was provided".
@overload(getattr, prefer_literal=True)
def ol_getattr_2(obj, name):
    """Two-argument getattr: a missing attribute raises AttributeError."""
    def getattr_impl(obj, name):
        # The marker sentinel tells resolve_getattr "no default supplied".
        return resolve_getattr(obj, name, _getattr_default)
    return getattr_impl
# getattr with default arg present, obj is an open type, name is forced as a
# literal string, the "default" is again an open type. Note that the CPython
# definition is: `getattr(object, name[, default]) -> value`, the `default`
# is not a kwarg.
@overload(getattr)
def ol_getattr_3(obj, name, default):
    """Three-argument getattr: a missing attribute yields ``default``."""
    def getattr_impl(obj, name, default):
        return resolve_getattr(obj, name, default)
    return getattr_impl
@intrinsic
def resolve_hasattr(tyctx, obj, name):
    """Intrinsic backing hasattr(): answers at typing time, as a literal bool."""
    if not isinstance(name, types.StringLiteral):
        raise RequireLiteralValue("argument 'name' must be a literal string")
    lname = name.literal_value
    fn = tyctx.resolve_getattr(obj, lname)
    # Whilst technically the return type could be a types.bool_, the literal
    # value is resolvable at typing time. Propagating this literal information
    # into the type system allows the compiler to prune branches based on a
    # hasattr predicate. As a result the signature is based on literals. This is
    # "safe" because the overload requires a literal string so each will be a
    # different variant of (obj, literal(name)) -> literal(bool).
    if fn is None:
        retty = types.literal(False)
    else:
        retty = types.literal(True)
    sig = retty(obj, name)

    def impl(cgctx, builder, sig, ll_args):
        # The answer is a compile-time constant; emit the matching bit.
        return cgutils.false_bit if fn is None else cgutils.true_bit
    return sig, impl
# hasattr cannot be implemented as a getattr call and then catching
# AttributeError because Numba doesn't support catching anything other than
# "Exception", so lacks the specificity required. Instead this implementation
# tries to resolve the attribute via typing information and returns True/False
# based on that.
@overload(hasattr)
def ol_hasattr(obj, name):
    """hasattr via typing-time attribute resolution (no exception catching)."""
    def hasattr_impl(obj, name):
        return resolve_hasattr(obj, name)
    return hasattr_impl
@overload(repr)
def ol_repr_generic(obj):
    """Generic ``repr``: use __repr__ if resolvable, else a placeholder."""
    # Frozen at typing time; when no __repr__ exists the jitted body returns
    # this constant string.
    missing_repr_format = f"<object type:{obj}>"

    def impl(obj):
        attr = '__repr__'
        # hasattr resolves to a literal bool (see resolve_hasattr), so the
        # dead branch is pruned at compile time.  NOTE(review): the explicit
        # `== True` is presumably required by the branch pruner's pattern
        # matching — confirm before "simplifying" it to a bare truth test.
        if hasattr(obj, attr) == True:
            return getattr(obj, attr)()
        else:
            # There's no __str__ or __repr__ defined for this object, return
            # something generic
            return missing_repr_format
    return impl
@overload(str)
def ol_str_generic(object=''):
    """Generic ``str``: use __str__ if resolvable, else fall back to repr().

    The parameter is named ``object`` (shadowing the builtin) to match the
    CPython signature ``str(object='')``.
    """
    def impl(object=""):
        attr = '__str__'
        # Literal-bool hasattr enables compile-time branch pruning here, as
        # in ol_repr_generic; the `== True` form is kept deliberately.
        if hasattr(object, attr) == True:
            return getattr(object, attr)()
        else:
            return repr(object)
    return impl
| MinValInfer |
python | docker__docker-py | tests/integration/models_resources_test.py | {
"start": 73,
"end": 614
} | class ____(BaseIntegrationTest):
def test_reload(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
first_started_at = container.attrs['State']['StartedAt']
container.kill()
container.start()
assert container.attrs['State']['StartedAt'] == first_started_at
container.reload()
assert container.attrs['State']['StartedAt'] != first_started_at
| ModelTest |
python | pandas-dev__pandas | pandas/tests/scalar/timestamp/test_timestamp.py | {
"start": 14032,
"end": 16305
} | class ____:
def test_nanosecond_string_parsing(self):
ts = Timestamp("2013-05-01 07:15:45.123456789")
# GH 7878
expected_repr = "2013-05-01 07:15:45.123456789"
expected_value = 1_367_392_545_123_456_789
assert ts._value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789+09:00", tz="Asia/Tokyo")
assert ts._value == expected_value - 9 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="UTC")
assert ts._value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="US/Eastern")
assert ts._value == expected_value + 4 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp("20130501T071545.123456789")
assert ts._value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1_293_840_000_000_000_005
t = Timestamp("2011-01-01") + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t._value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t._value == expected
assert t.nanosecond == 5
t = Timestamp("2011-01-01 00:00:00.000000005")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t._value == expected
assert t.nanosecond == 5
expected = 1_293_840_000_000_000_010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t._value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t._value == expected
assert t.nanosecond == 10
t = Timestamp("2011-01-01 00:00:00.000000010")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t._value == expected
assert t.nanosecond == 10
| TestTimestampNsOperations |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 75507,
"end": 90110
} | class ____(IR):
"""A join of two dataframes."""
__slots__ = ("left_on", "options", "right_on")
_non_child = ("schema", "left_on", "right_on", "options")
left_on: tuple[expr.NamedExpr, ...]
"""List of expressions used as keys in the left frame."""
right_on: tuple[expr.NamedExpr, ...]
"""List of expressions used as keys in the right frame."""
options: tuple[
Literal["Inner", "Left", "Right", "Full", "Semi", "Anti", "Cross"],
bool,
Zlice | None,
str,
bool,
Literal["none", "left", "right", "left_right", "right_left"],
]
"""
tuple of options:
- how: join type
- nulls_equal: do nulls compare equal?
- slice: optional slice to perform after joining.
- suffix: string suffix for right columns if names match
- coalesce: should key columns be coalesced (only makes sense for outer joins)
- maintain_order: which DataFrame row order to preserve, if any
"""
SWAPPED_ORDER: ClassVar[
dict[
Literal["none", "left", "right", "left_right", "right_left"],
Literal["none", "left", "right", "left_right", "right_left"],
]
] = {
"none": "none",
"left": "right",
"right": "left",
"left_right": "right_left",
"right_left": "left_right",
}
def __init__(
self,
schema: Schema,
left_on: Sequence[expr.NamedExpr],
right_on: Sequence[expr.NamedExpr],
options: Any,
left: IR,
right: IR,
):
self.schema = schema
self.left_on = tuple(left_on)
self.right_on = tuple(right_on)
self.options = options
self.children = (left, right)
self._non_child_args = (self.left_on, self.right_on, self.options)
@staticmethod
@cache
def _joiners(
how: Literal["Inner", "Left", "Right", "Full", "Semi", "Anti"],
) -> tuple[
Callable, plc.copying.OutOfBoundsPolicy, plc.copying.OutOfBoundsPolicy | None
]:
if how == "Inner":
return (
plc.join.inner_join,
plc.copying.OutOfBoundsPolicy.DONT_CHECK,
plc.copying.OutOfBoundsPolicy.DONT_CHECK,
)
elif how == "Left" or how == "Right":
return (
plc.join.left_join,
plc.copying.OutOfBoundsPolicy.DONT_CHECK,
plc.copying.OutOfBoundsPolicy.NULLIFY,
)
elif how == "Full":
return (
plc.join.full_join,
plc.copying.OutOfBoundsPolicy.NULLIFY,
plc.copying.OutOfBoundsPolicy.NULLIFY,
)
elif how == "Semi":
return (
plc.join.left_semi_join,
plc.copying.OutOfBoundsPolicy.DONT_CHECK,
None,
)
elif how == "Anti":
return (
plc.join.left_anti_join,
plc.copying.OutOfBoundsPolicy.DONT_CHECK,
None,
)
assert_never(how) # pragma: no cover
@staticmethod
def _reorder_maps(
left_rows: int,
lg: plc.Column,
left_policy: plc.copying.OutOfBoundsPolicy,
right_rows: int,
rg: plc.Column,
right_policy: plc.copying.OutOfBoundsPolicy,
*,
left_primary: bool = True,
stream: Stream,
) -> list[plc.Column]:
"""
Reorder gather maps to satisfy polars join order restrictions.
Parameters
----------
left_rows
Number of rows in left table
lg
Left gather map
left_policy
Nullify policy for left map
right_rows
Number of rows in right table
rg
Right gather map
right_policy
Nullify policy for right map
left_primary
Whether to preserve the left input row order first, and which
input stream to use for the primary sort.
Defaults to True.
stream
CUDA stream used for device memory operations and kernel launches.
Returns
-------
list[plc.Column]
Reordered left and right gather maps.
Notes
-----
When ``left_primary`` is True, the pair of gather maps is stably sorted by
the original row order of the left side, breaking ties by the right side.
And vice versa when ``left_primary`` is False.
"""
init = plc.Scalar.from_py(0, plc.types.SIZE_TYPE, stream=stream)
step = plc.Scalar.from_py(1, plc.types.SIZE_TYPE, stream=stream)
(left_order_col,) = plc.copying.gather(
plc.Table(
[
plc.filling.sequence(
left_rows,
init,
step,
stream=stream,
)
]
),
lg,
left_policy,
stream=stream,
).columns()
(right_order_col,) = plc.copying.gather(
plc.Table(
[
plc.filling.sequence(
right_rows,
init,
step,
stream=stream,
)
]
),
rg,
right_policy,
stream=stream,
).columns()
keys = (
plc.Table([left_order_col, right_order_col])
if left_primary
else plc.Table([right_order_col, left_order_col])
)
return plc.sorting.stable_sort_by_key(
plc.Table([lg, rg]),
keys,
[plc.types.Order.ASCENDING, plc.types.Order.ASCENDING],
[plc.types.NullOrder.AFTER, plc.types.NullOrder.AFTER],
stream=stream,
).columns()
@staticmethod
def _build_columns(
columns: Iterable[plc.Column],
template: Iterable[NamedColumn],
*,
left: bool = True,
empty: bool = False,
rename: Callable[[str], str] = lambda name: name,
stream: Stream,
) -> list[Column]:
if empty:
return [
Column(
plc.column_factories.make_empty_column(
col.dtype.plc_type, stream=stream
),
col.dtype,
name=rename(col.name),
)
for col in template
]
result = [
Column(new, col.dtype, name=rename(col.name))
for new, col in zip(columns, template, strict=True)
]
if left:
result = [
col.sorted_like(orig)
for col, orig in zip(result, template, strict=True)
]
return result
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="Join")
def do_evaluate(
cls,
left_on_exprs: Sequence[expr.NamedExpr],
right_on_exprs: Sequence[expr.NamedExpr],
options: tuple[
Literal["Inner", "Left", "Right", "Full", "Semi", "Anti", "Cross"],
bool,
Zlice | None,
str,
bool,
Literal["none", "left", "right", "left_right", "right_left"],
],
left: DataFrame,
right: DataFrame,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Evaluate and return a dataframe."""
stream = get_joined_cuda_stream(
context.get_cuda_stream, upstreams=(left.stream, right.stream)
)
how, nulls_equal, zlice, suffix, coalesce, maintain_order = options
if how == "Cross":
# Separate implementation, since cross_join returns the
# result, not the gather maps
if right.num_rows == 0:
left_cols = Join._build_columns(
[], left.columns, empty=True, stream=stream
)
right_cols = Join._build_columns(
[],
right.columns,
left=False,
empty=True,
rename=lambda name: name
if name not in left.column_names_set
else f"{name}{suffix}",
stream=stream,
)
result = DataFrame([*left_cols, *right_cols], stream=stream)
else:
columns = plc.join.cross_join(
left.table, right.table, stream=stream
).columns()
left_cols = Join._build_columns(
columns[: left.num_columns], left.columns, stream=stream
)
right_cols = Join._build_columns(
columns[left.num_columns :],
right.columns,
rename=lambda name: name
if name not in left.column_names_set
else f"{name}{suffix}",
left=False,
stream=stream,
)
result = DataFrame([*left_cols, *right_cols], stream=stream).slice(
zlice
)
else:
# how != "Cross"
# TODO: Waiting on clarity based on https://github.com/pola-rs/polars/issues/17184
left_on = DataFrame(
broadcast(*(e.evaluate(left) for e in left_on_exprs), stream=stream),
stream=stream,
)
right_on = DataFrame(
broadcast(*(e.evaluate(right) for e in right_on_exprs), stream=stream),
stream=stream,
)
null_equality = (
plc.types.NullEquality.EQUAL
if nulls_equal
else plc.types.NullEquality.UNEQUAL
)
join_fn, left_policy, right_policy = cls._joiners(how)
if right_policy is None:
# Semi join
lg = join_fn(left_on.table, right_on.table, null_equality, stream)
table = plc.copying.gather(left.table, lg, left_policy, stream=stream)
result = DataFrame.from_table(
table, left.column_names, left.dtypes, stream=stream
)
else:
if how == "Right":
# Right join is a left join with the tables swapped
left, right = right, left
left_on, right_on = right_on, left_on
maintain_order = Join.SWAPPED_ORDER[maintain_order]
lg, rg = join_fn(
left_on.table, right_on.table, null_equality, stream=stream
)
if (
how in ("Inner", "Left", "Right", "Full")
and maintain_order != "none"
):
lg, rg = cls._reorder_maps(
left.num_rows,
lg,
left_policy,
right.num_rows,
rg,
right_policy,
left_primary=maintain_order.startswith("left"),
stream=stream,
)
if coalesce:
if how == "Full":
# In this case, keys must be column references,
# possibly with dtype casting. We should use them in
# preference to the columns from the original tables.
# We need to specify `stream` here. We know that `{left,right}_on`
# is valid on `stream`, which is ordered after `{left,right}.stream`.
left = left.with_columns(
left_on.columns, replace_only=True, stream=stream
)
right = right.with_columns(
right_on.columns, replace_only=True, stream=stream
)
else:
right = right.discard_columns(right_on.column_names_set)
left = DataFrame.from_table(
plc.copying.gather(left.table, lg, left_policy, stream=stream),
left.column_names,
left.dtypes,
stream=stream,
)
right = DataFrame.from_table(
plc.copying.gather(right.table, rg, right_policy, stream=stream),
right.column_names,
right.dtypes,
stream=stream,
)
if coalesce and how == "Full":
left = left.with_columns(
(
Column(
plc.replace.replace_nulls(
left_col.obj, right_col.obj, stream=stream
),
name=left_col.name,
dtype=left_col.dtype,
)
for left_col, right_col in zip(
left.select_columns(left_on.column_names_set),
right.select_columns(right_on.column_names_set),
strict=True,
)
),
replace_only=True,
stream=stream,
)
right = right.discard_columns(right_on.column_names_set)
if how == "Right":
# Undo the swap for right join before gluing together.
left, right = right, left
right = right.rename_columns(
{
name: f"{name}{suffix}"
for name in right.column_names
if name in left.column_names_set
}
)
result = left.with_columns(right.columns, stream=stream)
result = result.slice(zlice)
# Join the original streams back into the result stream to ensure that the
# deallocations (on the original streams) happen after the result is ready
join_cuda_streams(
downstreams=(left.stream, right.stream), upstreams=(result.stream,)
)
return result
| Join |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/middleware/bash.py | {
"start": 425,
"end": 3203
} | class ____(ShellToolMiddleware):
"""Middleware that exposes Anthropic's native bash tool to models."""
def __init__(
self,
workspace_root: str | None = None,
*,
startup_commands: tuple[str, ...] | list[str] | str | None = None,
shutdown_commands: tuple[str, ...] | list[str] | str | None = None,
execution_policy: Any | None = None,
redaction_rules: tuple[Any, ...] | list[Any] | None = None,
tool_description: str | None = None,
env: dict[str, Any] | None = None,
) -> None:
"""Initialize middleware for Claude's native bash tool.
Args:
workspace_root: Base directory for the shell session.
If omitted, a temporary directory is created.
startup_commands: Optional commands executed after the session starts.
shutdown_commands: Optional commands executed before session shutdown.
execution_policy: Execution policy controlling timeouts and limits.
redaction_rules: Optional redaction rules to sanitize output.
tool_description: Optional override for tool description.
env: Optional environment variables for the shell session.
"""
super().__init__(
workspace_root=workspace_root,
startup_commands=startup_commands,
shutdown_commands=shutdown_commands,
execution_policy=execution_policy,
redaction_rules=redaction_rules,
tool_description=tool_description,
tool_name=BASH_TOOL_NAME,
shell_command=("/bin/bash",),
env=env,
)
# Parent class now creates the tool with name "bash" via tool_name parameter
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
"""Replace parent's shell tool with Claude's bash descriptor."""
filtered = [
t for t in request.tools if getattr(t, "name", None) != BASH_TOOL_NAME
]
tools = [*filtered, {"type": BASH_TOOL_TYPE, "name": BASH_TOOL_NAME}]
return handler(request.override(tools=tools))
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelResponse:
"""Async: replace parent's shell tool with Claude's bash descriptor."""
filtered = [
t for t in request.tools if getattr(t, "name", None) != BASH_TOOL_NAME
]
tools = [*filtered, {"type": BASH_TOOL_TYPE, "name": BASH_TOOL_NAME}]
return await handler(request.override(tools=tools))
__all__ = ["ClaudeBashToolMiddleware"]
| ClaudeBashToolMiddleware |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis22.py | {
"start": 315,
"end": 1399
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis22.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [86799104, 86801792]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis({"num_format": "#,##0.00"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | numba__numba | numba/tests/npyufunc/test_ufuncbuilding.py | {
"start": 15493,
"end": 16521
} | class ____(unittest.TestCase):
"""Test that numba ufuncs are compatible with dask collections and wrappers around
dask (e.g. xarray or pint) and that they can be serialized, sent over the network,
deserialized on a different host and applied remotely.
"""
def test_dask_array(self):
a = FakeDaskArray(np.arange(4, dtype=np.float64))
expect = np.arange(4, dtype=np.float64) * 2
@vectorize(["f8(f8)"])
def double_static_vectorize(x):
return x * 2
@vectorize()
def double_dynamic_vectorize(x):
return x * 2
@guvectorize(["f8,f8[:]"], "()->()")
def double_guvectorize(x, out):
out[:] = x * 2
for func in (
double_static_vectorize,
double_dynamic_vectorize,
double_guvectorize,
):
with self.subTest(func):
b = func(a)
assert isinstance(b, FakeDaskArray)
assert_array_equal(b.array, expect)
| TestDask |
python | kamyu104__LeetCode-Solutions | Python/count-numbers-with-unique-digits.py | {
"start": 365,
"end": 743
} | class ____(object):
def countNumbersWithUniqueDigits(self, n):
"""
:type n: int
:rtype: int
"""
fact = [1]*2
def nPr(n, k):
while len(fact) <= n: # lazy initialization
fact.append(fact[-1]*len(fact))
return fact[n]//fact[n-k]
return 1+9*sum(nPr(9, i) for i in xrange(n))
| Solution2 |
python | py-pdf__pypdf | pypdf/errors.py | {
"start": 1470,
"end": 1569
} | class ____(PdfReadError):
"""Raised when a PDF file is empty or has no content."""
| EmptyFileError |
python | matplotlib__matplotlib | lib/matplotlib/sphinxext/plot_directive.py | {
"start": 18169,
"end": 35592
} | class ____(RuntimeError):
pass
def _run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
pwd = os.getcwd()
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(f'{err}\n`plot_working_directory` option in '
f'Sphinx configuration file must be a valid '
f'directory path') from err
except TypeError as err:
raise TypeError(f'{err}\n`plot_working_directory` option in '
f'Sphinx configuration file must be a string or '
f'None') from err
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
with cbook._setattr_cm(
sys, argv=[code_path], path=[os.getcwd(), *sys.path]), \
contextlib.redirect_stdout(StringIO()):
try:
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
exec('import numpy as np\n'
'from matplotlib import pyplot as plt\n', ns)
else:
exec(str(setup.config.plot_pre_code), ns)
if "__main__" in code:
ns['__name__'] = '__main__'
# Patch out non-interactive show() to avoid triggering a warning.
with cbook._setattr_cm(FigureManagerBase, show=lambda self: None):
exec(code, ns)
if function_name is not None:
exec(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc()) from err
finally:
os.chdir(pwd)
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def get_plot_formats(config):
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
for fmt in plot_formats:
if isinstance(fmt, str):
if ':' in fmt:
suffix, dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif isinstance(fmt, (tuple, list)) and len(fmt) == 2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
return formats
def _parse_srcset(entries):
"""
Parse srcset for multiples...
"""
srcset = {}
for entry in entries:
entry = entry.strip()
if len(entry) >= 2:
mult = entry[:-1]
srcset[float(mult)] = entry
else:
raise ExtensionError(f'srcset argument {entry!r} is invalid.')
return srcset
def check_output_base_name(env, output_base):
docname = env.docname
if '.' in output_base or '/' in output_base or '\\' in output_base:
raise PlotError(
f"The filename-prefix '{output_base}' is invalid. "
f"It must not contain dots or slashes.")
for d in env.mpl_plot_image_basenames:
if output_base in env.mpl_plot_image_basenames[d]:
if d == docname:
raise PlotError(
f"The filename-prefix {output_base!r} is used multiple times.")
raise PlotError(f"The filename-prefix {output_base!r} is used multiple"
f"times (it is also used in {env.doc2path(d)}).")
env.mpl_plot_image_basenames[docname].add(output_base)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False,
code_includes=None):
"""
Run a pyplot script and save the images in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
if function_name is not None:
output_base = f'{output_base}_{function_name}'
formats = get_plot_formats(config)
# Try to determine if all images already exist
is_doctest, code_pieces = _split_code_at_show(code, function_name)
# Look for single-figure output files first
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if context or out_of_date(code_path, img.filename(format),
includes=code_includes):
all_exists = False
break
img.formats.append(format)
else:
all_exists = True
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
for i, code_piece in enumerate(code_pieces):
images = []
for j in itertools.count():
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j),
output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for fmt, dpi in formats:
if context or out_of_date(code_path, img.filename(fmt),
includes=code_includes):
all_exists = False
break
img.formats.append(fmt)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
else:
all_exists = True
if all_exists:
return results
# We didn't find the files, so build them
results = []
ns = plot_context if context else {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
_run_code(doctest.script_from_examples(code_piece) if is_doctest
else code_piece,
code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for fmt, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(fmt), dpi=dpi)
if fmt == formats[0][0] and config.plot_srcset:
# save a 2x, 3x etc version of the default...
srcset = _parse_srcset(config.plot_srcset)
for mult, suffix in srcset.items():
fm = f'{suffix}.{fmt}'
img.formats.append(fm)
figman.canvas.figure.savefig(img.filename(fm),
dpi=int(dpi * mult))
except Exception as err:
raise PlotError(traceback.format_exc()) from err
img.formats.append(fmt)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
document = state_machine.document
env = document.settings.env
config = env.config
nofigs = 'nofigs' in options
if config.plot_srcset and setup.app.builder.name == 'singlehtml':
raise ExtensionError(
'plot_srcset option not compatible with single HTML writer')
formats = get_plot_formats(config)
default_fmt = formats[0][0]
options.setdefault('include-source', config.plot_include_source)
options.setdefault('show-source-link', config.plot_html_show_source_link)
options.setdefault('filename-prefix', None)
if 'class' in options:
# classes are parsed into a list of string, and output by simply
# printing the list, abusing the fact that RST guarantees to strip
# non-conforming characters
options['class'] = ['plot-directive'] + options['class']
else:
options.setdefault('class', ['plot-directive'])
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# Enforce unambiguous use of captions.
if "caption" in options:
if caption:
raise ValueError(
'Caption specified in both content and options.'
' Please remove ambiguity.'
)
# Use caption option
caption = options["caption"]
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
code = Path(source_file_name).read_text(encoding='utf-8')
if options['filename-prefix']:
output_base = options['filename-prefix']
check_output_base_name(env, output_base)
else:
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
if options['filename-prefix']:
output_base = options['filename-prefix']
check_output_base_name(env, output_base)
else:
base, ext = os.path.splitext(os.path.basename(source_file_name))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = options.get('caption', '')
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name).lstrip(os.path.sep)
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
os.makedirs(build_dir, exist_ok=True)
# how to link to files from the RST file
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
# get list of included rst files so that the output is updated when any
# plots in the included files change. These attributes are modified by the
# include directive (see the docutils.parsers.rst.directives.misc module).
try:
source_file_includes = [os.path.join(os.getcwd(), t[0])
for t in state.document.include_log]
except AttributeError:
# the document.include_log attribute only exists in docutils >=0.17,
# before that we need to inspect the state machine
possible_sources = {os.path.join(setup.confdir, t[0])
for t in state_machine.input_lines.items}
source_file_includes = [f for f in possible_sources
if os.path.isfile(f)]
# remove the source file itself from the includes
try:
source_file_includes.remove(source_file_name)
except ValueError:
pass
# save script (if necessary)
if options['show-source-link']:
Path(build_dir, output_base + (source_ext or '.py')).write_text(
doctest.script_from_examples(code)
if source_file_name == rst_file and is_doctest
else code,
encoding='utf-8')
# make figures
try:
results = render_figures(code=code,
code_path=source_file_name,
output_dir=build_dir,
output_base=output_base,
context=keep_context,
function_name=function_name,
config=config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs',
code_includes=source_file_includes)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting {}\n from {}:\n{}".format(
output_base, source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
if caption and config.plot_srcset:
caption = ':caption: ' + caption.replace('\n', ' ')
elif caption:
caption = '\n' + '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['', *code_piece.splitlines()]
else:
lines = ['.. code-block:: python']
if 'code-caption' in options:
code_caption = options['code-caption'].replace('\n', ' ')
lines.append(f' :caption: {code_caption}')
lines.extend(['', *textwrap.indent(code_piece, ' ').splitlines()])
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
if 'alt' in options:
options['alt'] = options['alt'].replace('\n', ' ')
opts = [
f':{key}: {val}' for key, val in options.items()
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
# Not-None src_name signals the need for a source download in the
# generated html
if j == 0 and options['show-source-link']:
src_name = output_base + (source_ext or '.py')
else:
src_name = None
if config.plot_srcset:
srcset = [*_parse_srcset(config.plot_srcset).values()]
template = TEMPLATE_SRCSET
else:
srcset = None
template = TEMPLATE
result = jinja2.Template(config.plot_template or template).render(
default_fmt=default_fmt,
build_dir=build_dir_link,
src_name=src_name,
multi_image=len(images) > 1,
options=opts,
srcset=srcset,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and len(images),
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
return errors
| PlotError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/unit_tests/test_migrations.py | {
"start": 3336,
"end": 7067
} | class ____:
test_unmigrated_config_path = MIGRATIONS_TEST_DIRECTORY / "unmigrated_config.json"
test_migrated_config_path = MIGRATIONS_TEST_DIRECTORY / "migrated_config.json"
def test_migrate_config(self, components_module):
try:
config_copy = dict(UNMIGRATED_CONFIG)
assert "account_type" not in config_copy
get_source(config_copy, config_path=str(self.test_unmigrated_config_path))
migrated_config = load_config(self.test_unmigrated_config_path)
assert migrated_config["account_type"] == "Seller"
assert isinstance(migrated_config["report_options_list"], list)
assert len(migrated_config["report_options_list"]) == 2
assert migrated_config["report_options_list"][0]["report_name"] == "GET_REPORT"
assert migrated_config["report_options_list"][0]["stream_name"] == "GET_REPORT"
assert migrated_config["report_options_list"][0]["options_list"][0]["option_name"] == "reportPeriod"
assert migrated_config["report_options_list"][0]["options_list"][0]["option_value"] == "WEEK"
assert migrated_config["report_options_list"][1]["report_name"] == "GET_REPORT_2"
assert migrated_config["report_options_list"][1]["stream_name"] == "GET_REPORT_2"
assert migrated_config["report_options_list"][1]["options_list"][0]["option_name"] == "reportPeriod_2"
assert migrated_config["report_options_list"][1]["options_list"][0]["option_value"] == "DAY"
finally:
with self.test_unmigrated_config_path.open("w") as f:
json.dump(config_copy, f)
def test_already_migrated_config(self, components_module):
try:
config_copy = dict(MIGRATED_CONFIG)
get_source(config_copy, config_path=str(self.test_migrated_config_path))
migrated_config = load_config(self.test_migrated_config_path)
print("migrated_config", migrated_config)
assert migrated_config["account_type"] == "Vendor"
assert migrated_config["report_options_list"] == MIGRATED_CONFIG["report_options_list"]
assert migrated_config["report_options_list"][0]["report_name"] == MIGRATED_CONFIG["report_options_list"][0]["report_name"]
assert migrated_config["report_options_list"][0]["stream_name"] == MIGRATED_CONFIG["report_options_list"][0]["stream_name"]
assert (
migrated_config["report_options_list"][0]["options_list"][0]["option_name"]
== MIGRATED_CONFIG["report_options_list"][0]["options_list"][0]["option_name"]
)
assert (
migrated_config["report_options_list"][0]["options_list"][0]["option_value"]
== MIGRATED_CONFIG["report_options_list"][0]["options_list"][0]["option_value"]
)
assert migrated_config["report_options_list"][1]["report_name"] == MIGRATED_CONFIG["report_options_list"][1]["report_name"]
assert migrated_config["report_options_list"][1]["stream_name"] == MIGRATED_CONFIG["report_options_list"][1]["stream_name"]
assert (
migrated_config["report_options_list"][1]["options_list"][0]["option_name"]
== MIGRATED_CONFIG["report_options_list"][1]["options_list"][0]["option_name"]
)
assert (
migrated_config["report_options_list"][1]["options_list"][0]["option_value"]
== MIGRATED_CONFIG["report_options_list"][1]["options_list"][0]["option_value"]
)
finally:
with self.test_migrated_config_path.open("w") as f:
json.dump(config_copy, f)
| TestMigrations |
python | jmcnamara__XlsxWriter | xlsxwriter/test/chartsheet/test_chartsheet01.py | {
"start": 347,
"end": 1479
} | class ____(unittest.TestCase):
"""
Test assembling a complete Chartsheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a chartsheet with no cell data."""
self.maxDiff = None
fh = StringIO()
chartsheet = Chartsheet()
chartsheet._set_filehandle(fh)
chartsheet.drawing = 1
chartsheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<chartsheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<sheetPr/>
<sheetViews>
<sheetView workbookViewId="0"/>
</sheetViews>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<drawing r:id="rId1"/>
</chartsheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleChartsheet |
python | great-expectations__great_expectations | great_expectations/profile/base.py | {
"start": 346,
"end": 974
} | class ____(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
| OrderedEnum |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_resource_claim_consumer_reference.py | {
"start": 383,
"end": 6991
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_group': 'str',
'name': 'str',
'resource': 'str',
'uid': 'str'
}
attribute_map = {
'api_group': 'apiGroup',
'name': 'name',
'resource': 'resource',
'uid': 'uid'
}
def __init__(self, api_group=None, name=None, resource=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ResourceClaimConsumerReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_group = None
self._name = None
self._resource = None
self._uid = None
self.discriminator = None
if api_group is not None:
self.api_group = api_group
self.name = name
self.resource = resource
self.uid = uid
@property
def api_group(self):
"""Gets the api_group of this V1beta1ResourceClaimConsumerReference. # noqa: E501
APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
:return: The api_group of this V1beta1ResourceClaimConsumerReference. # noqa: E501
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""Sets the api_group of this V1beta1ResourceClaimConsumerReference.
APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
:param api_group: The api_group of this V1beta1ResourceClaimConsumerReference. # noqa: E501
:type: str
"""
self._api_group = api_group
@property
def name(self):
"""Gets the name of this V1beta1ResourceClaimConsumerReference. # noqa: E501
Name is the name of resource being referenced. # noqa: E501
:return: The name of this V1beta1ResourceClaimConsumerReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta1ResourceClaimConsumerReference.
Name is the name of resource being referenced. # noqa: E501
:param name: The name of this V1beta1ResourceClaimConsumerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def resource(self):
"""Gets the resource of this V1beta1ResourceClaimConsumerReference. # noqa: E501
Resource is the type of resource being referenced, for example \"pods\". # noqa: E501
:return: The resource of this V1beta1ResourceClaimConsumerReference. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V1beta1ResourceClaimConsumerReference.
Resource is the type of resource being referenced, for example \"pods\". # noqa: E501
:param resource: The resource of this V1beta1ResourceClaimConsumerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and resource is None: # noqa: E501
raise ValueError("Invalid value for `resource`, must not be `None`") # noqa: E501
self._resource = resource
@property
def uid(self):
"""Gets the uid of this V1beta1ResourceClaimConsumerReference. # noqa: E501
UID identifies exactly one incarnation of the resource. # noqa: E501
:return: The uid of this V1beta1ResourceClaimConsumerReference. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1beta1ResourceClaimConsumerReference.
UID identifies exactly one incarnation of the resource. # noqa: E501
:param uid: The uid of this V1beta1ResourceClaimConsumerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ResourceClaimConsumerReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ResourceClaimConsumerReference):
return True
return self.to_dict() != other.to_dict()
| V1beta1ResourceClaimConsumerReference |
python | google__pytype | pytype/tools/traces/traces_test.py | {
"start": 2760,
"end": 3829
} | class ____(unittest.TestCase):
"""Base class for testing traces.MatchAstVisitor."""
def _parse(self, text, options=None):
text = textwrap.dedent(text).lstrip()
return ast.parse(text), traces.trace(text, options)
def _get_traces(self, text, node_type, options=None):
module, src = self._parse(text, options)
v = _TestVisitor(src, ast)
v.visit(module)
return v.traces_by_node_type[node_type]
def assertTracesEqual(self, actual_traces, expected_traces):
self.assertEqual(len(actual_traces), len(expected_traces))
for trace, expected_trace in zip(actual_traces, expected_traces):
loc, trace = trace
expected_loc, expected_op, expected_symbol, expected_annots = (
expected_trace)
self.assertEqual(loc, expected_loc)
self.assertEqual(trace.op, expected_op)
self.assertEqual(trace.symbol, expected_symbol)
self.assertEqual(len(trace.types), len(expected_annots))
for t, annot in zip(trace.types, expected_annots):
self.assertEqual(pytd_utils.Print(t), annot)
| MatchAstTestCase |
python | facebook__pyre-check | tools/generate_taint_models/tests/get_annotated_free_functions_with_decorator_test.py | {
"start": 635,
"end": 21207
} | class ____(unittest.TestCase):
def assert_expected_annotations(
self,
source: str,
annotation_specifications: List[DecoratorAnnotationSpecification],
expected: Set[str],
) -> None:
cleaned_source = textwrap.dedent(source)
with patch("builtins.open", mock_open(read_data=cleaned_source)):
generator = AnnotatedFreeFunctionWithDecoratorGenerator(
root="/root", annotation_specifications=annotation_specifications
)
self.assertSetEqual(
{
str(model)
for model in generator._annotate_functions("/root/module.py")
},
set(expected),
)
def test_model_generation(self) -> None:
# Test argument annotations only.
self.assert_expected_annotations(
"""
@target_decorator
def decorated(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg")
),
)
],
{"def module.decorated(arg1: Arg, arg2: Arg, *v, **kw): ..."},
)
# Test argument annotations with unnamed attributed_decorator.
self.assert_expected_annotations(
"""
@target_decorator("some_attribute")
def decorated_unnamed_attributes(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator='@target_decorator("some_attribute")',
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg")
),
)
],
{
"def module.decorated_unnamed_attributes(arg1: Arg, arg2: Arg, "
"*v, **kw): ..."
},
)
# Test argument annotations with named attributed_decorator.
self.assert_expected_annotations(
"""
@target_decorator(key="value")
def decorated_named_attributes(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator='@target_decorator(key="value")',
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg")
),
)
],
{
"def module.decorated_named_attributes(arg1: Arg, arg2: Arg, *v, "
"**kw): ..."
},
)
# Test argument annotations with multiple filter criteria.
self.assert_expected_annotations(
"""
@target_decorator("some_attribute", "another_attribute", key="value", \
key2="another_value")
def decorated_multiple_filter_attributes(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator=(
'@target_decorator("some_attribute", "another_attribute", '
'key2="another_value")'
),
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg")
),
)
],
{
"def module.decorated_multiple_filter_attributes(arg1: Arg, "
"arg2: Arg, *v, **kw): ..."
},
)
# Test argument annotations with attributes not found.
self.assert_expected_annotations(
"""
@target_decorator("some_attribute", key="value")
def decorated_attributes_not_found(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator('some_attribute_not_found')",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg")
),
)
],
set(),
)
# Test vararg annotations only.
self.assert_expected_annotations(
"""
@target_decorator
def decorated(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(vararg="Vararg")
),
)
],
{"def module.decorated(arg1, arg2, *v: Vararg, **kw): ..."},
)
# Test kwarg annotations only.
self.assert_expected_annotations(
"""
@target_decorator
def decorated(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(kwarg="Kwarg")
),
)
],
{"def module.decorated(arg1, arg2, *v, **kw: Kwarg): ..."},
)
# Test return annotations only.
self.assert_expected_annotations(
"""
@target_decorator
def decorated(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(returns="Return"),
)
],
{"def module.decorated(arg1, arg2, *v, **kw) -> Return: ..."},
)
# Test parameter type whitelist
self.assert_expected_annotations(
"""
@target_decorator
async def decorated_async(arg1: str, arg2: int, arg3: bool, arg4):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg"),
returns="Return",
),
whitelist=WhitelistSpecification(parameter_type={"str", "int"}),
)
],
{
"def module.decorated_async(arg1, arg2, arg3: Arg, arg4: Arg"
") -> Return: ..."
},
)
# Test parameter name whitelist
self.assert_expected_annotations(
"""
@target_decorator
async def decorated_async(arg1: str, arg2: int, arg3: bool, arg4):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg"),
returns="Return",
),
whitelist=WhitelistSpecification(parameter_name={"arg1", "arg4"}),
)
],
{
"def module.decorated_async(arg1, arg2: Arg, arg3: Arg, arg4"
") -> Return: ..."
},
)
# Test async functions.
self.assert_expected_annotations(
"""
@target_decorator
async def decorated_async(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
),
)
],
{
"def module.decorated_async(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg) -> Return: ..."
},
)
# Test with other decorators present.
self.assert_expected_annotations(
"""
@random_decorator1
@target_decorator
@random_decorator2
def decorated_multi(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
),
)
],
{
"def module.decorated_multi(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg) -> Return: ..."
},
)
# Test functions that shouldn't trigger.
self.assert_expected_annotations(
"""
def undecorated():
pass
@random_decorator
@namespace.target_decorator
@namespace.target_decorator()
def decorated_with_random():
pass
class C:
@target_decorator
def my_fn():
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
),
)
],
set(),
)
# Test argument with target decorator attributes.
self.assert_expected_annotations(
"""
@target_decorator
def target_decorator_attributes(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator('some_attribute')",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg")
),
)
],
set(),
)
# Test callable decorators.
self.assert_expected_annotations(
"""
@target_decorator()
def decorated(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
)
),
)
],
{
"def module.decorated(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg): ..."
},
)
# Test everything at once.
self.assert_expected_annotations(
"""
def undecorated():
pass
@random_decorator
def decorated_with_random():
pass
class C:
@target_decorator
def my_fn():
pass
@target_decorator
def decorated(arg1: str, arg2, *v, **kw):
pass
@target_decorator
async def decorated_async(arg1: str, arg2, *v, **kw):
pass
@random_decorator1
@target_decorator
@random_decorator2
def decorated_multi(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
),
)
],
{
"def module.decorated(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg) -> Return: ...",
"def module.decorated_async(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg) -> Return: ...",
"def module.decorated_multi(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg) -> Return: ...",
},
)
# Test more than one specification.
self.assert_expected_annotations(
"""
def undecorated():
pass
@target_decorator1
def decorated1(arg: str, *v, **kw):
pass
@target_decorator2
def decorated2(arg: str, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator1",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg1", vararg="Vararg1", kwarg="Kwarg1"
),
returns="Return1",
),
),
DecoratorAnnotationSpecification(
decorator="@target_decorator2",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg2", vararg="Vararg2", kwarg="Kwarg2"
),
returns="Return2",
),
),
],
{
"def module.decorated1(arg: Arg1, *v: Vararg1, "
"**kw: Kwarg1) -> Return1: ...",
"def module.decorated2(arg: Arg2, *v: Vararg2, "
"**kw: Kwarg2) -> Return2: ...",
},
)
# Test unified annotations.
self.assert_expected_annotations(
"""
@target_decorator
def decorated(arg1: str, arg2, *v, **kw):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
),
)
],
{
"def module.decorated(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg) -> Return: ..."
},
)
# Test unified whitelist
self.assert_expected_annotations(
"""
@target_decorator
async def decorated_async(arg1: str, arg2: int, arg3: bool, arg4):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target_decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg"),
returns="Return",
),
whitelist=WhitelistSpecification(
parameter_type={"str", "int"}, parameter_name={"arg1", "arg4"}
),
)
],
{
"def module.decorated_async(arg1, arg2, arg3: Arg, arg4"
") -> Return: ..."
},
)
# Test function attribute decorator
self.assert_expected_annotations(
"""
@target.decorator
def decorated(arg1: str, arg2: int):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target.decorator",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg"),
returns="Return",
),
)
],
{"def module.decorated(arg1: Arg, arg2: Arg) -> Return: ..."},
)
# Test function attribute decorator with args
self.assert_expected_annotations(
"""
@target.decorator(darg, foo, kwdarg=val, blarg=val)
def decorated(arg1: str, arg2: int):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target.decorator(darg, kwdarg=val)",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg"),
returns="Return",
),
)
],
{"def module.decorated(arg1: Arg, arg2: Arg) -> Return: ..."},
)
# negative match for attribute decorator
self.assert_expected_annotations(
"""
@target.shmecorator(darg, foo, kwdarg=val, blarg=val)
def decorated(arg1: str, arg2: int):
pass
@shmarget.decorator(darg, foo, kwdarg=val, blarg=val)
def decorated(arg1: str, arg2: int):
pass
""",
[
DecoratorAnnotationSpecification(
decorator="@target.decorator(darg, kwdarg=val)",
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(arg="Arg"),
returns="Return",
),
)
],
set(),
)
# pyre-ignore[56]: Pyre was not able to infer the type of argument
# `get_annotated_free_functions_with_decorator`
@patch.object(
annotated_function_generator,
"find_all_paths",
return_value=["/root/one.py", "/root/two.py", "/root/three.py"],
)
def test_path_selection(self, find_paths_mock: MagicMock) -> None:
unused_specification = [DecoratorAnnotationSpecification(decorator="unused")]
# Using user-provided paths
self.assertListEqual(
AnnotatedFreeFunctionWithDecoratorGenerator(
root="/root",
annotation_specifications=unused_specification,
paths=["/custom/one.py", "/custom/two.py"],
).paths,
["/custom/one.py", "/custom/two.py"],
)
find_paths_mock.assert_not_called()
# Extracting paths from root
self.assertListEqual(
AnnotatedFreeFunctionWithDecoratorGenerator(
root="/root", annotation_specifications=unused_specification
).paths,
["/root/one.py", "/root/two.py", "/root/three.py"],
)
find_paths_mock.assert_called_with("/root")
# Filtering paths with excluded
self.assertListEqual(
AnnotatedFreeFunctionWithDecoratorGenerator(
root="/root",
annotation_specifications=unused_specification,
exclude_paths=[r"one\.py", r"two\.py"],
).paths,
["/root/three.py"],
)
| AnnotatedFreeFunctionWithDecoratorGeneratorTest |
python | getsentry__sentry | src/sentry/middleware/sudo.py | {
"start": 112,
"end": 630
} | class ____(BaseSudoMiddleware):
def has_sudo_privileges(self, request: HttpRequest) -> bool:
# Right now, only password reauthentication (django-sudo) is supported,
# so if a user doesn't have a password (for example, only has github auth)
# then we shouldn't prompt them for the password they don't have.
user = request.user
if user.is_authenticated and not user.has_usable_password():
return True
return super().has_sudo_privileges(request)
| SudoMiddleware |
python | openai__openai-python | src/openai/types/realtime/response_audio_transcript_delta_event.py | {
"start": 210,
"end": 786
} | class ____(BaseModel):
content_index: int
"""The index of the content part in the item's content array."""
delta: str
"""The transcript delta."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.output_audio_transcript.delta"]
"""The event type, must be `response.output_audio_transcript.delta`."""
| ResponseAudioTranscriptDeltaEvent |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 68134,
"end": 68454
} | class ____(_PrintableStructure):
_fields_ = [
('timeStamp', c_ulonglong),
('pid', c_uint),
('smUtil', c_uint),
('memUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint),
('jpgUtil', c_uint),
('ofaUtil', c_uint),
]
| c_nvmlProcessUtilizationInfo_v1_t |
python | numba__llvmlite | llvmlite/ir/types.py | {
"start": 242,
"end": 2565
} | class ____(_StrCaching):
"""
The base class for all LLVM types.
"""
is_pointer = False
null = 'zeroinitializer'
def __repr__(self):
return "<%s %s>" % (type(self), str(self))
def _to_string(self):
raise NotImplementedError
def as_pointer(self, addrspace=0):
return PointerType(self, addrspace)
def __ne__(self, other):
return not (self == other)
def _get_ll_global_value_type(self, target_data, context=None):
"""
Convert this type object to an LLVM type.
"""
from llvmlite.ir import Module, GlobalVariable
from llvmlite.binding import parse_assembly
if context is None:
m = Module()
else:
m = Module(context=context)
foo = GlobalVariable(m, self, name="foo")
with parse_assembly(str(m)) as llmod:
return llmod.get_global_variable(foo.name).global_value_type
def get_abi_size(self, target_data, context=None):
"""
Get the ABI size of this type according to data layout *target_data*.
"""
llty = self._get_ll_global_value_type(target_data, context)
return target_data.get_abi_size(llty)
def get_element_offset(self, target_data, ndx, context=None):
llty = self._get_ll_global_value_type(target_data, context)
return target_data.get_element_offset(llty, ndx)
def get_abi_alignment(self, target_data, context=None):
"""
Get the minimum ABI alignment of this type according to data layout
*target_data*.
"""
llty = self._get_ll_global_value_type(target_data, context)
return target_data.get_abi_alignment(llty)
def format_constant(self, value):
"""
Format constant *value* of this type. This method may be overriden
by subclasses.
"""
return str(value)
def wrap_constant_value(self, value):
"""
Wrap constant *value* if necessary. This method may be overriden
by subclasses (especially aggregate types).
"""
return value
def __call__(self, value):
"""
Create a LLVM constant of this type with the given Python value.
"""
from llvmlite.ir import Constant
return Constant(self, value)
| Type |
python | getsentry__sentry | src/sentry/grouping/enhancer/matchers.py | {
"start": 12395,
"end": 12454
} | class ____(FrameFieldMatch):
field = "module"
| ModuleMatch |
python | getsentry__sentry | tests/sentry/issues/test_occurrence_consumer.py | {
"start": 3144,
"end": 13222
} | class ____(IssueOccurrenceTestBase):
@django_db_all
def test_occurrence_consumer_with_event(self) -> None:
message = get_test_message(self.project.id)
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
fetched_occurrence = IssueOccurrence.fetch(occurrence.id, self.project.id)
assert fetched_occurrence is not None
self.assert_occurrences_identical(occurrence, fetched_occurrence)
assert fetched_occurrence.event_id is not None
fetched_event = self.eventstore.get_event_by_id(
self.project.id, fetched_occurrence.event_id
)
assert fetched_event is not None
assert fetched_event.get_event_type() == "generic"
assert Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).exists()
@django_db_all
def test_process_profiling_occurrence(self) -> None:
create_default_projects()
event_data = load_data("generic-event-profiling")
event_data["detection_time"] = datetime.datetime.now(tz=timezone.utc)
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(event_data)
assert result is not None
project_id = event_data["event"]["project_id"]
occurrence = result[0]
assert occurrence is not None
event = eventstore.backend.get_event_by_id(project_id, event_data["event"]["event_id"])
assert isinstance(event, Event)
assert event.group is not None
event_for_group = event.for_group(event.group)
assert event_for_group.occurrence_id == occurrence.id
fetched_occurrence = IssueOccurrence.fetch(occurrence.id, project_id)
assert fetched_occurrence is not None
self.assert_occurrences_identical(occurrence, fetched_occurrence)
assert fetched_occurrence.event_id is not None
fetched_event = self.eventstore.get_event_by_id(project_id, fetched_occurrence.event_id)
assert fetched_event is not None
assert fetched_event.get_event_type() == "generic"
assert Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).exists()
def test_invalid_event_payload(self) -> None:
message = get_test_message(self.project.id, event={"title": "no project id"})
with pytest.raises(InvalidEventPayloadError):
with self.feature("organizations:profile-file-io-main-thread-ingest"):
_process_message(message)
def test_invalid_occurrence_payload(self) -> None:
message = get_test_message(self.project.id, type=300)
with (mock.patch("sentry.issues.occurrence_consumer.metrics") as metrics,):
with self.feature("organizations:profile-file-io-main-thread-ingest"):
_process_message(message)
metrics.incr.assert_called_once_with(
"occurrence_ingest.invalid_group_type",
tags={"occurrence_type": 300},
)
def test_mismatch_event_ids(self) -> None:
message = deepcopy(get_test_message(self.project.id))
message["event_id"] = "id1"
message["event"]["event_id"] = "id2"
with pytest.raises(InvalidEventPayloadError):
with self.feature("organizations:profile-file-io-main-thread-ingest"):
_process_message(message)
@django_db_all
def test_occurrence_consumer_without_payload_type(self) -> None:
message = get_test_message(self.project.id)
message.pop("payload_type")
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
fetched_occurrence = IssueOccurrence.fetch(occurrence.id, self.project.id)
assert fetched_occurrence is not None
self.assert_occurrences_identical(occurrence, fetched_occurrence)
assert fetched_occurrence.event_id is not None
fetched_event = self.eventstore.get_event_by_id(
self.project.id, fetched_occurrence.event_id
)
assert fetched_event is not None
assert fetched_event.get_event_type() == "generic"
assert Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).exists()
def test_issue_platform_default_priority(self) -> None:
# test default priority of LOW
message = get_test_message(self.project.id)
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
group = Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).get()
assert group.priority == PriorityLevel.LOW
@with_feature("projects:first-event-severity-calculation")
@mock.patch("sentry.event_manager._get_severity_score")
def test_issue_platform_override_priority(
self, mock_get_severity_score: mock.MagicMock
) -> None:
# test explicitly set priority of HIGH
message = get_test_message(self.project.id)
message["priority"] = PriorityLevel.HIGH.value
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
assert mock_get_severity_score.call_count == 0
group = Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).get()
assert group.priority == PriorityLevel.HIGH
assert "severity" not in group.data["metadata"]
@with_feature("organizations:profile-file-io-main-thread-ingest")
def test_issue_platform_updates_priority(self) -> None:
# test explicitly set priority of HIGH
message = get_test_message(self.project.id)
message["priority"] = PriorityLevel.HIGH.value
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
group = Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).get()
assert group.priority == PriorityLevel.HIGH
# test that the priority is updated
message["priority"] = PriorityLevel.MEDIUM.value
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
group = Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).get()
assert group.priority == PriorityLevel.MEDIUM
def test_new_group_with_user_assignee(self) -> None:
message = get_test_message(self.project.id, assignee=f"user:{self.user.id}")
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
group = Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).get()
assignee = GroupAssignee.objects.get(group=group)
assert assignee.user_id == self.user.id
def test_new_group_with_team_assignee(self) -> None:
message = get_test_message(self.project.id, assignee=f"team:{self.team.id}")
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
group = Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).get()
assignee = GroupAssignee.objects.get(group=group)
assert assignee.team_id == self.team.id
def test_new_group_with_invalid_user_assignee(self) -> None:
other_user = self.create_user()
message = get_test_message(self.project.id, assignee=f"user:{other_user.id}")
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
group = Group.objects.filter(grouphash__hash=occurrence.fingerprint[0]).get()
with pytest.raises(GroupAssignee.DoesNotExist):
GroupAssignee.objects.get(group=group)
@mock.patch(
"sentry.issues.occurrence_consumer.rate_limiter.check_and_use_quotas",
return_value=[MockGranted(granted=False)],
)
def test_rate_limit(self, is_limited: mock.MagicMock) -> None:
message = get_test_message(self.project.id)
with (
self.feature("organizations:profile-file-io-main-thread-ingest"),
self.options({"issues.occurrence-consumer.rate-limit.enabled": True}),
):
result = _process_message(message)
assert result is None
@mock.patch(
"sentry.issues.occurrence_consumer.rate_limiter.check_and_use_quotas",
return_value=[MockGranted(granted=True)],
)
def test_rate_limit_granted(self, is_limited: mock.MagicMock) -> None:
message = get_test_message(self.project.id)
with (
self.feature("organizations:profile-file-io-main-thread-ingest"),
self.options({"issues.occurrence-consumer.rate-limit.enabled": True}),
):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
def test_occurrence_rate_limit_quota(self) -> None:
rate_limit_quota = Quota(**options.get("issues.occurrence-consumer.rate-limit.quota"))
assert rate_limit_quota.window_seconds == 3600
assert rate_limit_quota.granularity_seconds == 60
assert rate_limit_quota.limit == 1000
| IssueOccurrenceProcessMessageTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 339497,
"end": 340116
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("EnvironmentEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("Environment"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| EnvironmentConnection |
python | pytorch__pytorch | torch/fx/experimental/recording.py | {
"start": 19361,
"end": 19950
} | class ____(Exception):
def __init__(
self,
msg: str,
mismatched: list[tuple[str, str, str]],
) -> None:
details = "\n".join(
[
"\n".join(
[
f"==> {inner_msg}",
f" > Left: {str1}",
f" > Right: {str2}",
]
)
for inner_msg, str1, str2 in mismatched
]
)
super().__init__(
f"""\
ShapeEnv not equal: {msg}
{details}
"""
)
| NotEqualError |
python | python-pillow__Pillow | src/PIL/ImageFile.py | {
"start": 25976,
"end": 27727
} | class ____(PyCodec):
"""
Python implementation of a format decoder. Override this class and
add the decoding logic in the :meth:`decode` method.
See :ref:`Writing Your Own File Codec in Python<file-codecs-py>`
"""
_pulls_fd = False
@property
def pulls_fd(self) -> bool:
return self._pulls_fd
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
"""
Override to perform the decoding process.
:param buffer: A bytes object with the data to be decoded.
:returns: A tuple of ``(bytes consumed, errcode)``.
If finished with decoding return -1 for the bytes consumed.
Err codes are from :data:`.ImageFile.ERRORS`.
"""
msg = "unavailable in base decoder"
raise NotImplementedError(msg)
def set_as_raw(
self, data: bytes, rawmode: str | None = None, extra: tuple[Any, ...] = ()
) -> None:
"""
Convenience method to set the internal image from a stream of raw data
:param data: Bytes to be set
:param rawmode: The rawmode to be used for the decoder.
If not specified, it will default to the mode of the image
:param extra: Extra arguments for the decoder.
:returns: None
"""
if not rawmode:
rawmode = self.mode
d = Image._getdecoder(self.mode, "raw", rawmode, extra)
assert self.im is not None
d.setimage(self.im, self.state.extents())
s = d.decode(data)
if s[0] >= 0:
msg = "not enough image data"
raise ValueError(msg)
if s[1] != 0:
msg = "cannot decode image data"
raise ValueError(msg)
| PyDecoder |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_generators.py | {
"start": 16876,
"end": 19107
} | class ____(__TestCase):
def test_close_no_return_value(self):
def f():
yield
gen = f()
gen.send(None)
self.assertIsNone(gen.close())
def test_close_return_value(self):
def f():
try:
yield
# close() raises GeneratorExit here, which is caught
except GeneratorExit:
return 0
gen = f()
gen.send(None)
self.assertEqual(gen.close(), 0)
def test_close_not_catching_exit(self):
def f():
yield
# close() raises GeneratorExit here, which isn't caught and
# therefore propagates -- no return value
return 0
gen = f()
gen.send(None)
self.assertIsNone(gen.close())
def test_close_not_started(self):
def f():
try:
yield
except GeneratorExit:
return 0
gen = f()
self.assertIsNone(gen.close())
def test_close_exhausted(self):
def f():
try:
yield
except GeneratorExit:
return 0
gen = f()
next(gen)
with self.assertRaises(StopIteration):
next(gen)
self.assertIsNone(gen.close())
def test_close_closed(self):
def f():
try:
yield
except GeneratorExit:
return 0
gen = f()
gen.send(None)
self.assertEqual(gen.close(), 0)
self.assertIsNone(gen.close())
def test_close_raises(self):
def f():
try:
yield
except GeneratorExit:
pass
raise RuntimeError
gen = f()
gen.send(None)
with self.assertRaises(RuntimeError):
gen.close()
def test_close_releases_frame_locals(self):
# See gh-118272
class Foo:
pass
f = Foo()
f_wr = weakref.ref(f)
def genfn():
a = f
yield
g = genfn()
next(g)
del f
g.close()
support.gc_collect()
self.assertIsNone(f_wr())
| GeneratorCloseTest |
python | astropy__astropy | astropy/config/configuration.py | {
"start": 1315,
"end": 1635
} | class ____(AstropyWarning):
"""A Warning that is issued when the configuration value specified in the
astropy configuration file does not match the type expected for that
configuration value.
"""
# these are not in __all__ because it's not intended that a user ever see them
| InvalidConfigurationItemWarning |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/validators/test_base_workflow.py | {
"start": 3919,
"end": 13755
} | class ____(TestCase):
def setUp(self) -> None:
self.context = {
"organization": self.organization,
"request": self.make_request(user=self.user),
}
self.integration, self.org_integration = self.create_provider_integration_for(
provider="slack", organization=self.organization, user=self.user
)
self.valid_data = {
"name": "test",
"enabled": True,
"actionFilters": [],
"config": {
"frequency": 30,
},
"triggers": {
"logicType": "any",
"conditions": [],
},
}
def test_create__simple(self) -> None:
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is True
workflow = validator.create(validator.validated_data)
# workflow is created
assert workflow.id is not None
assert workflow.name == self.valid_data["name"]
assert workflow.enabled == self.valid_data["enabled"]
assert workflow.config == self.valid_data["config"]
assert workflow.organization_id == self.organization.id
assert workflow.created_by_id == self.user.id
def test_create__validate_triggers_empty(self) -> None:
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is True
workflow = validator.create(validator.validated_data)
assert workflow.when_condition_group is not None
assert workflow.when_condition_group.conditions.count() == 0
def test_create__validate_triggers_with_conditions(self) -> None:
self.valid_data["triggers"] = {
"logicType": "any",
"conditions": [
{
"type": Condition.EQUAL,
"comparison": 1,
"conditionResult": True,
}
],
}
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is True
workflow = validator.create(validator.validated_data)
trigger = workflow.when_condition_group
assert trigger is not None
assert trigger.conditions.count() == 1
trigger_condition = trigger.conditions.first()
assert trigger_condition is not None
assert trigger_condition.type == Condition.EQUAL
@mock.patch(
"sentry.workflow_engine.registry.action_handler_registry.get",
return_value=MockActionHandler,
)
@mock.patch(
"sentry.notifications.notification_action.registry.action_validator_registry.get",
return_value=MockActionValidatorTranslator,
)
def test_create__with_actions__creates_workflow_group(
self, mock_action_handler: mock.MagicMock, mock_action_validator: mock.MagicMock
) -> None:
self.valid_data["actionFilters"] = [
{
"actions": [
{
"type": Action.Type.SLACK,
"config": {"foo": "bar"},
"data": {"baz": "bar"},
"integrationId": self.integration.id,
}
],
"logicType": "any",
"conditions": [],
"organizationId": self.organization.id,
}
]
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is True
workflow = validator.create(validator.validated_data)
workflow_condition_group = workflow.workflowdataconditiongroup_set.first()
assert workflow_condition_group is not None
assert workflow_condition_group.condition_group.logic_type == "any"
@mock.patch(
"sentry.workflow_engine.registry.action_handler_registry.get",
return_value=MockActionHandler,
)
@mock.patch(
"sentry.notifications.notification_action.registry.action_validator_registry.get",
return_value=MockActionValidatorTranslator,
)
def test_create__with_actions__creates_action_group(
self, mock_action_handler: mock.MagicMock, mock_action_validator: mock.MagicMock
) -> None:
self.valid_data["actionFilters"] = [
{
"actions": [
{
"type": Action.Type.SLACK,
"config": {"foo": "bar"},
"data": {"baz": "bar"},
"integrationId": self.integration.id,
}
],
"logicType": "any",
"conditions": [],
"organizationId": self.organization.id,
}
]
validator = WorkflowValidator(data=self.valid_data, context=self.context)
assert validator.is_valid() is True
workflow = validator.create(validator.validated_data)
workflow_condition_group = workflow.workflowdataconditiongroup_set.first()
assert workflow_condition_group is not None
action_group_query = DataConditionGroupAction.objects.filter(
condition_group=workflow_condition_group.condition_group
)
assert action_group_query.count() == 1
action_group = action_group_query.first()
assert action_group is not None
# check the action / condition group
assert action_group.action.type == Action.Type.SLACK
assert action_group.condition_group.logic_type == "any"
def test_create__exceeds_workflow_limit(self) -> None:
REGULAR_LIMIT = 2
with self.settings(MAX_WORKFLOWS_PER_ORG=REGULAR_LIMIT):
# Create first workflow - should succeed
validator = WorkflowValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
workflow = validator.create(validator.validated_data)
assert workflow.id is not None
# Create second workflow - should succeed
self.valid_data["name"] = "test2"
validator = WorkflowValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
workflow = validator.create(validator.validated_data)
assert workflow.id is not None
# Try to create third workflow - should fail
self.valid_data["name"] = "test3"
validator = WorkflowValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
with pytest.raises(ValidationError) as excinfo:
validator.create(validator.validated_data)
assert excinfo.value.detail == [
ErrorDetail(
string=f"You may not exceed {REGULAR_LIMIT} workflows per organization.",
code="invalid",
)
]
def test_create__exceeds_more_workflow_limit(self) -> None:
REGULAR_LIMIT = 2
MORE_LIMIT = 4
with self.settings(
MAX_WORKFLOWS_PER_ORG=REGULAR_LIMIT, MAX_MORE_WORKFLOWS_PER_ORG=MORE_LIMIT
):
# First verify regular limit is enforced without the feature flag
# Create first REGULAR_LIMIT workflows - should succeed
for i in range(REGULAR_LIMIT):
self.valid_data["name"] = f"test{i}"
validator = WorkflowValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
workflow = validator.create(validator.validated_data)
assert workflow.id is not None
# Try to create workflow beyond regular limit - should fail
self.valid_data["name"] = f"test{REGULAR_LIMIT}"
validator = WorkflowValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
with pytest.raises(ValidationError) as excinfo:
validator.create(validator.validated_data)
assert excinfo.value.detail == [
ErrorDetail(
string=f"You may not exceed {REGULAR_LIMIT} workflows per organization.",
code="invalid",
)
]
# Now enable the feature flag and verify higher limit
with self.feature("organizations:more-workflows"):
# Create workflows up to MORE_LIMIT - should succeed
for i in range(REGULAR_LIMIT, MORE_LIMIT):
self.valid_data["name"] = f"test{i}"
validator = WorkflowValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
workflow = validator.create(validator.validated_data)
assert workflow.id is not None
# Try to create workflow beyond more limit - should fail
self.valid_data["name"] = f"test{MORE_LIMIT}"
validator = WorkflowValidator(data=self.valid_data, context=self.context)
validator.is_valid(raise_exception=True)
with pytest.raises(ValidationError) as excinfo:
validator.create(validator.validated_data)
assert excinfo.value.detail == [
ErrorDetail(
string=f"You may not exceed {MORE_LIMIT} workflows per organization.",
code="invalid",
)
]
@mock.patch(
"sentry.notifications.notification_action.registry.action_validator_registry.get",
return_value=MockActionValidatorTranslator,
)
| TestWorkflowValidatorCreate |
python | pikepdf__pikepdf | tests/test_filters.py | {
"start": 711,
"end": 2015
} | class ____(TokenFilter):
def __init__(self):
super().__init__()
self.names = []
self.rawnames = []
def handle_token(self, token):
if token.type_ == TokenType.name_:
self.names.append(token.value)
self.rawnames.append(token.raw_value)
return None
def test_token_eq_token():
token_42 = Token(TokenType.integer, b'42')
assert Token(TokenType.space, b' ') != token_42
assert Token(TokenType.integer, b'42') == token_42
assert token_42 != 42
assert repr(token_42) == "pikepdf.Token(TokenType.integer, b'42')"
@pytest.mark.parametrize(
'filter, expected',
[
(FilterThru, b'q\n144.0000 0 0 144.0000 0.0000 0.0000 cm\n/Im0 Do\nQ'),
(FilterDrop, b''),
(FilterNumbers, b'144.0000 0 0 144.0000 0.0000 0.0000 '),
],
)
def test_filter_thru(pal, filter, expected):
page = pal.pages[0]
page.add_content_token_filter(filter())
after = page.obj.Contents.read_bytes()
assert after == expected
def test_filter_names(pal):
page = pal.pages[0]
filter = FilterCollectNames()
result = page.get_filtered_contents(filter)
assert result == b''
assert filter.names == ['/Im0']
after = page.obj.Contents.read_bytes()
assert after != b''
| FilterCollectNames |
python | Delgan__loguru | loguru/_file_sink.py | {
"start": 5093,
"end": 14951
} | class ____:
def __init__(
self,
path,
*,
rotation=None,
retention=None,
compression=None,
delay=False,
watch=False,
mode="a",
buffering=1,
encoding="utf8",
**kwargs
):
self.encoding = encoding
self._kwargs = {**kwargs, "mode": mode, "buffering": buffering, "encoding": self.encoding}
self._path = str(path)
self._glob_patterns = self._make_glob_patterns(self._path)
self._rotation_function = self._make_rotation_function(rotation)
self._retention_function = self._make_retention_function(retention)
self._compression_function = self._make_compression_function(compression)
self._file = None
self._file_path = None
self._watch = watch
self._file_dev = -1
self._file_ino = -1
if not delay:
path = self._create_path()
self._create_dirs(path)
self._create_file(path)
def write(self, message):
if self._file is None:
path = self._create_path()
self._create_dirs(path)
self._create_file(path)
if self._watch:
self._reopen_if_needed()
if self._rotation_function is not None and self._rotation_function(message, self._file):
self._terminate_file(is_rotating=True)
self._file.write(message)
def stop(self):
if self._watch:
self._reopen_if_needed()
self._terminate_file(is_rotating=False)
def tasks_to_complete(self):
return []
def _create_path(self):
path = self._path.format_map({"time": FileDateFormatter()})
return os.path.abspath(path)
def _create_dirs(self, path):
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
def _create_file(self, path):
self._file = open(path, **self._kwargs)
self._file_path = path
if self._watch:
fileno = self._file.fileno()
result = os.fstat(fileno)
self._file_dev = result[ST_DEV]
self._file_ino = result[ST_INO]
def _close_file(self):
self._file.flush()
self._file.close()
self._file = None
self._file_path = None
self._file_dev = -1
self._file_ino = -1
def _reopen_if_needed(self):
# Implemented based on standard library:
# https://github.com/python/cpython/blob/cb589d1b/Lib/logging/handlers.py#L486
if not self._file:
return
filepath = self._file_path
try:
result = os.stat(filepath)
except FileNotFoundError:
result = None
if not result or result[ST_DEV] != self._file_dev or result[ST_INO] != self._file_ino:
self._close_file()
self._create_dirs(filepath)
self._create_file(filepath)
def _terminate_file(self, *, is_rotating=False):
old_path = self._file_path
if self._file is not None:
self._close_file()
if is_rotating:
new_path = self._create_path()
self._create_dirs(new_path)
if new_path == old_path:
creation_time = get_ctime(old_path)
root, ext = os.path.splitext(old_path)
renamed_path = generate_rename_path(root, ext, creation_time)
os.rename(old_path, renamed_path)
old_path = renamed_path
if is_rotating or self._rotation_function is None:
if self._compression_function is not None and old_path is not None:
self._compression_function(old_path)
if self._retention_function is not None:
logs = {
file
for pattern in self._glob_patterns
for file in glob.glob(pattern)
if os.path.isfile(file)
}
self._retention_function(list(logs))
if is_rotating:
self._create_file(new_path)
set_ctime(new_path, datetime.datetime.now().timestamp())
@staticmethod
def _make_glob_patterns(path):
formatter = string.Formatter()
tokens = formatter.parse(path)
escaped = "".join(glob.escape(text) + "*" * (name is not None) for text, name, *_ in tokens)
root, ext = os.path.splitext(escaped)
if not ext:
return [escaped, escaped + ".*"]
return [escaped, escaped + ".*", root + ".*" + ext, root + ".*" + ext + ".*"]
@staticmethod
def _make_rotation_function(rotation):
if rotation is None:
return None
if isinstance(rotation, (list, tuple, set)):
if len(rotation) == 0:
raise ValueError("Must provide at least one rotation condition")
return Rotation.RotationGroup(
[FileSink._make_rotation_function(rot) for rot in rotation]
)
if isinstance(rotation, str):
size = string_parsers.parse_size(rotation)
if size is not None:
return FileSink._make_rotation_function(size)
interval = string_parsers.parse_duration(rotation)
if interval is not None:
return FileSink._make_rotation_function(interval)
frequency = string_parsers.parse_frequency(rotation)
if frequency is not None:
return Rotation.RotationTime(frequency)
daytime = string_parsers.parse_daytime(rotation)
if daytime is not None:
day, time = daytime
if day is None:
return FileSink._make_rotation_function(time)
if time is None:
time = datetime.time(0, 0, 0)
step_forward = partial(Rotation.forward_weekday, weekday=day)
return Rotation.RotationTime(step_forward, time)
raise ValueError("Cannot parse rotation from: '%s'" % rotation)
if isinstance(rotation, (numbers.Real, decimal.Decimal)):
return partial(Rotation.rotation_size, size_limit=rotation)
if isinstance(rotation, datetime.time):
return Rotation.RotationTime(Rotation.forward_day, rotation)
if isinstance(rotation, datetime.timedelta):
step_forward = partial(Rotation.forward_interval, interval=rotation)
return Rotation.RotationTime(step_forward)
if callable(rotation):
return rotation
raise TypeError("Cannot infer rotation for objects of type: '%s'" % type(rotation).__name__)
@staticmethod
def _make_retention_function(retention):
if retention is None:
return None
if isinstance(retention, str):
interval = string_parsers.parse_duration(retention)
if interval is None:
raise ValueError("Cannot parse retention from: '%s'" % retention)
return FileSink._make_retention_function(interval)
if isinstance(retention, int):
return partial(Retention.retention_count, number=retention)
if isinstance(retention, datetime.timedelta):
return partial(Retention.retention_age, seconds=retention.total_seconds())
if callable(retention):
return retention
raise TypeError(
"Cannot infer retention for objects of type: '%s'" % type(retention).__name__
)
@staticmethod
def _make_compression_function(compression):
if compression is None:
return None
if isinstance(compression, str):
ext = compression.strip().lstrip(".")
if ext == "gz":
import gzip
compress = partial(Compression.copy_compress, opener=gzip.open, mode="wb")
elif ext == "bz2":
import bz2
compress = partial(Compression.copy_compress, opener=bz2.open, mode="wb")
elif ext == "xz":
import lzma
compress = partial(
Compression.copy_compress, opener=lzma.open, mode="wb", format=lzma.FORMAT_XZ
)
elif ext == "lzma":
import lzma
compress = partial(
Compression.copy_compress, opener=lzma.open, mode="wb", format=lzma.FORMAT_ALONE
)
elif ext == "tar":
import tarfile
compress = partial(Compression.add_compress, opener=tarfile.open, mode="w:")
elif ext == "tar.gz":
import gzip
import tarfile
compress = partial(Compression.add_compress, opener=tarfile.open, mode="w:gz")
elif ext == "tar.bz2":
import bz2
import tarfile
compress = partial(Compression.add_compress, opener=tarfile.open, mode="w:bz2")
elif ext == "tar.xz":
import lzma
import tarfile
compress = partial(Compression.add_compress, opener=tarfile.open, mode="w:xz")
elif ext == "zip":
import zipfile
compress = partial(
Compression.write_compress,
opener=zipfile.ZipFile,
mode="w",
compression=zipfile.ZIP_DEFLATED,
)
else:
raise ValueError("Invalid compression format: '%s'" % ext)
return partial(Compression.compression, ext="." + ext, compress_function=compress)
if callable(compression):
return compression
raise TypeError(
"Cannot infer compression for objects of type: '%s'" % type(compression).__name__
)
| FileSink |
python | etianen__django-reversion | tests/test_app/tests/test_models.py | {
"start": 9847,
"end": 10586
} | class ____(TestModelMixin, TestBase):
def testFieldDict(self):
with reversion.create_revision():
obj = TestModel.objects.create()
self.assertEqual(Version.objects.get_for_object(obj).get().field_dict, {
"id": obj.pk,
"name": "v1",
"related": [],
})
def testFieldDictM2M(self):
obj_related = TestModelRelated.objects.create()
with reversion.create_revision():
obj = TestModel.objects.create()
obj.related.add(obj_related)
self.assertEqual(Version.objects.get_for_object(obj).get().field_dict, {
"id": obj.pk,
"name": "v1",
"related": [obj_related.pk],
})
| FieldDictTest |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_percent_diff_greater_than_threshold.py | {
"start": 955,
"end": 7685
} | class ____(
DataProfilerProfileMetricProvider
):
metric_name = "data_profiler.profile_numeric_columns_percent_diff_greater_than_threshold"
value_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 - 22
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
runtime_configuration: Dict,
):
profile_percent_diff = metrics.get("data_profiler.profile_percent_diff")
numeric_columns = metrics.get("data_profiler.profile_numeric_columns")
limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"]
numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"]
columns = list(profile_percent_diff["global_stats"]["profile_schema"][1].keys())
data_stats = profile_percent_diff["data_stats"]
requested_columns = {}
unavailable_stats = {}
# Adds columns if generic column key is provided
# Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly
limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys)
limit_check_report_keys_copy = replace_generic_operator_in_report_keys(
limit_check_report_keys_copy, numeric_columns
)
for col, stats in limit_check_report_keys_copy.items():
if col not in numeric_columns: # Makes sure column requested is numeric
requested_columns[col] = "Column is Non-Numeric"
continue
# adds stats if generic stat key is provided
numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics)
stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy)
if col not in columns: # Makes sure column exists within profile schema
requested_columns[col] = "Column requested was not found."
continue
col_data_stats = {}
for data_stat in data_stats:
if data_stat["column_name"] == col:
col_data_stats = data_stat["statistics"]
break
requested_columns[col] = {}
unavailable_stats[col] = {}
for stat, threshold in stats.items():
if stat not in col_data_stats:
requested_columns[col][stat] = "Statistic requested was not found."
continue
diff_val = col_data_stats[stat]
if diff_val == "ERR_divide_by_zero" or diff_val == "ERR_no_original_value":
unavailable_stats[col][stat] = diff_val
continue
if diff_val == "unchanged": # In the case there is no delta
diff_val = 0
above_threshold = is_value_greater_than_threshold(diff_val, threshold)
if not above_threshold:
requested_columns[col][stat] = {
"threshold": threshold,
"value_found": diff_val,
}
else:
requested_columns[col][stat] = True
for column in list(unavailable_stats.keys()):
if unavailable_stats[column] == {}:
unavailable_stats.pop(column, None)
if unavailable_stats != {}:
div_by_zero_stats = []
no_original_value = []
for column, stats in unavailable_stats.items():
current_col = copy.deepcopy(limit_check_report_keys_copy[column])
for stat, val in stats.items():
if val == "ERR_divide_by_zero":
div_by_zero_stats.append(column + ": " + stat)
current_col.pop(stat, None)
elif val == "ERR_no_original_value":
no_original_value.append(column + ": " + stat)
current_col.pop(stat, None)
limit_check_report_keys_copy[column] = current_col
warning = "\nWARNING:\n"
if len(div_by_zero_stats) > 0:
warning += "Div By Zero ERROR:\nValue in profile report was 0 for the following column: stat\n"
for div_by_zero_stat in div_by_zero_stats:
warning += " " + div_by_zero_stat + "\n"
if len(no_original_value) > 0:
warning += "Value not Found ERROR:\nStatistic was not found in profile report for the following column: stat\n"
for no_original_value_string in no_original_value:
warning += " " + no_original_value_string + "\n"
warning += "\nTo avoid these errors, you should use the replace 'limit_check_report_keys' with the following:\n"
warning += r"" + json.dumps(limit_check_report_keys_copy, indent=2)
warning += "\n"
warnings.warn(warning)
return requested_columns
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying
the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== "data_profiler.profile_numeric_columns_percent_diff_greater_than_threshold"
):
dependencies["data_profiler.profile_percent_diff"] = MetricConfiguration(
metric_name="data_profiler.profile_percent_diff",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration(
metric_name="data_profiler.profile_numeric_columns",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
| DataProfilerProfileNumericColumnsPercentDiffGreaterThanThreshold |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py | {
"start": 790,
"end": 2142
} | class ____(RpcAgentTestFixture):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.messages_to_fail = retryable_message_types
self.messages_to_delay = default_messages_to_delay
@property
def rpc_backend(self):
return rpc.backend_registry.BackendType["FAULTY_TENSORPIPE"]
@property
def rpc_backend_options(self):
return rpc.backend_registry.construct_rpc_backend_options(
self.rpc_backend,
init_method=self.init_method,
num_worker_threads=8,
num_fail_sends=3,
messages_to_fail=self.messages_to_fail,
messages_to_delay=self.messages_to_delay,
)
def setup_fault_injection(self, faulty_messages, messages_to_delay):
if faulty_messages is not None:
self.messages_to_fail = faulty_messages
if messages_to_delay is not None:
self.messages_to_delay = messages_to_delay
def get_shutdown_error_regex(self):
error_regexes = [
"Exception in thread pool task",
"Connection reset by peer",
"Connection closed by peer",
]
return "|".join([f"({error_str})" for error_str in error_regexes])
def get_timeout_error_regex(self):
return "RPC ran for more than"
| FaultyRpcAgentTestFixture |
python | aimacode__aima-python | utils4e.py | {
"start": 20635,
"end": 21711
} | class ____:
"""Given 'P |'==>'| Q, first form PartialExpr('==>', P), then combine with Q."""
def __init__(self, op, lhs):
self.op, self.lhs = op, lhs
def __or__(self, rhs):
return Expr(self.op, self.lhs, rhs)
def __repr__(self):
return "PartialExpr('{}', {})".format(self.op, self.lhs)
def expr(x):
"""Shortcut to create an Expression. x is a str in which:
- identifiers are automatically defined as Symbols.
- ==> is treated as an infix |'==>'|, as are <== and <=>.
If x is already an Expression, it is returned unchanged. Example:
>>> expr('P & Q ==> Q')
((P & Q) ==> Q)
"""
if isinstance(x, str):
return eval(expr_handle_infix_ops(x), defaultkeydict(Symbol))
else:
return x
infix_ops = '==> <== <=>'.split()
def expr_handle_infix_ops(x):
"""Given a str, return a new str with ==> replaced by |'==>'|, etc.
>>> expr_handle_infix_ops('P ==> Q')
"P |'==>'| Q"
"""
for op in infix_ops:
x = x.replace(op, '|' + repr(op) + '|')
return x
| PartialExpr |
python | kamyu104__LeetCode-Solutions | Python/find-and-replace-pattern.py | {
"start": 52,
"end": 530
} | class ____(object):
def findAndReplacePattern(self, words, pattern):
"""
:type words: List[str]
:type pattern: str
:rtype: List[str]
"""
def match(word):
lookup = {}
for x, y in itertools.izip(pattern, word):
if lookup.setdefault(x, y) != y:
return False
return len(set(lookup.values())) == len(lookup.values())
return filter(match, words)
| Solution |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_registries.py | {
"start": 833,
"end": 1196
} | class ____(GQLResult):
node: Optional[RegistryFragment]
FetchRegistries.model_rebuild()
FetchRegistriesOrganization.model_rebuild()
FetchRegistriesOrganizationOrgEntity.model_rebuild()
FetchRegistriesOrganizationOrgEntityProjects.model_rebuild()
FetchRegistriesOrganizationOrgEntityProjectsEdges.model_rebuild()
| FetchRegistriesOrganizationOrgEntityProjectsEdges |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1597099,
"end": 1600448
} | class ____(Transform):
"""
WindowTransform schema wrapper.
Parameters
----------
window : Sequence[dict, :class:`WindowFieldDef`]
The definition of the fields in the window, and what calculations to use.
frame : Sequence[float, None]
A frame specification as a two-element array indicating how the sliding window
should proceed. The array entries should either be a number indicating the offset
from the current data object, or null to indicate unbounded rows preceding or
following the current data object. The default value is ``[null, 0]``, indicating
that the sliding window includes the current object and all preceding objects. The
value ``[-5, 5]`` indicates that the window should include five objects preceding
and five objects following the current object. Finally, ``[null, null]`` indicates
that the window frame should always include all data objects. If you this frame and
want to assign the same value to add objects, you can use the simpler `join
aggregate transform <https://vega.github.io/vega-lite/docs/joinaggregate.html>`__.
The only operators affected are the aggregation operations and the ``first_value``,
``last_value``, and ``nth_value`` window operations. The other window operations are
not affected by this.
**Default value:**: ``[null, 0]`` (includes the current object and all preceding
objects)
groupby : Sequence[str, :class:`FieldName`]
The data fields for partitioning the data objects into separate windows. If
unspecified, all data points will be in a single window.
ignorePeers : bool
Indicates if the sliding window frame should ignore peer values (data that are
considered identical by the sort criteria). The default is false, causing the window
frame to expand to include all peer values. If set to true, the window frame will be
defined by offset values only. This setting only affects those operations that
depend on the window frame, namely aggregation operations and the first_value,
last_value, and nth_value window operations.
**Default value:** ``false``
sort : Sequence[dict, :class:`SortField`]
A sort field definition for sorting data objects within a window. If two data
objects are considered equal by the comparator, they are considered "peer" values of
equal rank. If sort is not specified, the order is undefined: data objects are
processed in the order they are observed and none are considered peers (the
ignorePeers parameter is ignored and treated as if set to ``true``).
"""
_schema = {"$ref": "#/definitions/WindowTransform"}
def __init__(
self,
window: Optional[Sequence[SchemaBase | Map]] = Undefined,
frame: Optional[Sequence[float | None]] = Undefined,
groupby: Optional[Sequence[str | SchemaBase]] = Undefined,
ignorePeers: Optional[bool] = Undefined,
sort: Optional[Sequence[SchemaBase | Map]] = Undefined,
**kwds,
):
super().__init__(
window=window,
frame=frame,
groupby=groupby,
ignorePeers=ignorePeers,
sort=sort,
**kwds,
)
| WindowTransform |
python | getsentry__sentry | src/sentry/core/endpoints/project_index.py | {
"start": 928,
"end": 4182
} | class ____(Endpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectPermission,)
def get(self, request: Request) -> Response:
"""
List your Projects
``````````````````
Return a list of projects available to the authenticated
session in a region.
:auth: required
"""
queryset = Project.objects.select_related("organization").distinct()
status = request.GET.get("status", "active")
if status == "active":
queryset = queryset.filter(status=ObjectStatus.ACTIVE)
elif status == "deleted":
queryset = queryset.exclude(status=ObjectStatus.ACTIVE)
elif status:
queryset = queryset.none()
if request.auth and not request.user.is_authenticated:
if request.auth.project_id:
queryset = queryset.filter(id=request.auth.project_id)
elif request.auth.organization_id is not None:
queryset = queryset.filter(organization_id=request.auth.organization_id)
else:
queryset = queryset.none()
elif not (is_active_superuser(request) and request.GET.get("show") == "all"):
if request.user.is_authenticated and request.user.is_sentry_app:
queryset = SentryAppInstallation.objects.get_projects(request.auth)
if isinstance(queryset, EmptyQuerySet):
raise AuthenticationFailed("Token not found")
else:
queryset = queryset.filter(teams__organizationmember__user_id=request.user.id)
org_id_filter = request.GET.get("organizationId", None)
if org_id_filter:
queryset = queryset.filter(organization_id=org_id_filter)
query = request.GET.get("query")
if query:
tokens = tokenize_query(query)
for key, value in tokens.items():
if key == "query":
value_s = " ".join(value)
queryset = queryset.filter(
Q(name__icontains=value_s) | Q(slug__icontains=value_s)
)
elif key == "slug":
queryset = queryset.filter(in_iexact("slug", value))
elif key == "name":
queryset = queryset.filter(in_iexact("name", value))
elif key == "platform":
queryset = queryset.filter(
id__in=ProjectPlatform.objects.filter(platform__in=value).values(
"project_id"
)
)
elif key == "dsn":
queryset = queryset.filter(key_set__public_key__in=value)
elif key == "id":
queryset = queryset.filter(id__in=value)
else:
queryset = queryset.none()
return self.paginate(
request=request,
queryset=queryset,
order_by="-date_added",
on_results=lambda x: serialize(x, request.user, ProjectWithOrganizationSerializer()),
paginator_cls=DateTimePaginator,
)
| ProjectIndexEndpoint |
python | ray-project__ray | rllib/utils/schedules/constant_schedule.py | {
"start": 292,
"end": 1002
} | class ____(Schedule):
"""A Schedule where the value remains constant over time."""
def __init__(self, value: float, framework: Optional[str] = None):
"""Initializes a ConstantSchedule instance.
Args:
value: The constant value to return, independently of time.
framework: The framework descriptor string, e.g. "tf",
"torch", or None.
"""
super().__init__(framework=framework)
self._v = value
@override(Schedule)
def _value(self, t: TensorType) -> TensorType:
return self._v
@override(Schedule)
def _tf_value_op(self, t: TensorType) -> TensorType:
return tf.constant(self._v)
| ConstantSchedule |
python | getsentry__sentry | src/sentry/api/endpoints/organization_sampling_project_rates.py | {
"start": 993,
"end": 1700
} | class ____(Serializer):
"""Serializer for OrganizationSamplingProjectRatesEndpoint.get"""
def get_attrs(self, item_list, user, **kwargs) -> MutableMapping[Any, Any]:
options = ProjectOption.objects.get_value_bulk(item_list, OPTION_KEY)
# NOTE: `get_value_bulk` does not resolve defaults. The default does not
# depend on epochs, so we can speed this up by using the constant.
return {
item: value if value is not None else TARGET_SAMPLE_RATE_DEFAULT
for item, value in options.items()
}
def serialize(self, obj: Any, attrs: Any, user, **kwargs) -> Mapping[str, Any]:
return {"id": obj.id, "sampleRate": attrs}
| GetSerializer |
python | gevent__gevent | src/gevent/events.py | {
"start": 11020,
"end": 11178
} | class ____(IGeventPatchEvent):
"""
An event emitted *after* gevent has patched something.
"""
@implementer(IGeventDidPatchEvent)
| IGeventDidPatchEvent |
python | ray-project__ray | python/ray/serve/tests/test_metrics_2.py | {
"start": 846,
"end": 996
} | class ____:
async def __call__(self):
signal = ray.get_actor("signal123")
await signal.wait.remote()
@serve.deployment
| WaitForSignal |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/errors.py | {
"start": 772,
"end": 914
} | class ____(DagsterDbtError):
"""Error when we expect manifest.json to generated already but it is absent."""
| DagsterDbtManifestNotFoundError |
python | dagster-io__dagster | python_modules/libraries/dagster-celery-docker/dagster_celery_docker/executor.py | {
"start": 6169,
"end": 13106
} | class ____(Executor):
def __init__(
self,
retries,
docker_config,
broker=None,
backend=None,
include=None,
config_source=None,
):
self._retries = check.inst_param(retries, "retries", RetryMode)
self.broker = check.opt_str_param(broker, "broker", default=broker_url)
self.backend = check.opt_str_param(backend, "backend", default=result_backend)
self.include = check.opt_list_param(include, "include", of_type=str)
self.config_source = dict_wrapper(
dict(DEFAULT_CONFIG, **check.opt_dict_param(config_source, "config_source"))
)
self.docker_config = check.dict_param(docker_config, "docker_config")
@property
def retries(self):
return self._retries
def execute(self, plan_context, execution_plan):
return core_celery_execution_loop(
plan_context, execution_plan, step_execution_fn=_submit_task_docker
)
def app_args(self):
return {
"broker": self.broker,
"backend": self.backend,
"include": self.include,
"config_source": self.config_source,
"retries": self.retries,
}
def _submit_task_docker(app, plan_context, step, queue, priority, known_state):
execute_step_args = ExecuteStepArgs(
job_origin=plan_context.reconstructable_job.get_python_origin(),
run_id=plan_context.dagster_run.run_id,
step_keys_to_execute=[step.key],
instance_ref=plan_context.instance.get_ref(),
retry_mode=plan_context.executor.retries.for_inner_plan(),
known_state=known_state,
print_serialized_events=True,
)
task = create_docker_task(app)
task_signature = task.si( # pyright: ignore[reportFunctionMemberAccess]
execute_step_args_packed=pack_value(execute_step_args),
docker_config=plan_context.executor.docker_config,
)
return task_signature.apply_async(
priority=priority,
queue=queue,
routing_key=f"{queue}.execute_step_docker",
)
def create_docker_task(celery_app, **task_kwargs):
@celery_app.task(bind=True, name="execute_step_docker", **task_kwargs)
def _execute_step_docker(
self,
execute_step_args_packed,
docker_config,
):
"""Run step execution in a Docker container."""
execute_step_args = unpack_value(
check.dict_param(
execute_step_args_packed,
"execute_step_args_packed",
),
as_type=ExecuteStepArgs,
)
check.dict_param(docker_config, "docker_config")
instance = DagsterInstance.from_ref(execute_step_args.instance_ref) # pyright: ignore[reportArgumentType]
dagster_run = check.not_none(
instance.get_run_by_id(execute_step_args.run_id),
f"Could not load run {execute_step_args.run_id}",
)
step_keys_str = ", ".join(execute_step_args.step_keys_to_execute) # pyright: ignore[reportCallIssue,reportArgumentType]
docker_image = (
docker_config["image"]
if docker_config.get("image")
else dagster_run.job_code_origin.repository_origin.container_image # pyright: ignore[reportOptionalMemberAccess]
)
if not docker_image:
raise Exception("No docker image specified by either the job or the repository")
client = docker.client.from_env()
if docker_config.get("registry"):
client.login(
registry=docker_config["registry"]["url"],
username=docker_config["registry"]["username"],
password=docker_config["registry"]["password"],
)
# Post event for starting execution
engine_event = instance.report_engine_event(
f"Executing steps {step_keys_str} in Docker container {docker_image}",
dagster_run,
EngineEventData(
{
"Step keys": step_keys_str,
"Image": docker_image,
"Celery worker": self.request.hostname,
},
marker_end=DELEGATE_MARKER,
),
CeleryDockerExecutor,
step_key=execute_step_args.step_keys_to_execute[0], # pyright: ignore[reportOptionalSubscript]
)
serialized_events = [serialize_value(engine_event)]
docker_env = {}
if docker_config.get("env_vars"):
docker_env = {env_name: os.getenv(env_name) for env_name in docker_config["env_vars"]}
container_kwargs = check.opt_dict_param(
docker_config.get("container_kwargs"), "container_kwargs", key_type=str
)
# set defaults for detach and auto_remove
container_kwargs["detach"] = container_kwargs.get("detach", False)
container_kwargs["auto_remove"] = container_kwargs.get("auto_remove", True)
# if environment variables are provided via container_kwargs, merge with env_vars
if container_kwargs.get("environment") is not None:
e_vars = container_kwargs.get("environment")
if isinstance(e_vars, dict):
docker_env.update(e_vars)
else:
for v in e_vars: # pyright: ignore[reportOptionalIterable]
key, val = v.split("=")
docker_env[key] = val
del container_kwargs["environment"]
try:
docker_response = client.containers.run(
docker_image,
command=execute_step_args.get_command_args(), # type: ignore # Sequence list mismatch
environment=docker_env, # type: ignore # Mapping dict mismatch
network=docker_config.get("network", None),
**container_kwargs,
)
res = docker_response.decode("utf-8")
except docker.errors.ContainerError as err: # pyright: ignore[reportAttributeAccessIssue]
metadata = {"Job image": docker_image}
if err.stderr is not None:
metadata["Docker stderr"] = err.stderr
instance.report_engine_event(
f"Failed to run steps {step_keys_str} in Docker container {docker_image}",
dagster_run,
EngineEventData(metadata),
CeleryDockerExecutor,
step_key=execute_step_args.step_keys_to_execute[0], # pyright: ignore[reportOptionalSubscript]
)
raise
else:
if res is None:
raise Exception("No response from execute_step in CeleryDockerExecutor")
events = filter_dagster_events_from_cli_logs(res.split("\n"))
serialized_events += [serialize_value(event) for event in events]
return serialized_events
return _execute_step_docker
| CeleryDockerExecutor |
python | apache__airflow | providers/papermill/tests/unit/papermill/hooks/test_kernel.py | {
"start": 894,
"end": 1581
} | class ____:
"""
Tests for Kernel connection
"""
def test_kernel_connection(self):
"""
Test that fetches kernelConnection with configured host and ports
"""
from airflow.providers.papermill.hooks.kernel import KernelHook
conn = Connection(
conn_type="jupyter_kernel", host="test_host", extra='{"shell_port": 60000, "session_key": "key"}'
)
with patch.object(KernelHook, "get_connection", return_value=conn):
hook = KernelHook()
assert hook.get_conn().ip == "test_host"
assert hook.get_conn().shell_port == 60000
assert hook.get_conn().session_key == "key"
| TestKernelHook |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_emr_base.py | {
"start": 2237,
"end": 4033
} | class ____:
def test_poke_returns_true_when_state_is_in_target_states(self):
operator = EmrBaseSensorSubclass(
task_id="test_task",
poke_interval=2,
)
operator.response = {
"SomeKey": {"State": TARGET_STATE},
"ResponseMetadata": {"HTTPStatusCode": GOOD_HTTP_STATUS},
}
operator.execute(None)
def test_poke_returns_false_when_state_is_not_in_target_states(self):
operator = EmrBaseSensorSubclass(
task_id="test_task",
poke_interval=2,
)
operator.response = {
"SomeKey": {"State": NON_TARGET_STATE},
"ResponseMetadata": {"HTTPStatusCode": GOOD_HTTP_STATUS},
}
assert operator.poke(None) is False
def test_poke_returns_false_when_http_response_is_bad(self):
operator = EmrBaseSensorSubclass(
task_id="test_task",
poke_interval=2,
)
operator.response = {
"SomeKey": {"State": TARGET_STATE},
"ResponseMetadata": {"HTTPStatusCode": BAD_HTTP_STATUS},
}
assert operator.poke(None) is False
def test_poke_raises_error_when_state_is_in_failed_states(self):
operator = EmrBaseSensorSubclass(
task_id="test_task",
poke_interval=2,
)
operator.response = {
"SomeKey": {"State": FAILED_STATE, "StateChangeReason": {"Code": EXPECTED_CODE}},
"ResponseMetadata": {"HTTPStatusCode": GOOD_HTTP_STATUS},
}
with pytest.raises(AirflowException) as ctx:
operator.poke(None)
assert "EMR job failed" in str(ctx.value)
assert EXPECTED_CODE in str(ctx.value)
assert EMPTY_CODE not in str(ctx.value)
| TestEmrBaseSensor |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 3574,
"end": 3877
} | class ____(ConcreteTemplate):
key = cuda.match_any_sync
cases = [
signature(types.i4, types.i4, types.i4),
signature(types.i4, types.i4, types.i8),
signature(types.i4, types.i4, types.f4),
signature(types.i4, types.i4, types.f8),
]
@register
| Cuda_match_any_sync |
python | pytorch__pytorch | test/distributed/_composable/test_replicate_training.py | {
"start": 2952,
"end": 7196
} | class ____(FSDPTestMultiThread):
@property
def world_size(self) -> int:
return 4
@skip_if_lt_x_gpu(1)
def test_param_registration_after_forward(self):
"""Tests the parameter registration after forward."""
device = torch.device(device_type.type, 0)
# Single Replicate group
torch.manual_seed(42)
model = MLP(3, device)
# Since seed is per process, not per thread, we broadcast to ensure
# the same parameters across ranks
for param in model.parameters():
dist.broadcast(param, src=0)
ref_model = copy.deepcopy(model)
replicate(model) # root only
inp = torch.randn((2, 3), device=device_type.type)
self._assert_dtensor_params(model.parameters())
self._assert_same_params(model.parameters(), ref_model.parameters())
model(inp)
self._assert_tensor_params(model.parameters())
self._assert_same_params(model.parameters(), ref_model.parameters())
model.reshard() # however, we can manually reshard
self._assert_dtensor_params(model.parameters())
self._assert_same_params(model.parameters(), ref_model.parameters())
# Multiple Replicate groups
torch.manual_seed(42)
model = nn.Sequential(MLP(3, device), MLP(3, device))
for param in model.parameters():
dist.broadcast(param, src=0)
ref_model = copy.deepcopy(model)
replicate(model[0].in_proj)
replicate(model[0].out_proj)
replicate(model)
self._assert_dtensor_params(model.parameters())
self._assert_same_params(model.parameters(), ref_model.parameters())
model(inp)
non_root_params = list(model[0].in_proj.parameters()) + list(
model[0].out_proj.parameters()
)
root_params = list(set(model.parameters()) - set(non_root_params))
self._assert_tensor_params(non_root_params)
self._assert_tensor_params(root_params)
self._assert_same_params(model.parameters(), ref_model.parameters())
for module in model.modules():
if isinstance(module, FSDPModule):
module.reshard() # however, we can manually reshard
self._assert_dtensor_params(model.parameters())
self._assert_same_params(model.parameters(), ref_model.parameters())
@skip_if_lt_x_gpu(1)
def test_param_registration_after_backward(self):
"""Tests the parameter registration after backward."""
device = torch.device(device_type.type, 0)
# Single Replicate group
model = MLP(8, device)
replicate(model) # root only
inp = torch.randn((2, 8), device=device_type.type)
self._assert_dtensor_params(model.parameters())
model(inp).sum().backward()
self._assert_dtensor_params(model.parameters())
# Multiple Replicate groups
model = MLP(8, device)
replicate(model.in_proj)
replicate(model.out_proj)
replicate(model)
self._assert_dtensor_params(model.parameters())
model(inp).sum().backward()
self._assert_dtensor_params(model.parameters())
def _assert_tensor_params(self, params: Iterable[nn.Parameter]):
# need to iterate over the list multiple times
params = list(params)
self.assertGreater(len(params), 0)
for param in params:
self.assertNotIsInstance(param, DTensor)
self.assertIsInstance(param, torch.Tensor)
def _assert_dtensor_params(self, params: Iterable[nn.Parameter]):
params = list(params)
self.assertGreater(len(params), 0)
for param in params:
self.assertIsInstance(param, DTensor)
def _assert_same_params(
self, params: Iterable[nn.Parameter], ref_params: Iterable[nn.Parameter]
):
params, ref_params = list(params), list(ref_params)
self.assertEqual(len(params), len(ref_params))
for param, ref_param in zip(params, ref_params):
if isinstance(param, DTensor):
param = param.full_tensor()
self.assertEqual(param.shape, ref_param.shape)
self.assertEqual(param, ref_param)
| TestReplicateRegisteredParams |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_basic.py | {
"start": 2673,
"end": 34455
} | class ____(fixtures.TestBase):
def test_unbound_declarative_base(self):
Base = declarative_base()
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
s = Session()
with testing.expect_raises(exc.UnboundExecutionError):
s.get_bind(User)
def test_unbound_cls_registry(self):
reg = registry()
Base = reg.generate_base()
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
s = Session()
with testing.expect_raises(exc.UnboundExecutionError):
s.get_bind(User)
@testing.variation(
"base_type",
["declbase", "declbasenometa", "declbasefn", "asdeclarative"],
)
def test_reg_constructor_is_present(self, base_type):
"""test #9171"""
if base_type.declbase:
class Base(DeclarativeBase):
pass
elif base_type.declbasenometa:
class Base(DeclarativeBaseNoMeta):
pass
elif base_type.declbasefn:
Base = declarative_base()
elif base_type.asdeclarative:
@as_declarative()
class Base:
pass
else:
base_type.fail()
# check for direct assignment
is_(Base.registry.constructor, Base.__init__)
is_(Base.__dict__["__init__"], Base.__init__)
class fakeself:
foo = None
bar = None
fs = fakeself()
Base.__init__(fs, foo="bar", bar="bat")
eq_(fs.foo, "bar")
eq_(fs.bar, "bat")
@testing.variation(
"base_type",
[
"declbase",
"declbasenometa",
"declbasefn",
"asdeclarative",
"mixinonbase",
],
)
def test_reg_constructor_custom_init(self, base_type):
"""test for #9171 testing what an explicit __init__ does.
Here we decide that newer DeclarativeBase superclasses should
honor the ``__init__`` that's given.
"""
m1 = mock.Mock()
if base_type.declbase:
class Base(DeclarativeBase):
def __init__(self, x=None):
m1.init(x)
elif base_type.declbasenometa:
class Base(DeclarativeBaseNoMeta):
def __init__(self, x=None):
m1.init(x)
elif base_type.declbasefn:
class _B:
def __init__(self, x=None):
m1.init(x)
Base = declarative_base(cls=_B)
elif base_type.mixinonbase:
class Mixin:
def __init__(self, x=None):
m1.init(x)
class Base(Mixin, DeclarativeBase):
pass
elif base_type.asdeclarative:
@as_declarative()
class Base:
def __init__(self, x=None):
m1.init(x)
else:
base_type.fail()
class fakeself:
pass
fs = fakeself()
if (
base_type.declbase
or base_type.declbasenometa
or base_type.mixinonbase
):
Base.__init__(fs, x=5)
eq_(m1.mock_calls, [mock.call.init(5)])
else:
with expect_raises_message(
TypeError, "'x' is an invalid keyword argument for fakeself"
):
Base.__init__(fs, x=5)
def test_insert_sentinel_param_custom_type_maintained(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id: Mapped[uuid.UUID] = mapped_column(
default=uuid.uuid4, primary_key=True, insert_sentinel=True
)
data: Mapped[str]
is_(A.id.expression.type._type_affinity, Uuid)
def test_insert_sentinel_param_default_type(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(
primary_key=True, insert_sentinel=True
)
data: Mapped[str]
is_(A.id.expression.type._type_affinity, Integer)
@testing.variation("argument", ["version_id_col", "polymorphic_on"])
@testing.variation("column_type", ["anno", "non_anno", "plain_column"])
def test_mapped_column_version_poly_arg(
self, decl_base, column_type, argument
):
"""test #9240"""
if column_type.anno:
class A(decl_base):
__tablename__ = "a"
a: Mapped[int] = mapped_column(primary_key=True)
b: Mapped[int] = mapped_column()
c: Mapped[str] = mapped_column()
if argument.version_id_col:
__mapper_args__ = {"version_id_col": b}
elif argument.polymorphic_on:
__mapper_args__ = {"polymorphic_on": c}
else:
argument.fail()
elif column_type.non_anno:
class A(decl_base):
__tablename__ = "a"
a = mapped_column(Integer, primary_key=True)
b = mapped_column(Integer)
c = mapped_column(String)
if argument.version_id_col:
__mapper_args__ = {"version_id_col": b}
elif argument.polymorphic_on:
__mapper_args__ = {"polymorphic_on": c}
else:
argument.fail()
elif column_type.plain_column:
class A(decl_base):
__tablename__ = "a"
a = Column(Integer, primary_key=True)
b = Column(Integer)
c = Column(String)
if argument.version_id_col:
__mapper_args__ = {"version_id_col": b}
elif argument.polymorphic_on:
__mapper_args__ = {"polymorphic_on": c}
else:
argument.fail()
else:
column_type.fail()
if argument.version_id_col:
assert A.__mapper__.version_id_col is A.__table__.c.b
elif argument.polymorphic_on:
assert A.__mapper__.polymorphic_on is A.__table__.c.c
else:
argument.fail()
@testing.variation(
"pk_type", ["single", "tuple", "list", "single_str", "list_str"]
)
@testing.variation("column_type", ["anno", "non_anno", "plain_column"])
def test_mapped_column_pk_arg(self, decl_base, column_type, pk_type):
"""test #9240"""
if column_type.anno:
class A(decl_base):
__tablename__ = "a"
a: Mapped[int] = mapped_column()
b: Mapped[int] = mapped_column()
if pk_type.single:
__mapper_args__ = {"primary_key": a}
elif pk_type.tuple:
__mapper_args__ = {"primary_key": (a, b)}
elif pk_type.list:
__mapper_args__ = {"primary_key": [a, b]}
elif pk_type.single_str:
__mapper_args__ = {"primary_key": "a"}
elif pk_type.list_str:
__mapper_args__ = {"primary_key": ["a", "b"]}
else:
pk_type.fail()
elif column_type.non_anno:
class A(decl_base):
__tablename__ = "a"
a = mapped_column(Integer)
b = mapped_column(Integer)
if pk_type.single:
__mapper_args__ = {"primary_key": a}
elif pk_type.tuple:
__mapper_args__ = {"primary_key": (a, b)}
elif pk_type.list:
__mapper_args__ = {"primary_key": [a, b]}
elif pk_type.single_str:
__mapper_args__ = {"primary_key": "a"}
elif pk_type.list_str:
__mapper_args__ = {"primary_key": ["a", "b"]}
else:
pk_type.fail()
elif column_type.plain_column:
class A(decl_base):
__tablename__ = "a"
a = Column(Integer)
b = Column(Integer)
if pk_type.single:
__mapper_args__ = {"primary_key": a}
elif pk_type.tuple:
__mapper_args__ = {"primary_key": (a, b)}
elif pk_type.list:
__mapper_args__ = {"primary_key": [a, b]}
elif pk_type.single_str:
__mapper_args__ = {"primary_key": "a"}
elif pk_type.list_str:
__mapper_args__ = {"primary_key": ["a", "b"]}
else:
pk_type.fail()
else:
column_type.fail()
if pk_type.single or pk_type.single_str:
assert A.__mapper__.primary_key[0] is A.__table__.c.a
else:
assert A.__mapper__.primary_key[0] is A.__table__.c.a
assert A.__mapper__.primary_key[1] is A.__table__.c.b
def test_mapper_pk_arg_degradation_no_col(self, decl_base):
with expect_raises_message(
exc.ArgumentError,
"Can't determine primary_key column 'q' - no attribute is "
"mapped to this name.",
):
class A(decl_base):
__tablename__ = "a"
a: Mapped[int] = mapped_column()
b: Mapped[int] = mapped_column()
__mapper_args__ = {"primary_key": "q"}
@testing.variation("proptype", ["relationship", "colprop"])
def test_mapper_pk_arg_degradation_is_not_a_col(self, decl_base, proptype):
with expect_raises_message(
exc.ArgumentError,
"Can't determine primary_key column 'b'; property does "
"not refer to a single mapped Column",
):
class A(decl_base):
__tablename__ = "a"
a: Mapped[int] = mapped_column(Integer)
if proptype.colprop:
b: Mapped[int] = column_property(a + 5)
elif proptype.relationship:
b = relationship("B")
else:
proptype.fail()
__mapper_args__ = {"primary_key": "b"}
@testing.variation(
"argument", ["version_id_col", "polymorphic_on", "primary_key"]
)
@testing.variation("argtype", ["callable", "fixed"])
@testing.variation("column_type", ["mapped_column", "plain_column"])
def test_mapped_column_pk_arg_via_mixin(
self, decl_base, argtype, column_type, argument
):
"""test #9240"""
class Mixin:
if column_type.mapped_column:
a: Mapped[int] = mapped_column()
b: Mapped[int] = mapped_column()
c: Mapped[str] = mapped_column()
elif column_type.plain_column:
a = Column(Integer)
b = Column(Integer)
c = Column(String)
else:
column_type.fail()
if argtype.callable:
@declared_attr.directive
@classmethod
def __mapper_args__(cls):
if argument.primary_key:
return {"primary_key": [cls.a, cls.b]}
elif argument.version_id_col:
return {"version_id_col": cls.b, "primary_key": cls.a}
elif argument.polymorphic_on:
return {"polymorphic_on": cls.c, "primary_key": cls.a}
else:
argument.fail()
elif argtype.fixed:
if argument.primary_key:
__mapper_args__ = {"primary_key": [a, b]}
elif argument.version_id_col:
__mapper_args__ = {"primary_key": a, "version_id_col": b}
elif argument.polymorphic_on:
__mapper_args__ = {"primary_key": a, "polymorphic_on": c}
else:
argument.fail()
else:
argtype.fail()
class A(Mixin, decl_base):
__tablename__ = "a"
if argument.primary_key:
assert A.__mapper__.primary_key[0] is A.__table__.c.a
assert A.__mapper__.primary_key[1] is A.__table__.c.b
elif argument.version_id_col:
assert A.__mapper__.version_id_col is A.__table__.c.b
elif argument.polymorphic_on:
assert A.__mapper__.polymorphic_on is A.__table__.c.c
else:
argtype.fail()
def test_dispose_attrs(self):
reg = registry()
class Foo:
__tablename__ = "some_table"
id = Column(Integer, primary_key=True)
reg.mapped(Foo)
is_(Foo.__mapper__, class_mapper(Foo))
is_(Foo.__table__, class_mapper(Foo).local_table)
clear_mappers()
assert not hasattr(Foo, "__mapper__")
assert not hasattr(Foo, "__table__")
from sqlalchemy.orm import clsregistry
assert clsregistry._key_is_empty(
"Foo", reg._class_registry, lambda cls: cls is Foo
)
def test_difficult_class(self, metadata):
"""test no getattr() errors with a customized class"""
# metaclass to mock the way zope.interface breaks getattr()
class BrokenMeta(type):
def __getattribute__(self, attr):
if attr == "xyzzy":
raise AttributeError("xyzzy")
else:
return object.__getattribute__(self, attr)
# even though this class has an xyzzy attribute, getattr(cls,"xyzzy")
# fails
class BrokenParent(metaclass=BrokenMeta):
xyzzy = "magic"
# _as_declarative() inspects obj.__class__.__bases__
class User(BrokenParent, ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
reg = registry(metadata=metadata)
mp = reg.map_declaratively(User)
assert mp is inspect(User)
assert mp is User.__mapper__
def test_undefer_column_name(self):
# TODO: not sure if there was an explicit
# test for this elsewhere
foo = Column(Integer)
eq_(str(foo), "(no name)")
eq_(foo.key, None)
eq_(foo.name, None)
decl_base._undefer_column_name("foo", foo)
eq_(str(foo), "foo")
eq_(foo.key, "foo")
eq_(foo.name, "foo")
def test_string_dependency_resolution_schemas(self):
Base = declarative_base()
class User(Base):
__tablename__ = "users"
__table_args__ = {"schema": "fooschema"}
id = Column(Integer, primary_key=True)
name = Column(String(50))
props = relationship(
"Prop",
secondary="fooschema.user_to_prop",
primaryjoin="User.id==fooschema.user_to_prop.c.user_id",
secondaryjoin="fooschema.user_to_prop.c.prop_id==Prop.id",
backref="users",
)
class Prop(Base):
__tablename__ = "props"
__table_args__ = {"schema": "fooschema"}
id = Column(Integer, primary_key=True)
name = Column(String(50))
user_to_prop = Table(
"user_to_prop",
Base.metadata,
Column("user_id", Integer, ForeignKey("fooschema.users.id")),
Column("prop_id", Integer, ForeignKey("fooschema.props.id")),
schema="fooschema",
)
configure_mappers()
assert (
class_mapper(User).get_property("props").secondary is user_to_prop
)
def test_string_dependency_resolution_schemas_no_base(self):
"""
found_during_type_annotation
"""
reg = registry()
@reg.mapped
class User:
__tablename__ = "users"
__table_args__ = {"schema": "fooschema"}
id = Column(Integer, primary_key=True)
name = Column(String(50))
props = relationship(
"Prop",
secondary="fooschema.user_to_prop",
primaryjoin="User.id==fooschema.user_to_prop.c.user_id",
secondaryjoin="fooschema.user_to_prop.c.prop_id==Prop.id",
backref="users",
)
@reg.mapped
class Prop:
__tablename__ = "props"
__table_args__ = {"schema": "fooschema"}
id = Column(Integer, primary_key=True)
name = Column(String(50))
user_to_prop = Table(
"user_to_prop",
reg.metadata,
Column("user_id", Integer, ForeignKey("fooschema.users.id")),
Column("prop_id", Integer, ForeignKey("fooschema.props.id")),
schema="fooschema",
)
configure_mappers()
assert (
class_mapper(User).get_property("props").secondary is user_to_prop
)
def test_string_dependency_resolution_annotations(self):
Base = declarative_base()
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
name = Column(String)
children = relationship(
"Child",
primaryjoin="Parent.name=="
"remote(foreign(func.lower(Child.name_upper)))",
)
class Child(Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
name_upper = Column(String)
configure_mappers()
eq_(
Parent.children.property._calculated_foreign_keys,
{Child.name_upper.property.columns[0]},
)
def test_class_has_registry_attr(self, registry):
existing_registry = registry
Base = registry.generate_base()
class A(Base):
__tablename__ = "a"
registry = {"foo": "bar"}
id = Column(Integer, primary_key=True)
data = Column(String)
class SubA(A):
pass
is_(Base.registry, existing_registry)
is_(inspect(A).registry, existing_registry)
eq_(A.registry, {"foo": "bar"})
is_(inspect(SubA).registry, existing_registry)
eq_(SubA.registry, {"foo": "bar"})
def test_class_does_not_have_registry_attr(self):
with assertions.expect_raises_message(
exc.InvalidRequestError,
r"Declarative base class has no 'registry' attribute, or "
r"registry is not a sqlalchemy.orm.registry\(\) object",
):
class Base(metaclass=DeclarativeMeta):
metadata = sa.MetaData()
def test_shared_class_registry(self):
reg = {}
Base1 = declarative_base(class_registry=reg)
Base2 = declarative_base(class_registry=reg)
class A(Base1):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
class B(Base2):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
aid = Column(Integer, ForeignKey(A.id))
as_ = relationship("A")
assert B.as_.property.mapper.class_ is A
def test_custom_base(self):
class MyBase:
def foobar(self):
return "foobar"
Base = declarative_base(cls=MyBase)
assert hasattr(Base, "metadata")
assert Base().foobar() == "foobar"
def test_as_declarative(self, metadata):
class User(ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship("Address", backref="user")
class Address(ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
reg = registry(metadata=metadata)
reg.mapped(User)
reg.mapped(Address)
reg.metadata.create_all(testing.db)
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
with Session(testing.db) as sess:
sess.add(u1)
sess.commit()
with Session(testing.db) as sess:
eq_(
sess.query(User).all(),
[
User(
name="u1",
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_map_declaratively(self, metadata):
class User(ComparableEntity):
__tablename__ = "users"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column("name", String(50))
addresses = relationship("Address", backref="user")
class Address(ComparableEntity):
__tablename__ = "addresses"
id = Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
)
email = Column("email", String(50))
user_id = Column("user_id", Integer, ForeignKey("users.id"))
reg = registry(metadata=metadata)
um = reg.map_declaratively(User)
am = reg.map_declaratively(Address)
is_(User.__mapper__, um)
is_(Address.__mapper__, am)
reg.metadata.create_all(testing.db)
u1 = User(
name="u1", addresses=[Address(email="one"), Address(email="two")]
)
with Session(testing.db) as sess:
sess.add(u1)
sess.commit()
with Session(testing.db) as sess:
eq_(
sess.query(User).all(),
[
User(
name="u1",
addresses=[Address(email="one"), Address(email="two")],
)
],
)
def test_custom_mapper_attribute(self):
def mymapper(cls, tbl, **kwargs):
m = sa.orm.Mapper(cls, tbl, **kwargs)
m.CHECK = True
return m
base = declarative_base()
class Foo(base):
__tablename__ = "foo"
__mapper_cls__ = mymapper
id = Column(Integer, primary_key=True)
eq_(Foo.__mapper__.CHECK, True)
def test_custom_mapper_argument(self):
def mymapper(cls, tbl, **kwargs):
m = sa.orm.Mapper(cls, tbl, **kwargs)
m.CHECK = True
return m
base = declarative_base(mapper=mymapper)
class Foo(base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
eq_(Foo.__mapper__.CHECK, True)
def test_no_change_to_all_descriptors(self):
base = declarative_base()
class Foo(base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
eq_(Foo.__mapper__.all_orm_descriptors.keys(), ["id"])
def test_cls_docstring(self):
class MyBase:
"""MyBase Docstring"""
Base = declarative_base(cls=MyBase)
eq_(Base.__doc__, MyBase.__doc__)
def test_delattr_mapped_raises(self):
Base = declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
data = Column(String)
def go():
del Foo.data
assert_raises_message(
NotImplementedError,
"Can't un-map individual mapped attributes on a mapped class.",
go,
)
def test_delattr_hybrid_fine(self):
Base = declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
data = Column(String)
@hybrid_property
def data_hybrid(self):
return self.data
assert "data_hybrid" in Foo.__mapper__.all_orm_descriptors.keys()
del Foo.data_hybrid
assert "data_hybrid" not in Foo.__mapper__.all_orm_descriptors.keys()
assert not hasattr(Foo, "data_hybrid")
def test_setattr_hybrid_updates_descriptors(self):
Base = declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
data = Column(String)
assert "data_hybrid" not in Foo.__mapper__.all_orm_descriptors.keys()
@hybrid_property
def data_hybrid(self):
return self.data
Foo.data_hybrid = data_hybrid
assert "data_hybrid" in Foo.__mapper__.all_orm_descriptors.keys()
del Foo.data_hybrid
assert "data_hybrid" not in Foo.__mapper__.all_orm_descriptors.keys()
assert not hasattr(Foo, "data_hybrid")
def test_kw_support_in_declarative_meta_init(self, registry):
# This will not fail if DeclarativeMeta __init__ supports **kw
reg = registry
class Base(metaclass=DeclarativeMeta):
__abstract__ = True
registry = reg
metadata = reg.metadata
class BaseUser(Base):
__tablename__ = "base"
id_ = Column(Integer, primary_key=True)
@classmethod
def __init_subclass__(cls, random_keyword=False, **kw):
super().__init_subclass__(**kw)
cls._set_random_keyword_used_here = random_keyword
class User(BaseUser):
__tablename__ = "user"
id_ = Column(Integer, ForeignKey("base.id_"), primary_key=True)
# Check the default option
eq_(User._set_random_keyword_used_here, False)
# Build the metaclass with a keyword!
bases = (BaseUser,)
UserType = DeclarativeMeta("UserType", bases, {}, random_keyword=True)
# Check to see if __init_subclass__ works in supported versions
eq_(UserType._set_random_keyword_used_here, True)
@testing.variation(
"basetype",
["DeclarativeBase", "DeclarativeBaseNoMeta", "MappedAsDataclass"],
)
def test_kw_support_in_declarative_base(self, basetype):
"""test #10732"""
if basetype.DeclarativeBase:
class Base(DeclarativeBase):
pass
elif basetype.DeclarativeBaseNoMeta:
class Base(DeclarativeBaseNoMeta):
pass
elif basetype.MappedAsDataclass:
class Base(MappedAsDataclass):
pass
else:
basetype.fail()
class Mixin:
def __init_subclass__(cls, random_keyword: bool, **kw) -> None:
super().__init_subclass__(**kw)
cls._set_random_keyword_used_here = random_keyword
class User(Base, Mixin, random_keyword=True):
__tablename__ = "user"
id_ = Column(Integer, primary_key=True)
eq_(User._set_random_keyword_used_here, True)
def test_declarative_base_bad_registry(self):
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Declarative base class has a 'registry' attribute "
"that is not an instance",
):
class MyClass(DeclarativeBase):
registry = {"foo": "bar"}
def test_declarative_base_registry_and_type_map(self):
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Declarative base class has both a 'registry' attribute and a "
"type_annotation_map entry. Per-base type_annotation_maps",
):
class MyClass(DeclarativeBase):
registry = registry()
type_annotation_map = {int: Integer}
@testing.combinations(DeclarativeBase, DeclarativeBaseNoMeta)
def test_declarative_base_used_directly(self, base):
with assertions.expect_raises_message(
exc.InvalidRequestError,
f"Cannot use {base.__name__!r} directly as a declarative base",
):
class MyClass(base):
__tablename__ = "foobar"
id: int = mapped_column(primary_key=True)
with assertions.expect_raises_message(
exc.InvalidRequestError,
f"Cannot use {base.__name__!r} directly as a declarative base",
):
class MyClass2(base):
__table__ = sa.Table(
"foobar",
sa.MetaData(),
sa.Column("id", Integer, primary_key=True),
)
@testing.combinations(DeclarativeBase, DeclarativeBaseNoMeta)
def test_declarative_base_fn_ok(self, base):
# __tablename__ or __table__ as declared_attr are ok in the base
class MyBase1(base):
@declared_attr
def __tablename__(cls):
return cls.__name__
class MyBase2(base):
@declared_attr
def __table__(cls):
return sa.Table(
"foobar",
sa.MetaData(),
sa.Column("id", Integer, primary_key=True),
)
def test_cannot_add_to_selectable(self):
class Base(DeclarativeBase):
pass
class Foo(Base):
__table__ = (
select(sa.Column("x", sa.Integer, primary_key=True))
.select_from(sa.table("foo"))
.subquery("foo")
)
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Cannot add a new attribute to mapped class 'Foo' "
"because it's not mapped against a table",
):
Foo.y = mapped_column(sa.Text)
def test_default_column_order(self, decl_base):
class M1:
a: Mapped[int]
b: Mapped[int] = mapped_column(primary_key=True)
class M2(decl_base):
__abstract__ = True
c: Mapped[int]
d: Mapped[int]
class M(M1, M2, decl_base):
e: Mapped[int]
f: Mapped[int]
g: Mapped[int]
__tablename__ = "m"
actual = list(M.__table__.c.keys())
expected = ["e", "f", "g", "a", "b", "c", "d"]
eq_(actual, expected)
def test_custom_column_sort_order(self, decl_base):
class M1:
a: Mapped[int] = mapped_column(sort_order=-42)
b: Mapped[int] = mapped_column(primary_key=True)
class M2(decl_base):
__abstract__ = True
c: Mapped[int] = mapped_column(sort_order=-1)
d: Mapped[int]
class M(M1, M2, decl_base):
e: Mapped[int]
f: Mapped[int] = mapped_column(sort_order=10)
g: Mapped[int] = mapped_column(sort_order=-10)
__tablename__ = "m"
actual = list(M.__table__.c.keys())
expected = ["a", "g", "c", "e", "b", "d", "f"]
eq_(actual, expected)
@testing.combinations(
("declarative_base_nometa_superclass",),
("declarative_base_superclass",),
("dynamic",),
("explicit",),
argnames="base_style",
id_="s",
)
| DeclarativeBaseSetupsTest |
python | tensorflow__tensorflow | third_party/xla/xla/codegen/testlib/kernel_runner_test.py | {
"start": 924,
"end": 1728
} | class ____(absltest.TestCase):
def test_from_instruction(self):
shape = xla_extension.Shape.array_shape(np.dtype(np.int32), (4,))
hlo_parameter = _extension.HloInstruction.create_parameter(
0, shape, "input"
)
hlo_op = _extension.HloInstruction.create_variadic(
shape, _extension.HloOpcode.sine, [hlo_parameter]
)
hlo_module = _extension.HloModule(hlo_op.name() + "_module")
hlo_module.add_entry_computation(
_extension.build_hlo_computation(hlo_op, hlo_parameter)
)
expected_parts = [
"HloModule sine_module,",
"{",
"%input = s32[4]{0} parameter(0)",
"ROOT %sine = s32[4]{0} sine(%input)",
"}",
]
self.assertContainsInOrder(
expected_parts,
str(hlo_module),
)
| HloModuleParse |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/job_snapshot.py | {
"start": 3333,
"end": 15926
} | class ____(IHaveNew):
name: str
description: Optional[str]
tags: Mapping[str, Any]
# It is important that run_tags is nullable to distinguish in host code between
# snapshots from older code servers where run_tags does not exist as a field (and is
# therefore None) vs snapshots from newer code servers where run_tags is always set, if
# sometimes empty. In the None case, we need to set run_tags to tags (at the level of
# ExternalJob) to maintain backcompat.
run_tags: Optional[Mapping[str, Any]]
config_schema_snapshot: ConfigSchemaSnapshot
dagster_type_namespace_snapshot: DagsterTypeNamespaceSnapshot
node_defs_snapshot: NodeDefsSnapshot
dep_structure_snapshot: DependencyStructureSnapshot
mode_def_snaps: Sequence[ModeDefSnap]
lineage_snapshot: Optional["JobLineageSnap"]
graph_def_name: str
metadata: Mapping[str, MetadataValue]
owners: Optional[Sequence[str]]
def __new__(
cls,
name: str,
description: Optional[str],
tags: Optional[Mapping[str, Any]],
run_tags: Optional[Mapping[str, Any]],
config_schema_snapshot: ConfigSchemaSnapshot,
dagster_type_namespace_snapshot: DagsterTypeNamespaceSnapshot,
node_defs_snapshot: NodeDefsSnapshot,
dep_structure_snapshot: DependencyStructureSnapshot,
mode_def_snaps: Sequence[ModeDefSnap],
lineage_snapshot: Optional["JobLineageSnap"],
graph_def_name: str,
metadata: Optional[Mapping[str, RawMetadataValue]],
owners: Optional[Sequence[str]] = None,
):
return super().__new__(
cls,
name=name,
description=description,
tags=tags or {},
run_tags=run_tags,
config_schema_snapshot=config_schema_snapshot,
dagster_type_namespace_snapshot=dagster_type_namespace_snapshot,
node_defs_snapshot=node_defs_snapshot,
dep_structure_snapshot=dep_structure_snapshot,
mode_def_snaps=mode_def_snaps,
lineage_snapshot=lineage_snapshot,
graph_def_name=graph_def_name,
metadata=normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str)
),
owners=owners,
)
@classmethod
def from_job_def(cls, job_def: JobDefinition) -> "JobSnap":
check.inst_param(job_def, "job_def", JobDefinition)
lineage = None
if job_def.op_selection_data:
lineage = JobLineageSnap(
parent_snapshot_id=job_def.op_selection_data.parent_job_def.get_job_snapshot_id(),
op_selection=sorted(job_def.op_selection_data.op_selection),
resolved_op_selection=job_def.op_selection_data.resolved_op_selection,
)
if job_def.asset_selection_data:
lineage = JobLineageSnap(
parent_snapshot_id=job_def.asset_selection_data.parent_job_def.get_job_snapshot_id(),
asset_selection=job_def.asset_selection_data.asset_selection,
asset_check_selection=job_def.asset_selection_data.asset_check_selection,
)
return JobSnap(
name=job_def.name,
description=job_def.description,
tags=job_def.tags,
run_tags=job_def.run_tags if job_def.has_separately_defined_run_tags else None,
metadata=job_def.metadata,
config_schema_snapshot=build_config_schema_snapshot(job_def),
dagster_type_namespace_snapshot=build_dagster_type_namespace_snapshot(job_def),
node_defs_snapshot=build_node_defs_snapshot(job_def),
dep_structure_snapshot=build_dep_structure_snapshot_from_graph_def(job_def.graph),
mode_def_snaps=[build_mode_def_snap(job_def)],
lineage_snapshot=lineage,
graph_def_name=job_def.graph.name,
owners=job_def.owners,
)
@cached_property
def snapshot_id(self) -> str:
return _create_job_snapshot_id(self)
def get_node_def_snap(self, node_def_name: str) -> Union[OpDefSnap, GraphDefSnap]:
check.str_param(node_def_name, "node_def_name")
for node_def_snap in self.node_defs_snapshot.op_def_snaps:
if node_def_snap.name == node_def_name:
return node_def_snap
for graph_def_snap in self.node_defs_snapshot.graph_def_snaps:
if graph_def_snap.name == node_def_name:
return graph_def_snap
check.failed("not found")
def has_node_name(self, node_name: str) -> bool:
check.str_param(node_name, "node_name")
for node_invocation_snap in self.dep_structure_snapshot.node_invocation_snaps:
if node_invocation_snap.node_name == node_name:
return True
return False
def get_config_type_from_node_def_snap(
self,
node_def_snap: Union[OpDefSnap, GraphDefSnap],
) -> Optional[ConfigType]:
check.inst_param(node_def_snap, "node_def_snap", (OpDefSnap, GraphDefSnap))
if node_def_snap.config_field_snap:
config_type_key = node_def_snap.config_field_snap.type_key
if self.config_schema_snapshot.has_config_snap(config_type_key):
return construct_config_type_from_snap(
self.config_schema_snapshot.get_config_snap(config_type_key),
self.config_schema_snapshot.all_config_snaps_by_key,
)
return None
@property
def node_names(self) -> Sequence[str]:
return [ss.node_name for ss in self.dep_structure_snapshot.node_invocation_snaps]
@property
def node_names_in_topological_order(self) -> Sequence[str]:
upstream_outputs = {}
for node_invocation_snap in self.dep_structure_snapshot.node_invocation_snaps:
node_name = node_invocation_snap.node_name
upstream_outputs[node_name] = {
upstream_output_snap.node_name
for input_dep_snap in node_invocation_snap.input_dep_snaps
for upstream_output_snap in input_dep_snap.upstream_output_snaps
}
return toposort_flatten(upstream_outputs)
def _construct_enum_from_snap(config_type_snap: ConfigTypeSnap):
enum_values = check.list_param(config_type_snap.enum_values, "enum_values", ConfigEnumValueSnap)
return Enum(
name=config_type_snap.key,
enum_values=[
EnumValue(enum_value_snap.value, description=enum_value_snap.description)
for enum_value_snap in enum_values
],
)
def _construct_fields(
config_type_snap: ConfigTypeSnap,
config_snap_map: Mapping[str, ConfigTypeSnap],
) -> Mapping[str, Field]:
fields = check.not_none(config_type_snap.fields)
return {
cast("str", field.name): Field(
construct_config_type_from_snap(config_snap_map[field.type_key], config_snap_map),
description=field.description,
is_required=field.is_required,
default_value=(
deserialize_value(cast("str", field.default_value_as_json_str))
if field.default_provided
else FIELD_NO_DEFAULT_PROVIDED
),
)
for field in fields
}
def _construct_selector_from_snap(config_type_snap, config_snap_map):
check.list_param(config_type_snap.fields, "config_field_snap", ConfigFieldSnap)
return Selector(
fields=_construct_fields(config_type_snap, config_snap_map),
description=config_type_snap.description,
)
def _construct_shape_from_snap(config_type_snap, config_snap_map):
check.list_param(config_type_snap.fields, "config_field_snap", ConfigFieldSnap)
return Shape(
fields=_construct_fields(config_type_snap, config_snap_map),
description=config_type_snap.description,
)
def _construct_permissive_from_snap(config_type_snap, config_snap_map):
check.opt_list_param(config_type_snap.fields, "config_field_snap", ConfigFieldSnap)
return Permissive(
fields=_construct_fields(config_type_snap, config_snap_map),
description=config_type_snap.description,
)
def _construct_scalar_union_from_snap(config_type_snap, config_snap_map):
check.list_param(config_type_snap.type_param_keys, "type_param_keys", str)
check.invariant(
len(config_type_snap.type_param_keys) == 2,
f"Expect SCALAR_UNION to provide a scalar key and a non scalar key. Snapshot Provided: {config_type_snap.type_param_keys}",
)
return ScalarUnion(
scalar_type=construct_config_type_from_snap(
config_snap_map[config_type_snap.type_param_keys[0]], config_snap_map
),
non_scalar_schema=construct_config_type_from_snap(
config_snap_map[config_type_snap.type_param_keys[1]], config_snap_map
),
)
def _construct_array_from_snap(config_type_snap, config_snap_map):
check.list_param(config_type_snap.type_param_keys, "type_param_keys", str)
check.invariant(
len(config_type_snap.type_param_keys) == 1,
f"Expect ARRAY to provide a single inner type. Snapshot provided: {config_type_snap.type_param_keys}",
)
return Array(
inner_type=construct_config_type_from_snap(
config_snap_map[config_type_snap.type_param_keys[0]], config_snap_map
)
)
def _construct_map_from_snap(config_type_snap, config_snap_map):
check.list_param(config_type_snap.type_param_keys, "type_param_keys", str)
check.invariant(
len(config_type_snap.type_param_keys) == 2,
f"Expect map to provide exactly two types (key, value). Snapshot provided: {config_type_snap.type_param_keys}",
)
return Map(
key_type=construct_config_type_from_snap(
config_snap_map[config_type_snap.type_param_keys[0]],
config_snap_map,
),
inner_type=construct_config_type_from_snap(
config_snap_map[config_type_snap.type_param_keys[1]],
config_snap_map,
),
# In a Map, the given_name stores the optional key_label_name
key_label_name=config_type_snap.given_name,
)
def _construct_noneable_from_snap(config_type_snap, config_snap_map):
check.list_param(config_type_snap.type_param_keys, "type_param_keys", str)
check.invariant(
len(config_type_snap.type_param_keys) == 1,
f"Expect NONEABLE to provide a single inner type. Snapshot provided: {config_type_snap.type_param_keys}",
)
return Noneable(
construct_config_type_from_snap(
config_snap_map[config_type_snap.type_param_keys[0]], config_snap_map
)
)
def construct_config_type_from_snap(
config_type_snap: ConfigTypeSnap, config_snap_map: Mapping[str, ConfigTypeSnap]
) -> ConfigType:
check.inst_param(config_type_snap, "config_type_snap", ConfigTypeSnap)
check.mapping_param(config_snap_map, "config_snap_map", key_type=str, value_type=ConfigTypeSnap)
if config_type_snap.kind in (ConfigTypeKind.SCALAR, ConfigTypeKind.ANY):
return get_builtin_scalar_by_name(config_type_snap.key)
elif config_type_snap.kind == ConfigTypeKind.ENUM:
return _construct_enum_from_snap(config_type_snap)
elif config_type_snap.kind == ConfigTypeKind.SELECTOR:
return _construct_selector_from_snap(config_type_snap, config_snap_map)
elif config_type_snap.kind == ConfigTypeKind.STRICT_SHAPE:
return _construct_shape_from_snap(config_type_snap, config_snap_map)
elif config_type_snap.kind == ConfigTypeKind.PERMISSIVE_SHAPE:
return _construct_permissive_from_snap(config_type_snap, config_snap_map)
elif config_type_snap.kind == ConfigTypeKind.SCALAR_UNION:
return _construct_scalar_union_from_snap(config_type_snap, config_snap_map)
elif config_type_snap.kind == ConfigTypeKind.ARRAY:
return _construct_array_from_snap(config_type_snap, config_snap_map)
elif config_type_snap.kind == ConfigTypeKind.MAP:
return _construct_map_from_snap(config_type_snap, config_snap_map)
elif config_type_snap.kind == ConfigTypeKind.NONEABLE:
return _construct_noneable_from_snap(config_type_snap, config_snap_map)
check.failed(f"Could not evaluate config type snap kind: {config_type_snap.kind}")
@whitelist_for_serdes(
storage_name="PipelineSnapshotLineage",
storage_field_names={
"op_selection": "solid_selection",
"resolved_op_selection": "solids_to_execute",
},
)
@record
| JobSnap |
python | django__django | django/contrib/gis/db/backends/oracle/operations.py | {
"start": 1322,
"end": 2047
} | class ____(SpatialOperator):
sql_template = "SDO_RELATE(%(lhs)s, %(rhs)s, 'mask=%(mask)s') = 'TRUE'"
def check_relate_argument(self, arg):
masks = (
"TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|"
"CONTAINS|COVERS|ANYINTERACT|ON"
)
mask_regex = re.compile(r"^(%s)(\+(%s))*$" % (masks, masks), re.I)
if not isinstance(arg, str) or not mask_regex.match(arg):
raise ValueError('Invalid SDO_RELATE mask: "%s"' % arg)
def as_sql(self, connection, lookup, template_params, sql_params):
template_params["mask"] = sql_params[-1]
return super().as_sql(connection, lookup, template_params, sql_params[:-1])
| SDORelate |
python | pallets__flask | src/flask/config.py | {
"start": 1094,
"end": 13219
} | class ____(dict): # type: ignore[type-arg]
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(
self,
root_path: str | os.PathLike[str],
defaults: dict[str, t.Any] | None = None,
) -> None:
super().__init__(defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: ``True`` if the file was loaded successfully.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError(
f"The environment variable {variable_name!r} is not set"
" and as such configuration could not be loaded. Set"
" this variable and make it point to a configuration"
" file"
)
return self.from_pyfile(rv, silent=silent)
def from_prefixed_env(
self, prefix: str = "FLASK", *, loads: t.Callable[[str], t.Any] = json.loads
) -> bool:
"""Load any environment variables that start with ``FLASK_``,
dropping the prefix from the env key for the config key. Values
are passed through a loading function to attempt to convert them
to more specific types than strings.
Keys are loaded in :func:`sorted` order.
The default loading function attempts to parse values as any
valid JSON type, including dicts and lists.
Specific items in nested dicts can be set by separating the
keys with double underscores (``__``). If an intermediate key
doesn't exist, it will be initialized to an empty dict.
:param prefix: Load env vars that start with this prefix,
separated with an underscore (``_``).
:param loads: Pass each string value to this function and use
the returned value as the config value. If any error is
raised it is ignored and the value remains a string. The
default is :func:`json.loads`.
.. versionadded:: 2.1
"""
prefix = f"{prefix}_"
for key in sorted(os.environ):
if not key.startswith(prefix):
continue
value = os.environ[key]
key = key.removeprefix(prefix)
try:
value = loads(value)
except Exception:
# Keep the value as a string if loading failed.
pass
if "__" not in key:
# A non-nested key, set directly.
self[key] = value
continue
# Traverse nested dictionaries with keys separated by "__".
current = self
*parts, tail = key.split("__")
for part in parts:
# If an intermediate dict does not exist, create it.
if part not in current:
current[part] = {}
current = current[part]
current[tail] = value
return True
def from_pyfile(
self, filename: str | os.PathLike[str], silent: bool = False
) -> bool:
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: ``True`` if the file was loaded successfully.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = types.ModuleType("config")
d.__file__ = filename
try:
with open(filename, mode="rb") as config_file:
exec(compile(config_file.read(), filename, "exec"), d.__dict__)
except OSError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
return False
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
self.from_object(d)
return True
def from_object(self, obj: object | str) -> None:
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes. :meth:`from_object`
loads only the uppercase attributes of the module/class. A ``dict``
object will not work with :meth:`from_object` because the keys of a
``dict`` are not attributes of the ``dict`` class.
Example of module-based configuration::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
Nothing is done to the object before loading. If the object is a
class and has ``@property`` attributes, it needs to be
instantiated before being passed to this method.
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
See :ref:`config-dev-prod` for an example of class-based configuration
using :meth:`from_object`.
:param obj: an import name or object
"""
if isinstance(obj, str):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def from_file(
self,
filename: str | os.PathLike[str],
load: t.Callable[[t.IO[t.Any]], t.Mapping[str, t.Any]],
silent: bool = False,
text: bool = True,
) -> bool:
"""Update the values in the config from a file that is loaded
using the ``load`` parameter. The loaded data is passed to the
:meth:`from_mapping` method.
.. code-block:: python
import json
app.config.from_file("config.json", load=json.load)
import tomllib
app.config.from_file("config.toml", load=tomllib.load, text=False)
:param filename: The path to the data file. This can be an
absolute path or relative to the config root path.
:param load: A callable that takes a file handle and returns a
mapping of loaded data from the file.
:type load: ``Callable[[Reader], Mapping]`` where ``Reader``
implements a ``read`` method.
:param silent: Ignore the file if it doesn't exist.
:param text: Open the file in text or binary mode.
:return: ``True`` if the file was loaded successfully.
.. versionchanged:: 2.3
The ``text`` parameter was added.
.. versionadded:: 2.0
"""
filename = os.path.join(self.root_path, filename)
try:
with open(filename, "r" if text else "rb") as f:
obj = load(f)
except OSError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
return self.from_mapping(obj)
def from_mapping(
self, mapping: t.Mapping[str, t.Any] | None = None, **kwargs: t.Any
) -> bool:
"""Updates the config like :meth:`update` ignoring items with
non-upper keys.
:return: Always returns ``True``.
.. versionadded:: 0.11
"""
mappings: dict[str, t.Any] = {}
if mapping is not None:
mappings.update(mapping)
mappings.update(kwargs)
for key, value in mappings.items():
if key.isupper():
self[key] = value
return True
def get_namespace(
self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
) -> dict[str, t.Any]:
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage::
app.config['IMAGE_STORE_TYPE'] = 'fs'
app.config['IMAGE_STORE_PATH'] = '/var/app/images'
app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
image_store_config = app.config.get_namespace('IMAGE_STORE_')
The resulting dictionary `image_store_config` would look like::
{
'type': 'fs',
'path': '/var/app/images',
'base_url': 'http://img.website.com'
}
This is often useful when configuration options map directly to
keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
.. versionadded:: 0.11
"""
rv = {}
for k, v in self.items():
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace) :]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return rv
def __repr__(self) -> str:
return f"<{type(self).__name__} {dict.__repr__(self)}>"
| Config |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/stackdriver.py | {
"start": 1536,
"end": 25875
} | class ____(GoogleBaseHook):
"""Stackdriver Hook for connecting with Google Cloud Stackdriver."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._policy_client = None
self._channel_client = None
def _get_policy_client(self):
if not self._policy_client:
self._policy_client = monitoring_v3.AlertPolicyServiceClient()
return self._policy_client
def _get_channel_client(self):
if not self._channel_client:
self._channel_client = monitoring_v3.NotificationChannelServiceClient()
return self._channel_client
@GoogleBaseHook.fallback_to_default_project_id
def list_alert_policies(
self,
project_id: str = PROVIDE_PROJECT_ID,
format_: str | None = None,
filter_: str | None = None,
order_by: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Fetch all the Alert Policies identified by the filter passed as filter parameter.
The desired return type can be specified by the format parameter, the supported formats
are "dict", "json" and None which returns python dictionary, stringified JSON and protobuf
respectively.
:param format_: (Optional) Desired output format of the result. The
supported formats are "dict", "json" and None which returns
python dictionary, stringified JSON and protobuf respectively.
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be included in the response.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param order_by: A comma-separated list of fields by which to sort the result.
Supports the same set of field references as the ``filter`` field. Entries
can be prefixed with a minus sign to sort by the field in descending order.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param project_id: The project to fetch alerts from.
"""
client = self._get_policy_client()
policies_ = client.list_alert_policies(
request={
"name": f"projects/{project_id}",
"filter": filter_,
"order_by": order_by,
"page_size": page_size,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
if format_ == "dict":
return [AlertPolicy.to_dict(policy) for policy in policies_]
if format_ == "json":
return [AlertPolicy.to_jsoon(policy) for policy in policies_]
return policies_
@GoogleBaseHook.fallback_to_default_project_id
def _toggle_policy_status(
self,
new_state: bool,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
client = self._get_policy_client()
policies_ = self.list_alert_policies(project_id=project_id, filter_=filter_)
for policy in policies_:
if policy.enabled != bool(new_state):
policy.enabled = bool(new_state)
mask = FieldMask(paths=["enabled"])
client.update_alert_policy(
request={"alert_policy": policy, "update_mask": mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def enable_alert_policies(
self,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Enable one or more disabled alerting policies identified by filter parameter.
Inoperative in case the policy is already enabled.
:param project_id: The project in which alert needs to be enabled.
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be enabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
self._toggle_policy_status(
new_state=True,
project_id=project_id,
filter_=filter_,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def disable_alert_policies(
self,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Disables one or more enabled alerting policies identified by filter parameter.
Inoperative in case the policy is already disabled.
:param project_id: The project in which alert needs to be disabled.
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be disabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
self._toggle_policy_status(
filter_=filter_,
project_id=project_id,
new_state=False,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def upsert_alert(
self,
alerts: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Create a new alert or updates an existing policy identified the name field in the alerts parameter.
:param project_id: The project in which alert needs to be created/updated.
:param alerts: A JSON string or file that specifies all the alerts that needs
to be either created or updated. For more details, see
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies#AlertPolicy.
(templated)
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
policy_client = self._get_policy_client()
channel_client = self._get_channel_client()
record = json.loads(alerts)
existing_policies = [
policy["name"] for policy in self.list_alert_policies(project_id=project_id, format_="dict")
]
existing_channels = [
channel["name"]
for channel in self.list_notification_channels(project_id=project_id, format_="dict")
]
policies_ = []
channels = []
for channel in record.get("channels", []):
channels.append(NotificationChannel(**channel))
for policy in record.get("policies", []):
policies_.append(AlertPolicy(**policy))
channel_name_map = {}
for channel in channels:
# This field is immutable, illegal to specifying non-default UNVERIFIED or VERIFIED, so setting default
channel.verification_status = (
monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED # type: ignore[assignment]
)
if channel.name in existing_channels:
channel_client.update_notification_channel(
request={"notification_channel": channel},
retry=retry,
timeout=timeout,
metadata=metadata,
)
else:
old_name = channel.name
del channel.name
new_channel = channel_client.create_notification_channel(
request={"name": f"projects/{project_id}", "notification_channel": channel},
retry=retry,
timeout=timeout,
metadata=metadata,
)
channel_name_map[old_name] = new_channel.name
for policy in policies_:
del policy.creation_record
del policy.mutation_record
for i, channel in enumerate(policy.notification_channels):
new_channel = channel_name_map.get(channel)
if new_channel:
policy.notification_channels[i] = new_channel
if policy.name in existing_policies:
with contextlib.suppress(InvalidArgument):
policy_client.update_alert_policy(
request={"alert_policy": policy},
retry=retry,
timeout=timeout,
metadata=metadata,
)
else:
del policy.name
for condition in policy.conditions:
del condition.name
policy_client.create_alert_policy(
request={"name": f"projects/{project_id}", "alert_policy": policy},
retry=retry,
timeout=timeout,
metadata=metadata,
)
def delete_alert_policy(
self,
name: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete an alerting policy.
:param name: The alerting policy to delete. The format is:
``projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]``.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
policy_client = self._get_policy_client()
try:
policy_client.delete_alert_policy(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
except HttpError as err:
raise AirflowException(f"Delete alerting policy failed. Error was {err.content}")
@GoogleBaseHook.fallback_to_default_project_id
def list_notification_channels(
self,
project_id: str = PROVIDE_PROJECT_ID,
format_: str | None = None,
filter_: str | None = None,
order_by: str | None = None,
page_size: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Any:
"""
Fetch all the Notification Channels identified by the filter passed as filter parameter.
The desired return type can be specified by the format parameter, the supported formats are
"dict", "json" and None which returns python dictionary, stringified JSON and protobuf
respectively.
:param format_: (Optional) Desired output format of the result. The
supported formats are "dict", "json" and None which returns
python dictionary, stringified JSON and protobuf respectively.
:param filter_: If provided, this field specifies the criteria that
must be met by notification channels to be included in the response.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param order_by: A comma-separated list of fields by which to sort the result.
Supports the same set of field references as the ``filter`` field. Entries
can be prefixed with a minus sign to sort by the field in descending order.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param page_size: The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param project_id: The project to fetch notification channels from.
"""
client = self._get_channel_client()
channels = client.list_notification_channels(
request={
"name": f"projects/{project_id}",
"filter": filter_,
"order_by": order_by,
"page_size": page_size,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
if format_ == "dict":
return [NotificationChannel.to_dict(channel) for channel in channels]
if format_ == "json":
return [NotificationChannel.to_json(channel) for channel in channels]
return channels
@GoogleBaseHook.fallback_to_default_project_id
def _toggle_channel_status(
self,
new_state: bool,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
client = self._get_channel_client()
channels = client.list_notification_channels(
request={"name": f"projects/{project_id}", "filter": filter_}
)
for channel in channels:
if channel.enabled != bool(new_state):
channel.enabled = bool(new_state)
mask = FieldMask(paths=["enabled"])
client.update_notification_channel(
request={"notification_channel": channel, "update_mask": mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def enable_notification_channels(
self,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Enable one or more disabled alerting policies identified by filter parameter.
Inoperative in case the policy is already enabled.
:param project_id: The project in which notification channels needs to be enabled.
:param filter_: If provided, this field specifies the criteria that
must be met by notification channels to be enabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
self._toggle_channel_status(
project_id=project_id,
filter_=filter_,
new_state=True,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def disable_notification_channels(
self,
project_id: str,
filter_: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Disables one or more enabled notification channels identified by filter parameter.
Inoperative in case the policy is already disabled.
:param project_id: The project in which notification channels needs to be enabled.
:param filter_: If provided, this field specifies the criteria that
must be met by alert policies to be disabled.
For more details, see https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
self._toggle_channel_status(
filter_=filter_,
project_id=project_id,
new_state=False,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def upsert_channel(
self,
channels: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> dict:
"""
Create a new notification or updates an existing notification channel.
Channel is identified by the name field in the alerts parameter.
:param channels: A JSON string or file that specifies all the alerts that needs
to be either created or updated. For more details, see
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannels.
(templated)
:param project_id: The project in which notification channels needs to be created/updated.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
channel_client = self._get_channel_client()
record = json.loads(channels)
existing_channels = [
channel["name"]
for channel in self.list_notification_channels(project_id=project_id, format_="dict")
]
channels_list = []
channel_name_map = {}
for channel in record["channels"]:
channels_list.append(NotificationChannel(**channel))
for channel in channels_list:
# This field is immutable, illegal to specifying non-default UNVERIFIED or VERIFIED, so setting default
channel.verification_status = (
monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED # type: ignore[assignment]
)
if channel.name in existing_channels:
channel_client.update_notification_channel(
request={"notification_channel": channel},
retry=retry,
timeout=timeout,
metadata=metadata,
)
else:
old_name = channel.name
del channel.name
new_channel = channel_client.create_notification_channel(
request={"name": f"projects/{project_id}", "notification_channel": channel},
retry=retry,
timeout=timeout,
metadata=metadata,
)
channel_name_map[old_name] = new_channel.name
return channel_name_map
def delete_notification_channel(
self,
name: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete a notification channel.
:param name: The alerting policy to delete. The format is:
``projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]``.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
channel_client = self._get_channel_client()
try:
channel_client.delete_notification_channel(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
except HttpError as err:
raise AirflowException(f"Delete notification channel failed. Error was {err.content}")
| StackdriverHook |
python | PrefectHQ__prefect | tests/server/orchestration/test_rules.py | {
"start": 2074,
"end": 2847
} | class ____:
@pytest.mark.parametrize(
["response_type", "response_details"],
[
(StateWaitDetails, StateWaitDetails(delay_seconds=20, reason="No!")),
(StateRejectDetails, StateRejectDetails(reason="I don't want to change!")),
(StateAbortDetails, StateAbortDetails(reason="I don't need to change!")),
],
ids=["wait", "reject", "abort"],
)
async def test_details_are_not_improperly_coerced(
self, response_type, response_details
):
status = SetStateStatus.ACCEPT
cast_result = OrchestrationResult(
state=None, status=status, details=response_details.model_dump()
)
assert isinstance(cast_result.details, response_type)
| TestOrchestrationResult |
python | ipython__ipython | tests/tclass.py | {
"start": 316,
"end": 920
} | class ____(object):
def __init__(self, name):
self.name = name
self.p = print
self.flush_stdout = sys.stdout.flush
def __del__(self):
self.p("tclass.py: deleting object:", self.name)
self.flush_stdout()
try:
name = sys.argv[1]
except IndexError:
pass
else:
if name.startswith("C"):
c = C(name)
# print("ARGV:", sys.argv, file=sys.stderr) # dbg
# This next print statement is NOT debugging, we're making the check on a
# completely separate process so we verify by capturing stdout:
print("ARGV 1-:", sys.argv[1:])
sys.stdout.flush()
| C |
python | encode__django-rest-framework | tests/test_negotiation.py | {
"start": 408,
"end": 530
} | class ____(BaseRenderer):
media_type = 'application/openapi+json;version=2.0'
format = 'swagger'
| MockOpenAPIRenderer |
python | optuna__optuna | optuna/storages/_rdb/alembic/versions/v1.3.0.a.py | {
"start": 694,
"end": 844
} | class ____(BaseModel):
__tablename__ = "trials"
trial_id = sa.Column(sa.Integer, primary_key=True)
number = sa.Column(sa.Integer)
| TrialModel |
python | PrefectHQ__prefect | src/prefect/server/utilities/database.py | {
"start": 12268,
"end": 12789
} | class ____(functions.GenericFunction[datetime.timedelta]):
"""Platform-independent difference of two timestamps. Computes d1 - d2."""
type: sa.Interval = sa.Interval()
inherit_cache: bool = True
def __init__(
self,
d1: _SQLExpressionOrLiteral[datetime.datetime],
d2: _SQLExpressionOrLiteral[datetime.datetime],
**kwargs: Any,
) -> None:
super().__init__(
sa.type_coerce(d1, Timestamp()), sa.type_coerce(d2, Timestamp()), **kwargs
)
| date_diff |
python | pennersr__django-allauth | allauth/socialaccount/providers/quickbooks/provider.py | {
"start": 346,
"end": 1784
} | class ____(OAuth2Provider):
id = "quickbooks"
# Name is displayed to ordinary users -- don't include protocol
name = "QuickBooks"
account_class = QuickBooksAccount
oauth2_adapter_class = QuickBooksOAuth2Adapter
def extract_uid(self, data):
if "sub" not in data:
raise ProviderException("QBO error", data)
return str(data["sub"])
def get_profile_fields(self):
default_fields = [
"address",
"sub",
"phoneNumber",
"givenName",
"familyName",
"email",
"emailVerified",
]
fields = self.get_settings().get("PROFILE_FIELDS", default_fields)
return fields
def get_default_scope(self):
scope = [
"openid",
"com.intuit.quickbooks.accounting",
"profile",
"phone",
]
if app_settings.QUERY_EMAIL:
scope.append("email")
return scope
def extract_common_fields(self, data):
return dict(
email=data.get("email"),
address=data.get("address"),
sub=data.get("sub"),
givenName=data.get("givenName"),
familynName=data.get("familyName"),
emailVerified=data.get("emailVerified"),
phoneNumber=data.get("phoneNumber"),
)
provider_classes = [QuickBooksOAuth2Provider]
| QuickBooksOAuth2Provider |
python | plotly__plotly.py | plotly/graph_objs/scatterternary/_stream.py | {
"start": 233,
"end": 3546
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary"
_path_str = "scatterternary.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterternary.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | SmileyChris__easy-thumbnails | demoproject/mainapp/models.py | {
"start": 88,
"end": 268
} | class ____(models.Model):
title = models.CharField(max_length=100)
image = ThumbnailerImageField(upload_to="images")
def __str__(self):
return self.title
| TestImage |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_indexing.py | {
"start": 13042,
"end": 15459
} | class ____:
# This is adapted from pandas/tests/arrays/masked/test_indexing.py
def _check_setitem_invalid(self, ser, invalid, indexer):
orig_ser = ser.copy()
with pytest.raises(TypeError, match="Invalid value"):
ser[indexer] = invalid
ser = orig_ser.copy()
with pytest.raises(TypeError, match="Invalid value"):
ser.iloc[indexer] = invalid
ser = orig_ser.copy()
with pytest.raises(TypeError, match="Invalid value"):
ser.loc[indexer] = invalid
ser = orig_ser.copy()
with pytest.raises(TypeError, match="Invalid value"):
ser[:] = invalid
def _check_setitem_valid(self, ser, value, indexer):
orig_ser = ser.copy()
ser[indexer] = value
ser = orig_ser.copy()
ser.iloc[indexer] = value
ser = orig_ser.copy()
ser.loc[indexer] = value
ser = orig_ser.copy()
ser[:] = value
_invalid_scalars = [
1 + 2j,
"True",
"1",
"1.0",
NaT,
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
_indexers = [0, [0], slice(0, 1), [True, False, False], slice(None, None, None)]
@pytest.mark.parametrize(
"invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]
)
@pytest.mark.parametrize("indexer", _indexers)
def test_setitem_validation_scalar_bool(self, invalid, indexer):
ser = Series([True, False, False], dtype="bool")
self._check_setitem_invalid(ser, invalid, indexer)
@pytest.mark.parametrize("invalid", _invalid_scalars + [True, 1.5, np.float64(1.5)])
@pytest.mark.parametrize("indexer", _indexers)
def test_setitem_validation_scalar_int(self, invalid, any_int_numpy_dtype, indexer):
ser = Series([1, 2, 3], dtype=any_int_numpy_dtype)
if isna(invalid) and invalid is not NaT and not np.isnat(invalid):
self._check_setitem_valid(ser, invalid, indexer)
else:
self._check_setitem_invalid(ser, invalid, indexer)
@pytest.mark.parametrize("invalid", _invalid_scalars + [True])
@pytest.mark.parametrize("indexer", _indexers)
def test_setitem_validation_scalar_float(self, invalid, float_numpy_dtype, indexer):
ser = Series([1, 2, None], dtype=float_numpy_dtype)
self._check_setitem_invalid(ser, invalid, indexer)
| TestSetitemValidation |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_template.py | {
"start": 3928,
"end": 5247
} | class ____(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc. See the cairo
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In cairo this is done by wrapping a cairo.Context object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the cairo backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as an RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
########################################################################
#
# The following functions and classes are for pyplot and implement
# window/figure managers, etc.
#
########################################################################
| GraphicsContextTemplate |
python | aio-libs__aiohttp | tests/test_multipart.py | {
"start": 4274,
"end": 23900
} | class ____:
async def test_next(self) -> None:
with Stream(b"Hello, world!\r\n--:") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.next()
assert b"Hello, world!" == result
assert obj.at_eof()
async def test_next_next(self) -> None:
with Stream(b"Hello, world!\r\n--:") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.next()
assert b"Hello, world!" == result
assert obj.at_eof()
result = await obj.next()
assert result is None
async def test_read(self) -> None:
with Stream(b"Hello, world!\r\n--:") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.read()
assert b"Hello, world!" == result
assert obj.at_eof()
async def test_read_chunk_at_eof(self) -> None:
with Stream(b"--:") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
obj._at_eof = True
result = await obj.read_chunk()
assert b"" == result
async def test_read_chunk_without_content_length(self) -> None:
with Stream(b"Hello, world!\r\n--:") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
c1 = await obj.read_chunk(8)
c2 = await obj.read_chunk(8)
c3 = await obj.read_chunk(8)
assert c1 + c2 == b"Hello, world!"
assert c3 == b""
async def test_read_incomplete_chunk(self) -> None:
with Stream(b"") as stream:
def prepare(data: bytes) -> bytes:
return data
with mock.patch.object(
stream,
"read",
side_effect=[
prepare(b"Hello, "),
prepare(b"World"),
prepare(b"!\r\n--:"),
prepare(b""),
],
):
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
c1 = await obj.read_chunk(8)
assert c1 == b"Hello, "
c2 = await obj.read_chunk(8)
assert c2 == b"World"
c3 = await obj.read_chunk(8)
assert c3 == b"!"
async def test_read_all_at_once(self) -> None:
with Stream(b"Hello, World!\r\n--:--\r\n") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.read_chunk()
assert b"Hello, World!" == result
result = await obj.read_chunk()
assert b"" == result
assert obj.at_eof()
async def test_read_incomplete_body_chunked(self) -> None:
with Stream(b"Hello, World!\r\n-") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = b""
with pytest.raises(AssertionError):
for _ in range(4):
result += await obj.read_chunk(7)
assert b"Hello, World!\r\n-" == result
async def test_read_boundary_with_incomplete_chunk(self) -> None:
with Stream(b"") as stream:
def prepare(data: bytes) -> bytes:
return data
with mock.patch.object(
stream,
"read",
side_effect=[
prepare(b"Hello, World"),
prepare(b"!\r\n"),
prepare(b"--:"),
prepare(b""),
],
):
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
c1 = await obj.read_chunk(12)
assert c1 == b"Hello, World"
c2 = await obj.read_chunk(8)
assert c2 == b"!"
c3 = await obj.read_chunk(8)
assert c3 == b""
async def test_multi_read_chunk(self) -> None:
with Stream(b"Hello,\r\n--:\r\n\r\nworld!\r\n--:--") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.read_chunk(8)
assert b"Hello," == result
result = await obj.read_chunk(8)
assert b"" == result
assert obj.at_eof()
async def test_read_chunk_properly_counts_read_bytes(self) -> None:
expected = b"." * 10
size = len(expected)
h = CIMultiDictProxy(CIMultiDict({"CONTENT-LENGTH": str(size)}))
with StreamWithShortenRead(expected + b"\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = bytearray()
while True:
chunk = await obj.read_chunk()
if not chunk:
break
result.extend(chunk)
assert size == len(result)
assert b"." * size == result
assert obj.at_eof()
async def test_read_does_not_read_boundary(self) -> None:
with Stream(b"Hello, world!\r\n--:") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.read()
assert b"Hello, world!" == result
assert b"--:" == (await stream.read())
async def test_multiread(self) -> None:
with Stream(b"Hello,\r\n--:\r\n\r\nworld!\r\n--:--") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.read()
assert b"Hello," == result
result = await obj.read()
assert b"" == result
assert obj.at_eof()
async def test_read_multiline(self) -> None:
with Stream(b"Hello\n,\r\nworld!\r\n--:--") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.read()
assert b"Hello\n,\r\nworld!" == result
result = await obj.read()
assert b"" == result
assert obj.at_eof()
async def test_read_respects_content_length(self) -> None:
h = CIMultiDictProxy(CIMultiDict({"CONTENT-LENGTH": "100500"}))
with Stream(b"." * 100500 + b"\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.read()
assert b"." * 100500 == result
assert obj.at_eof()
async def test_read_with_content_encoding_gzip(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_ENCODING: "gzip"}))
with Stream(
b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU"
b"(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00"
b"\r\n--:--"
) as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.read(decode=True)
assert b"Time to Relax!" == result
async def test_read_with_content_encoding_deflate(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_ENCODING: "deflate"}))
with Stream(b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.read(decode=True)
assert b"Time to Relax!" == result
async def test_read_with_content_encoding_identity(self) -> None:
thing = (
b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU"
b"(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00"
b"\r\n"
)
h = CIMultiDictProxy(CIMultiDict({CONTENT_ENCODING: "identity"}))
with Stream(thing + b"--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.read(decode=True)
assert thing[:-2] == result
async def test_read_with_content_encoding_unknown(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_ENCODING: "snappy"}))
with Stream(b"\x0e4Time to Relax!\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
with pytest.raises(RuntimeError):
await obj.read(decode=True)
async def test_read_with_content_transfer_encoding_base64(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TRANSFER_ENCODING: "base64"}))
with Stream(b"VGltZSB0byBSZWxheCE=\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.read(decode=True)
assert b"Time to Relax!" == result
async def test_decode_with_content_transfer_encoding_base64(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TRANSFER_ENCODING: "base64"}))
with Stream(b"VG\r\r\nltZSB0byBSZ\r\nWxheCE=\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = b""
while not obj.at_eof():
chunk = await obj.read_chunk(size=6)
result += obj.decode(chunk)
assert b"Time to Relax!" == result
async def test_read_with_content_transfer_encoding_quoted_printable(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TRANSFER_ENCODING: "quoted-printable"})
)
with Stream(
b"=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82, =D0=BC=D0=B8=D1=80!\r\n--:--"
) as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.read(decode=True)
expected = (
b"\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,"
b" \xd0\xbc\xd0\xb8\xd1\x80!"
)
assert result == expected
@pytest.mark.parametrize("encoding", ("binary", "8bit", "7bit"))
async def test_read_with_content_transfer_encoding_binary(
self, encoding: str
) -> None:
data = (
b"\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,"
b" \xd0\xbc\xd0\xb8\xd1\x80!"
)
h = CIMultiDictProxy(CIMultiDict({CONTENT_TRANSFER_ENCODING: encoding}))
with Stream(data + b"\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.read(decode=True)
assert data == result
async def test_read_with_content_transfer_encoding_unknown(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TRANSFER_ENCODING: "unknown"}))
with Stream(b"\x0e4Time to Relax!\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
with pytest.raises(RuntimeError):
await obj.read(decode=True)
async def test_read_text(self) -> None:
with Stream(b"Hello, world!\r\n--:--") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.text()
assert "Hello, world!" == result
async def test_read_text_default_encoding(self) -> None:
with Stream("Привет, Мир!\r\n--:--".encode()) as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.text()
assert "Привет, Мир!" == result
async def test_read_text_encoding(self) -> None:
with Stream("Привет, Мир!\r\n--:--".encode("cp1251")) as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.text(encoding="cp1251")
assert "Привет, Мир!" == result
async def test_read_text_guess_encoding(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TYPE: "text/plain;charset=cp1251"}))
with Stream("Привет, Мир!\r\n--:--".encode("cp1251")) as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.text()
assert "Привет, Мир!" == result
async def test_read_text_compressed(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_ENCODING: "deflate", CONTENT_TYPE: "text/plain"})
)
with Stream(b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.text()
assert "Time to Relax!" == result
async def test_read_text_while_closed(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TYPE: "text/plain"}))
with Stream(b"") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
obj._at_eof = True
result = await obj.text()
assert "" == result
async def test_read_json(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TYPE: "application/json"}))
with Stream(b'{"test": "passed"}\r\n--:--') as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.json()
assert {"test": "passed"} == result
async def test_read_json_encoding(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TYPE: "application/json"}))
with Stream('{"тест": "пассед"}\r\n--:--'.encode("cp1251")) as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.json(encoding="cp1251")
assert {"тест": "пассед"} == result
async def test_read_json_guess_encoding(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: "application/json; charset=cp1251"})
)
with Stream('{"тест": "пассед"}\r\n--:--'.encode("cp1251")) as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.json()
assert {"тест": "пассед"} == result
async def test_read_json_compressed(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_ENCODING: "deflate", CONTENT_TYPE: "application/json"})
)
with Stream(b"\xabV*I-.Q\xb2RP*H,.NMQ\xaa\x05\x00\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.json()
assert {"test": "passed"} == result
async def test_read_json_while_closed(self) -> None:
h = CIMultiDictProxy(CIMultiDict({CONTENT_TYPE: "application/json"}))
with Stream(b"") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
obj._at_eof = True
result = await obj.json()
assert result is None
async def test_read_form(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: "application/x-www-form-urlencoded"})
)
with Stream(b"foo=bar&foo=baz&boo=\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.form()
assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result
async def test_read_form_invalid_utf8(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: "application/x-www-form-urlencoded"})
)
with Stream(b"\xff\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
with pytest.raises(
ValueError, match="data cannot be decoded with utf-8 encoding"
):
await obj.form()
async def test_read_form_encoding(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: "application/x-www-form-urlencoded"})
)
with Stream("foo=bar&foo=baz&boo=\r\n--:--".encode("cp1251")) as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.form(encoding="cp1251")
assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result
async def test_read_form_guess_encoding(self) -> None:
h = CIMultiDictProxy(
CIMultiDict(
{CONTENT_TYPE: "application/x-www-form-urlencoded; charset=utf-8"}
)
)
with Stream(b"foo=bar&foo=baz&boo=\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
result = await obj.form()
assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result
async def test_read_form_while_closed(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_TYPE: "application/x-www-form-urlencoded"})
)
with Stream(b"") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
obj._at_eof = True
result = await obj.form()
assert not result
async def test_readline(self) -> None:
with Stream(b"Hello\n,\r\nworld!\r\n--:--") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
result = await obj.readline()
assert b"Hello\n" == result
result = await obj.readline()
assert b",\r\n" == result
result = await obj.readline()
assert b"world!" == result
result = await obj.readline()
assert b"" == result
assert obj.at_eof()
async def test_release(self) -> None:
with Stream(b"Hello,\r\n--:\r\n\r\nworld!\r\n--:--") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
await obj.release()
assert obj.at_eof()
assert b"--:\r\n\r\nworld!\r\n--:--" == stream.content.read()
async def test_release_respects_content_length(self) -> None:
h = CIMultiDictProxy(CIMultiDict({"CONTENT-LENGTH": "100500"}))
with Stream(b"." * 100500 + b"\r\n--:--") as stream:
obj = aiohttp.BodyPartReader(BOUNDARY, h, stream)
await obj.release()
assert obj.at_eof()
async def test_release_release(self) -> None:
with Stream(b"Hello,\r\n--:\r\n\r\nworld!\r\n--:--") as stream:
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
await obj.release()
await obj.release()
assert b"--:\r\n\r\nworld!\r\n--:--" == stream.content.read()
async def test_filename(self) -> None:
h = CIMultiDictProxy(
CIMultiDict({CONTENT_DISPOSITION: "attachment; filename=foo.html"})
)
part = aiohttp.BodyPartReader(BOUNDARY, h, mock.Mock())
assert "foo.html" == part.filename
async def test_reading_long_part(self) -> None:
size = 2 * 2**16
protocol = mock.Mock(_reading_paused=False)
stream = StreamReader(protocol, 2**16, loop=asyncio.get_event_loop())
stream.feed_data(b"0" * size + b"\r\n--:--")
stream.feed_eof()
d = CIMultiDictProxy[str](CIMultiDict())
obj = aiohttp.BodyPartReader(BOUNDARY, d, stream)
data = await obj.read()
assert len(data) == size
| TestPartReader |
python | google__pytype | pytype/tests/test_typevar2.py | {
"start": 18270,
"end": 28505
} | class ____(test_base.BaseTest):
"""Tests for generic type aliases ("type macros")."""
def test_homogeneous_tuple(self):
ty = self.Infer("""
from typing import Tuple, TypeVar
T = TypeVar('T')
X = Tuple[T, ...]
def f(x: X[int]):
pass
f((0, 1, 2)) # should not raise an error
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple, TypeVar
T = TypeVar('T')
X = Tuple[T, ...]
def f(x: Tuple[int, ...]) -> None: ...
""",
)
def test_heterogeneous_tuple(self):
ty, _ = self.InferWithErrors("""
from typing import Tuple, TypeVar
T = TypeVar('T')
X = Tuple[T]
def f(x: X[int]):
pass
f((0, 1, 2)) # wrong-arg-types
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple, TypeVar
T = TypeVar('T')
X = Tuple[T]
def f(x: Tuple[int]) -> None: ...
""",
)
def test_substitute_typevar(self):
foo_ty = self.Infer("""
from typing import List, TypeVar
T1 = TypeVar('T1')
T2 = TypeVar('T2')
X = List[T1]
def f(x: X[T2]) -> T2:
return x[0]
""")
self.assertTypesMatchPytd(
foo_ty,
"""
from typing import List, TypeVar
T1 = TypeVar('T1')
T2 = TypeVar('T2')
X = List[T1]
def f(x: List[T2]) -> T2: ...
""",
)
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo_ty))
ty = self.Infer(
"""
import foo
from typing import TypeVar
T = TypeVar('T')
def f(x: T) -> foo.X[T]:
return [x]
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List, TypeVar
T = TypeVar('T')
def f(x: T) -> List[T]: ...
""",
)
def test_substitute_value(self):
foo_ty = self.Infer("""
from typing import List, TypeVar
T = TypeVar('T')
X = List[T]
def f(x: X[int]) -> int:
return x[0]
""")
self.assertTypesMatchPytd(
foo_ty,
"""
from typing import List, TypeVar
T = TypeVar('T')
X = List[T]
def f(x: List[int]) -> int: ...
""",
)
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo_ty))
ty = self.Infer(
"""
import foo
def f(x: int) -> foo.X[int]:
return [x]
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List
def f(x: int) -> List[int]: ...
""",
)
def test_partial_substitution(self):
ty = self.Infer("""
from typing import Dict, TypeVar
T = TypeVar('T')
X = Dict[T, str]
def f(x: X[int]) -> int:
return next(iter(x.keys()))
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, TypeVar
T = TypeVar('T')
X = Dict[T, str]
def f(x: Dict[int, str]) -> int: ...
""",
)
def test_callable(self):
ty = self.Infer("""
from typing import Callable, TypeVar
T = TypeVar('T')
X = Callable[[T], str]
def f() -> X[int]:
def g(x: int):
return str(x)
return g
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, TypeVar
T = TypeVar('T')
X = Callable[[T], str]
def f() -> Callable[[int], str]: ...
""",
)
def test_import_callable(self):
foo = self.Infer("""
from typing import TypeVar
T = TypeVar('T')
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo))
bar = self.Infer(
"""
import foo
from typing import Callable
X = Callable[[foo.T], foo.T]
""",
pythonpath=[d.path],
)
d.create_file("bar.pyi", pytd_utils.Print(bar))
ty = self.Infer(
"""
import foo
import bar
def f(x: foo.T, y: bar.X[foo.T]):
pass
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import bar
import foo
from typing import Callable, TypeVar
T = TypeVar('T')
def f(x: T, y: Callable[[T], T]) -> None: ...
""",
)
def test_union_typevar(self):
ty = self.Infer("""
from typing import TypeVar, Union
T1 = TypeVar('T1')
T2 = TypeVar('T2')
X = Union[int, T1]
def f(x: X[T2], y: T2):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypeVar, Union
T1 = TypeVar('T1')
T2 = TypeVar('T2')
X = Union[int, T1]
def f(x: Union[int, T2], y: T2) -> None: ...
""",
)
def test_union_value(self):
ty = self.Infer("""
from typing import TypeVar, Union
T = TypeVar('T')
X = Union[int, T]
def f(x: X[str]):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union, TypeVar
T = TypeVar('T')
X = Union[int, T]
def f(x: Union[int, str]) -> None: ...
""",
)
def test_extra_parameter(self):
errors = self.CheckWithErrors("""
from typing import Dict, TypeVar
T = TypeVar('T')
X = Dict[T, T]
def f(x: X[int, str]): # invalid-annotation[e]
pass
""")
self.assertErrorRegexes(errors, {"e": r"expected 1 parameter, got 2"})
def test_missing_parameter(self):
errors = self.CheckWithErrors("""
from typing import Dict, TypeVar
T1 = TypeVar('T1')
T2 = TypeVar('T2')
X = Dict[T1, T2]
def f(x: X[int]): # invalid-annotation[e]
pass
""")
self.assertErrorRegexes(errors, {"e": r"expected 2 parameters, got 1"})
def test_nested_typevars(self):
ty = self.Infer("""
from typing import Callable, Dict, TypeVar
K = TypeVar('K')
V = TypeVar('V')
X = Callable[[int], Dict[K, V]]
def f(x: X[float, str]):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, Dict, TypeVar
K = TypeVar('K')
V = TypeVar('V')
X = Callable[[int], Dict[K, V]]
def f(x: Callable[[int], Dict[float, str]]) -> None: ...
""",
)
def test_extra_nested_parameter(self):
ty, errors = self.InferWithErrors("""
from typing import Callable, Dict, TypeVar
K = TypeVar('K')
V = TypeVar('V')
X = Callable[[int], Dict[K, V]]
def f(x: X[float, str, complex]): # invalid-annotation[e]
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, Dict, TypeVar
K = TypeVar('K')
V = TypeVar('V')
X = Callable[[int], Dict[K, V]]
def f(x: Callable[[int], Dict[float, str]]) -> None: ...
""",
)
self.assertErrorRegexes(errors, {"e": r"expected 2 parameters, got 3"})
def test_missing_nested_parameter(self):
ty, errors = self.InferWithErrors("""
from typing import Callable, Dict, TypeVar
K = TypeVar('K')
V = TypeVar('V')
X = Callable[[int], Dict[K, V]]
def f(x: X[float]): # invalid-annotation[e]
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Callable, Dict, TypeVar
K = TypeVar('K')
V = TypeVar('V')
X = Callable[[int], Dict[K, V]]
def f(x: Callable[[int], Dict[float, Any]]) -> None: ...
""",
)
self.assertErrorRegexes(errors, {"e": r"expected 2 parameters, got 1"})
def test_reingest_union(self):
foo = self.Infer("""
from typing import Optional, TypeVar
T = TypeVar('T')
X = Optional[T]
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo))
ty = self.Infer(
"""
import foo
from typing import TypeVar
T = TypeVar('T')
def f1(x: foo.X[int]):
pass
def f2(x: foo.X[T]) -> T:
assert x
return x
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Optional, TypeVar
T = TypeVar('T')
def f1(x: Optional[int]) -> None: ...
def f2(x: Optional[T]) -> T: ...
""",
)
def test_multiple_options(self):
# distilled from real user code
ty = self.Infer("""
from typing import Any, Mapping, Sequence, TypeVar, Union
K = TypeVar('K')
V = TypeVar('V')
X = Union[Sequence, Mapping[K, Any], V]
try:
Y = X[str, V]
except TypeError:
Y = Union[Sequence, Mapping[str, Any], V]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Mapping, Sequence, TypeVar, Union
K = TypeVar('K')
V = TypeVar('V')
X = Union[Sequence, Mapping[K, Any], V]
Y = Union[Sequence, Mapping[str, Any], V]
""",
)
def test_multiple_typevar_options(self):
ty = self.Infer("""
from typing import TypeVar
if __random__:
T1 = TypeVar('T1')
T2 = TypeVar('T2')
else:
T1 = TypeVar('T1')
T2 = TypeVar('T2', bound=str)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, TypeVar
T1 = TypeVar('T1')
T2: Any
""",
)
def test_unparameterized_typevar_alias(self):
err = self.CheckWithErrors("""
from typing import TypeVar
T = TypeVar('T')
U = TypeVar('U')
Foo = list[T]
Bar = dict[T, U]
def f(x: Foo) -> int: # invalid-annotation[e]
return 42
def g(x: Foo) -> T:
return 42
def h(x: Foo[T]) -> int: # invalid-annotation
return 42
def h(x: Foo, y: Bar) -> int: # invalid-annotation
return 42
def j(x: Foo, y: Bar, z: U) -> int:
return 42
""")
self.assertErrorSequences(err, {"e": ["Foo is a generic alias"]})
| GenericTypeAliasTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver18.py | {
"start": 1237,
"end": 1659
} | class ____(Generic[_P]):
def method1(self, val: str, *args: _P.args, **kwargs: _P.kwargs) -> None:
pass
def decorator2() -> Callable[[Callable[_P, None]], ClassB[_P]]: ...
@decorator2()
def func7(y: int) -> None:
pass
reveal_type(func7, expected_text="ClassB[(y: int)]")
reveal_type(func7.method1, expected_text="(val: str, y: int) -> None")
reveal_type(func7.method1("", 1), expected_text="None")
| ClassB |
python | pytorch__pytorch | torch/_higher_order_ops/while_loop.py | {
"start": 27804,
"end": 36521
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(
ctx,
cond_fn,
body_fn,
num_carried_inputs,
num_additional_inputs,
*carries_and_inputs,
):
from torch._higher_order_ops.scan import split_into_chunks
carries, additional_inputs = split_into_chunks(
carries_and_inputs, [num_carried_inputs, num_additional_inputs]
)
with torch._C._AutoDispatchBelowAutograd():
fw_outputs = while_loop_stack_output_op(
cond_fn, body_fn, carries, additional_inputs
)
assert not hasattr(ctx, "fw_cond_fn")
assert not hasattr(ctx, "fw_body_fn")
assert not hasattr(ctx, "carries")
assert not hasattr(ctx, "additional_inputs")
assert not hasattr(ctx, "fw_outputs")
ctx.fw_cond_fn = cond_fn
ctx.fw_body_fn = body_fn
ctx.carries = carries
ctx.additional_inputs = additional_inputs
ctx.fw_outputs = fw_outputs
loop_count = None
# pyrefly: ignore [bad-assignment]
for out in fw_outputs:
if isinstance(out, torch.Tensor):
if loop_count is not None:
assert out.size(0) == loop_count
else:
loop_count = out.size(0)
assert loop_count is not None
# Remove the loop_count from pending_fresh_unbacked_symbols
# because it's not part of forward output and it's impossible
# to bind it to a proxy in forward graph anyways.
if (
isinstance(loop_count, torch.SymInt)
and (shape_env := loop_count.node.shape_env)
and loop_count in shape_env.pending_fresh_unbacked_symbols
):
shape_env.pending_fresh_unbacked_symbols.remove(loop_count)
# Even when body function is not executed, we clone and unsqueeze the input
# to avoid the aliasing, therefore loop_count is always >= 1
torch._check(loop_count >= 1)
# We snapshot the dispatch keys in forward for materializing the
# the bw_graph in backward.
ctx._fw_include_key_set = torch._C._dispatch_tls_local_include_set()
ctx._fw_exclude_key_set = torch._C._dispatch_tls_local_exclude_set()
assert len(fw_outputs) > 0, "fw_outputs shouldn't be empty"
# Only the last of the output fw_outputs need to be returned
return tuple(ckp[-1] for ckp in fw_outputs)
@staticmethod
def backward(ctx, *grads):
from torch._higher_order_ops.cond import create_bw_fn
from torch._higher_order_ops.scan import split_into_chunks
# set up single step bw fn
bw_body_fn = create_bw_fn(ctx.fw_body_fn, ctx.carries + ctx.additional_inputs)
# Note [Handle inputs that're not differentiable]
# When a forward input is non-differentiable e.g. a symint or an integer tensor, their gradients
# will be None. However, we don't want to return None in the subgraph because this complicates the
# inductor codegen, where we need to do a non-uniform treatment for None and tensors.
# So we set up masks and filter the None gradients so that only tensors are returned from each step.
carries_tensor_masks = [
bool(isinstance(t, torch.Tensor) and t.dtype.is_floating_point)
for t in ctx.carries
]
additional_inputs_tensor_masks = [
bool(isinstance(t, torch.Tensor) and t.dtype.is_floating_point)
for t in ctx.additional_inputs
]
init_idx = torch.zeros((), dtype=torch.int64)
init_grad_carries = filter_with_masks(grads, carries_tensor_masks) # type: ignore[arg-type]
init_grad_additional_inputs = tuple(
torch.zeros_like(t)
for need_keep, t in zip(
additional_inputs_tensor_masks, ctx.additional_inputs
)
if need_keep
)
# We need to the forward inputs to each iteration to compute the backward
# which is the concatenation of first iteraiton input i.e. ctx.carries and all iterations's
# output except the last iteration.
fw_carries = [
torch.cat([carry.unsqueeze(0), carries[:-1]])
for carry, carries in zip(ctx.carries, ctx.fw_outputs)
]
for fw_carry, carry in zip(fw_carries, ctx.carries):
fw_carry.requires_grad_(carry.requires_grad)
_, spec = pytree.tree_flatten(
(
init_idx,
init_grad_carries,
init_grad_additional_inputs,
ctx.fw_outputs,
ctx.additional_inputs,
)
)
def cond_fn(*flat_args):
(
idx,
grad_carries,
grad_additional_inputs,
fw_carries,
additional_inputs,
) = pytree.tree_unflatten(flat_args, spec)
assert isinstance(fw_carries[0], torch.Tensor), fw_carries[0]
# excluding the last iteration's output
return idx < fw_carries[0].size(0)
def body_fn(*flat_args):
(
idx,
grad_carries,
grad_additional_inputs,
fw_carries,
additional_inputs,
) = pytree.tree_unflatten(flat_args, spec)
reversed_idx = fw_carries[0].size(0) - idx - 1
selected_fw_carries = [
ckp.select(0, reversed_idx.item()) for ckp in fw_carries
]
cur_grad_carries, cur_grad_additional_inputs = split_into_chunks(
bw_body_fn(*selected_fw_carries, *additional_inputs, *grad_carries),
[len(ctx.carries), len(ctx.additional_inputs)],
)
assert all(isinstance(t, torch.Tensor) for t in cur_grad_carries)
cur_grad_carries_tensors = filter_with_masks(
cur_grad_carries, carries_tensor_masks
)
cur_grad_additional_inputs_tensors = filter_with_masks(
cur_grad_additional_inputs, additional_inputs_tensor_masks
)
return (
idx + 1,
*cur_grad_carries_tensors,
*(
cur_grad + grad
for cur_grad, grad in zip(
cur_grad_additional_inputs_tensors, grad_additional_inputs
)
),
)
args_single_step_bw = (
init_idx,
*init_grad_carries,
*init_grad_additional_inputs,
*fw_carries,
*ctx.additional_inputs,
)
cond_gm = materialize_as_graph(
cond_fn,
args_single_step_bw,
ctx._fw_include_key_set,
ctx._fw_exclude_key_set,
force_enable_grad=True,
)
body_gm = materialize_as_graph(
body_fn,
args_single_step_bw,
ctx._fw_include_key_set,
ctx._fw_exclude_key_set,
force_enable_grad=True,
)
_, final_grad_carries, final_grad_additional_inputs = split_into_chunks(
while_loop_op(
cond_gm,
body_gm,
# pyrefly: ignore [bad-argument-type]
(
init_idx,
*init_grad_carries,
*init_grad_additional_inputs,
),
(*fw_carries, *ctx.additional_inputs),
),
[1, len(init_grad_carries), len(init_grad_additional_inputs)],
)
return (
None,
None,
None,
None,
*fill_none_with_masks(final_grad_carries, carries_tensor_masks),
*fill_none_with_masks(
final_grad_additional_inputs, additional_inputs_tensor_masks
),
)
while_loop_stack_output_op = WhileLoopStackOutputOp()
while_loop_stack_output_op.py_impl(DispatchKey.CompositeExplicitAutograd)(
functools.partial(while_loop_dense, stack_output=True)
)
while_loop_stack_output_op.py_impl(ProxyTorchDispatchMode)(
functools.partial(while_loop_tracing, stack_output=True)
)
while_loop_stack_output_op.py_impl(FakeTensorMode)(
functools.partial(while_loop_fake_tensor_mode, stack_output=True)
)
while_loop_stack_output_op.py_functionalize_impl(
functools.partial(while_loop_func, stack_output=True)
)
while_loop_stack_output_op.py_autograd_impl(
autograd_not_implemented(while_loop_stack_output_op, deferred_error=True)
)
| WhileLoopAutogradOp |
python | getsentry__sentry | tests/sentry/issues/test_ignored.py | {
"start": 687,
"end": 2596
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.group = self.create_group()
self.group_list = [self.group]
self.group_ids = [self.group]
add_group_to_inbox(self.group, GroupInboxReason.NEW)
def test_ignored_forever(self) -> None:
status_details = handle_ignored(self.group_list, {}, self.user)
assert status_details == {}
assert not GroupInbox.objects.filter(group=self.group).exists()
assert not GroupSnooze.objects.filter(group=self.group).exists()
def test_ignored_duration(self) -> None:
status_details = handle_ignored(self.group_list, {"ignoreDuration": 30}, self.user)
assert status_details is not None
assert not GroupInbox.objects.filter(group=self.group).exists()
snooze = GroupSnooze.objects.filter(group=self.group).get()
assert snooze.until == status_details.get("ignoreUntil")
def test_ignored_count(self) -> None:
status_details = handle_ignored(self.group_list, {"ignoreCount": 50}, self.user)
assert status_details is not None
assert not GroupInbox.objects.filter(group=self.group).exists()
snooze = GroupSnooze.objects.filter(group=self.group).get()
assert snooze.count == status_details.get("ignoreCount")
def test_ignored_user_count(self) -> None:
status_details = handle_ignored(self.group_list, {"ignoreUserCount": 100}, self.user)
assert status_details is not None
assert not GroupInbox.objects.filter(group=self.group).exists()
snooze = GroupSnooze.objects.filter(group=self.group).get()
assert snooze.user_count == status_details.get("ignoreUserCount")
assert Group.objects.get(id=self.group.id).status == GroupStatus.IGNORED
assert Group.objects.get(id=self.group.id).substatus == GroupSubStatus.UNTIL_CONDITION_MET
| HandleIgnoredTest |
python | plotly__plotly.py | plotly/graph_objs/layout/smith/_imaginaryaxis.py | {
"start": 235,
"end": 28488
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.smith"
_path_str = "layout.smith.imaginaryaxis"
_valid_props = {
"color",
"gridcolor",
"griddash",
"gridwidth",
"hoverformat",
"labelalias",
"layer",
"linecolor",
"linewidth",
"showgrid",
"showline",
"showticklabels",
"showtickprefix",
"showticksuffix",
"tickcolor",
"tickfont",
"tickformat",
"ticklen",
"tickprefix",
"ticks",
"ticksuffix",
"tickvals",
"tickvalssrc",
"tickwidth",
"visible",
}
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
self["griddash"] = val
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def layer(self):
"""
Sets the layer on which this axis is displayed. If *above
traces*, this axis is displayed above all the subplot's traces
If *below traces*, this axis is displayed below all the
subplot's traces, but above the grid lines. Useful when used
together with scatter-like traces with `cliponaxis` set to
False to show markers and/or text nodes above this axis.
The 'layer' property is an enumeration that may be specified as:
- One of the following enumeration values:
['above traces', 'below traces']
Returns
-------
Any
"""
return self["layer"]
@layer.setter
def layer(self, val):
self["layer"] = val
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.smith.imaginaryaxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.layout.smith.imaginaryaxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Defaults to
`realaxis.tickvals` plus the same as negatives and zero.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def visible(self):
"""
A single toggle to hide the axis while preserving interaction
like dragging. Default is true when a cheater plot is present
on the axis, otherwise false
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
ticklen
Sets the tick length (in px).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
tickvals
Sets the values at which ticks on this axis appear.
Defaults to `realaxis.tickvals` plus the same as
negatives and zero.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
"""
def __init__(
self,
arg=None,
color=None,
gridcolor=None,
griddash=None,
gridwidth=None,
hoverformat=None,
labelalias=None,
layer=None,
linecolor=None,
linewidth=None,
showgrid=None,
showline=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
tickcolor=None,
tickfont=None,
tickformat=None,
ticklen=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
visible=None,
**kwargs,
):
"""
Construct a new Imaginaryaxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.smith.Imaginaryaxis`
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
ticklen
Sets the tick length (in px).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
tickvals
Sets the values at which ticks on this axis appear.
Defaults to `realaxis.tickvals` plus the same as
negatives and zero.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
Returns
-------
Imaginaryaxis
"""
super().__init__("imaginaryaxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.smith.Imaginaryaxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.smith.Imaginaryaxis`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("gridcolor", arg, gridcolor)
self._set_property("griddash", arg, griddash)
self._set_property("gridwidth", arg, gridwidth)
self._set_property("hoverformat", arg, hoverformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("layer", arg, layer)
self._set_property("linecolor", arg, linecolor)
self._set_property("linewidth", arg, linewidth)
self._set_property("showgrid", arg, showgrid)
self._set_property("showline", arg, showline)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Imaginaryaxis |
python | pennersr__django-allauth | allauth/socialaccount/providers/telegram/provider.py | {
"start": 246,
"end": 2101
} | class ____(Provider):
id = "telegram"
name = "Telegram"
account_class = TelegramAccount
supports_redirect = True
def get_login_url(self, request, **kwargs):
url = reverse("telegram_login")
if kwargs:
url = url + "?" + urlencode(kwargs)
return url
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
ret = {}
if data.get("first_name"):
ret["first_name"] = data.get("first_name")
if data.get("last_name"):
ret["last_name"] = data.get("last_name")
if data.get("username"):
ret["username"] = data.get("username")
return ret
def get_auth_date_validity(self):
auth_date_validity = 30
settings = self.get_settings()
if "AUTH_PARAMS" in settings:
auth_date_validity = settings.get("AUTH_PARAMS").get(
"auth_date_validity", auth_date_validity
)
auth_date_validity = self.app.settings.get(
"auth_date_validity", auth_date_validity
)
return auth_date_validity
def redirect(self, request, process, next_url=None, data=None, **kwargs):
state = self.stash_redirect_state(request, process, next_url, data, **kwargs)
return_to = request.build_absolute_uri(
reverse("telegram_callback") + "?" + urlencode({"state": state})
)
url = "https://oauth.telegram.org/auth?" + urlencode(
{
"origin": request.build_absolute_uri("/"),
"bot_id": self.app.client_id,
"request_access": "write",
"embed": "0",
"return_to": return_to,
}
)
return HttpResponseRedirect(url)
provider_classes = [TelegramProvider]
| TelegramProvider |
python | django__django | tests/gis_tests/geo3d/models.py | {
"start": 1397,
"end": 1545
} | class ____(SimpleModel):
mpoint = models.MultiPointField(dim=3)
class Meta:
required_db_features = {"supports_3d_storage"}
| MultiPoint3D |
python | Pylons__pyramid | tests/test_exceptions.py | {
"start": 827,
"end": 1165
} | class ____(unittest.TestCase):
def test_response_equivalence(self):
from pyramid.exceptions import BadCSRFToken
from pyramid.httpexceptions import HTTPBadRequest
self.assertTrue(isinstance(BadCSRFToken(), HTTPBadRequest))
self.assertEqual(BadCSRFToken().status, HTTPBadRequest().status)
| TestBadCSRFToken |
python | crytic__slither | slither/core/declarations/solidity_variables.py | {
"start": 7232,
"end": 7964
} | class ____(SolidityFunction):
def __init__(self, custom_error: CustomError) -> None: # pylint: disable=super-init-not-called
self._name = "revert " + custom_error.solidity_signature
self._custom_error = custom_error
self._return_type: List[Union[TypeInformation, ElementaryType]] = []
@property
def custom_error(self) -> CustomError:
return self._custom_error
def __eq__(self, other: Any) -> bool:
return (
self.__class__ == other.__class__
and self.name == other.name
and self._custom_error == other._custom_error
)
def __hash__(self) -> int:
return hash(hash(self.name) + hash(self._custom_error))
| SolidityCustomRevert |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_scatter01.py | {
"start": 315,
"end": 1406
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_scatter01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "scatter"})
chart.axis_ids = [40262272, 40260352]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5"}
)
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$C$1:$C$5"}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django-crispy-forms__django-crispy-forms | tests/forms.py | {
"start": 2497,
"end": 2682
} | class ____(BaseForm):
    # Dropdown with three fixed options rendered via a plain Select widget.
    # NOTE(review): initial is a one-element tuple (1,), not the bare value 1 —
    # presumably intentional for this test fixture; confirm against its usage.
    select = forms.ChoiceField(
        choices=((1, "Option one"), (2, "Option two"), (3, "Option three")), initial=(1,), widget=forms.Select
    )
| SelectSampleForm |
python | fastapi__sqlmodel | tests/test_tutorial/test_code_structure/test_tutorial001.py | {
"start": 549,
"end": 1465
} | class ____:
    # Pair of dynamically imported tutorial modules: the app entry point and
    # its database configuration, so tests can rewire the engine before running.
    app: ModuleType
    database: ModuleType
@pytest.fixture(
    name="modules",
    params=[
        "tutorial001",
        pytest.param("tutorial001_py39", marks=needs_py39),
        pytest.param("tutorial001_py310", marks=needs_py310),
    ],
)
def get_modules(request: pytest.FixtureRequest) -> Modules:
    """Import the requested tutorial variant and point it at an in-memory DB."""
    package = f"docs_src.tutorial.code_structure.{request.param}"
    app_module = importlib.import_module(f"{package}.app")
    database_module = importlib.import_module(f"{package}.database")

    # Swap the tutorial's on-disk database for an in-memory SQLite engine,
    # and make sure the app module uses the very same engine.
    database_module.sqlite_url = "sqlite://"
    database_module.engine = create_engine(database_module.sqlite_url)
    app_module.engine = database_module.engine
    return Modules(app=app_module, database=database_module)
def test_tutorial(print_mock: PrintMock, modules: Modules):
    # Run the tutorial entry point and compare its recorded print calls
    # (presumably captured by the print_mock fixture) to the expected transcript.
    modules.app.main()
    assert print_mock.calls == expected_calls
| Modules |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.