Dataset columns:

column      type           values / lengths
----------  -------------  ----------------
language    stringclasses  1 value
repo        stringclasses  346 values
path        stringlengths  6-201
class_span  dict
source      stringlengths  21-2.38M
target      stringlengths  1-96
python
walkccc__LeetCode
solutions/2563. Count the Number of Fair Pairs/2563.py
{ "start": 0, "end": 513 }
class ____:
  def countFairPairs(self, nums: list[int], lower: int, upper: int) -> int:
    # nums[i] + nums[j] == nums[j] + nums[i], so the condition that i < j
    # degrades to i != j and we can sort the array.
    nums.sort()

    def countLess(summ: int) -> int:
      res = 0
      i = 0
      j = len(nums) - 1
      while i < j:
        while i < j and nums[i] + nums[j] > summ:
          j -= 1
        res += j - i
        i += 1
      return res

    return countLess(upper) - countLess(lower - 1)
Solution
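A minimal usage sketch for the row above (assumes ____ is filled with the target value Solution; the input is LeetCode 2563's first example):

# Counts pairs (i, j), i < j, with lower <= nums[i] + nums[j] <= upper.
print(Solution().countFairPairs([0, 1, 7, 4, 4, 5], lower=3, upper=6))  # 6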
python
pytorch__pytorch
tools/experimental/torchfuzz/operators/tensor_pointwise.py
{ "start": 3227, "end": 3456 }
class ____(PointwiseOperator):
    """Operator for element-wise subtraction."""

    def __init__(self):
        super().__init__("sub", "-")

    @property
    def torch_op_name(self) -> str:
        return "torch.sub"
SubOperator
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_button08.py
{ "start": 315, "end": 948 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("button08.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        worksheet1 = workbook.add_worksheet()
        worksheet2 = workbook.add_worksheet()

        worksheet1.insert_button("C2", {})

        worksheet2.write_comment("A1", "Foo")
        worksheet2.set_comments_author("John")

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
django__django
django/contrib/postgres/search.py
{ "start": 12541, "end": 12622 }
class ____(TrigramBase):
    function = ""
    arg_joiner = " <-> "
TrigramDistance
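A hedged ORM sketch of how the class above is used (assumes ____ is filled with the target value TrigramDistance, a hypothetical Author model with a name field, and PostgreSQL with the pg_trgm extension enabled):

# Illustrative only: Author is a hypothetical model; pg_trgm must be installed.
from django.contrib.postgres.search import TrigramDistance

close_matches = (
    Author.objects.annotate(distance=TrigramDistance("name", "Katie"))
    .filter(distance__lte=0.7)
    .order_by("distance")
)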
python
dask__distributed
distributed/deploy/local.py
{ "start": 529, "end": 10735 }
class ____(SpecCluster):
    """Create local Scheduler and Workers

    This creates a "cluster" of a scheduler and workers running on the local
    machine.

    Parameters
    ----------
    n_workers: int
        Number of workers to start
    memory_limit: str, float, int, or None, default "auto"
        Sets the memory limit *per worker*.

        Notes regarding argument data type:

        * If None or 0, no limit is applied.
        * If "auto", the total system memory is split evenly between the workers.
        * If a float, that fraction of the system memory is used *per worker*.
        * If a string giving a number of bytes (like ``"1GiB"``), that amount is used *per worker*.
        * If an int, that number of bytes is used *per worker*.

        Note that the limit will only be enforced when ``processes=True``, and
        the limit is only enforced on a best-effort basis — it's still possible
        for workers to exceed this limit.
    processes: bool
        Whether to use processes (True) or threads (False). Defaults to True,
        unless worker_class=Worker, in which case it defaults to False.
    threads_per_worker: int
        Number of threads per each worker
    scheduler_port: int
        Port of the scheduler. Use 0 to choose a random port (default).
        8786 is a common choice.
    silence_logs: logging level
        Level of logs to print out to stdout. ``logging.WARN`` by default.
        Use a falsey value like False or None for no change.
    host: string
        Host address on which the scheduler will listen, defaults to only localhost
    ip: string
        Deprecated. See ``host`` above.
    dashboard_address: str
        Address on which to listen for the Bokeh diagnostics server like
        'localhost:8787' or '0.0.0.0:8787'. Defaults to ':8787'.
        Set to ``None`` to disable the dashboard.
        Use ':0' for a random port.
        When specifying only a port like ':8787', the dashboard will bind to
        the given interface from the ``host`` parameter. If ``host`` is empty,
        binding will occur on all interfaces '0.0.0.0'. To avoid firewall
        issues when deploying locally, set ``host`` to 'localhost'.
    worker_dashboard_address: str
        Address on which to listen for the Bokeh worker diagnostics server like
        'localhost:8787' or '0.0.0.0:8787'. Defaults to None which disables the
        dashboard. Use ':0' for a random port.
    diagnostics_port: int
        Deprecated. See dashboard_address.
    asynchronous: bool (False by default)
        Set to True if using this cluster within async/await functions or
        within Tornado gen.coroutines. This should remain False for normal use.
    blocked_handlers: List[str]
        A list of strings specifying a blocklist of handlers to disallow on
        the Scheduler, like ``['feed', 'run_function']``
    service_kwargs: Dict[str, Dict]
        Extra keywords to hand to the running services
    security : Security or bool, optional
        Configures communication security in this cluster. Can be a security
        object, or True. If True, temporary self-signed credentials will
        be created automatically.
    protocol: str (optional)
        Protocol to use like ``tcp://``, ``tls://``, ``inproc://``
        This defaults to sensible choice given other keyword arguments like
        ``processes`` and ``security``
    interface: str (optional)
        Network interface to use. Defaults to lo/localhost
    worker_class: Worker
        Worker class used to instantiate workers from. Defaults to Worker if
        processes=False and Nanny if processes=True or omitted.
    **worker_kwargs:
        Extra worker arguments. Any additional keyword arguments will be
        passed to the ``Worker`` class constructor.

    Examples
    --------
    >>> cluster = LocalCluster()  # Create a local cluster  # doctest: +SKIP
    >>> cluster  # doctest: +SKIP
    LocalCluster("127.0.0.1:8786", workers=8, threads=8)

    >>> c = Client(cluster)  # connect to local cluster  # doctest: +SKIP

    Scale the cluster to three workers

    >>> cluster.scale(3)  # doctest: +SKIP

    Pass extra keyword arguments to Bokeh

    >>> LocalCluster(service_kwargs={'dashboard': {'prefix': '/foo'}})  # doctest: +SKIP
    """

    def __init__(
        self,
        name=None,
        n_workers=None,
        threads_per_worker=None,
        processes=None,
        loop=None,
        start=None,
        host=None,
        ip=None,
        scheduler_port=0,
        silence_logs=logging.WARN,
        dashboard_address=":8787",
        worker_dashboard_address=None,
        diagnostics_port=None,
        services=None,
        worker_services=None,
        service_kwargs=None,
        asynchronous=False,
        security=None,
        protocol=None,
        blocked_handlers=None,
        interface=None,
        worker_class=None,
        scheduler_kwargs=None,
        scheduler_sync_interval=1,
        **worker_kwargs,
    ):
        if ip is not None:
            # In the future we should warn users about this move
            # warnings.warn("The ip keyword has been moved to host")
            host = ip
        if diagnostics_port is not None:
            warnings.warn(
                "diagnostics_port has been deprecated. "
                "Please use `dashboard_address=` instead"
            )
            dashboard_address = diagnostics_port
        if threads_per_worker == 0:
            warnings.warn(
                "Setting `threads_per_worker` to 0 has been deprecated. "
                "Please set to None or to a specific int."
            )
            threads_per_worker = None

        if "dashboard" in worker_kwargs:
            warnings.warn(
                "Setting `dashboard` is discouraged. "
                "Please set `dashboard_address` to affect the scheduler (more common) "
                "and `worker_dashboard_address` for the worker (less common)."
            )

        if processes is None:
            processes = worker_class is None or issubclass(worker_class, Nanny)
        if worker_class is None:
            worker_class = Nanny if processes else Worker

        self.status = None
        self.processes = processes

        if security is None:
            # Falsey values load the default configuration
            security = Security()
        elif security is True:
            # True indicates self-signed temporary credentials should be used
            security = Security.temporary()
        elif not isinstance(security, Security):
            raise TypeError("security must be a Security object")

        if protocol is None:
            if host and "://" in host:
                protocol = host.split("://")[0]
            elif security and security.require_encryption:
                protocol = "tls://"
            elif not self.processes and not scheduler_port:
                protocol = "inproc://"
            else:
                protocol = "tcp://"
        if not protocol.endswith("://"):
            protocol = protocol + "://"

        if host is None and not protocol.startswith("inproc") and not interface:
            host = "127.0.0.1"

        services = services or {}
        worker_services = worker_services or {}

        if n_workers is None and threads_per_worker is None:
            if processes:
                n_workers, threads_per_worker = nprocesses_nthreads()
            else:
                n_workers = 1
                threads_per_worker = CPU_COUNT
        if n_workers is None and threads_per_worker is not None:
            n_workers = max(1, CPU_COUNT // threads_per_worker) if processes else 1
        if n_workers and threads_per_worker is None:
            # Overcommit threads per worker, rather than undercommit
            threads_per_worker = max(1, math.ceil(CPU_COUNT / n_workers))
        if n_workers and "memory_limit" not in worker_kwargs:
            worker_kwargs["memory_limit"] = parse_memory_limit(
                "auto", 1, n_workers, logger=logger
            )

        worker_kwargs.update(
            {
                "host": host,
                "nthreads": threads_per_worker,
                "services": worker_services,
                "dashboard_address": worker_dashboard_address,
                "dashboard": worker_dashboard_address is not None,
                "interface": interface,
                "protocol": protocol,
                "security": security,
                "silence_logs": silence_logs,
            }
        )

        scheduler = {
            "cls": Scheduler,
            "options": toolz.merge(
                dict(
                    host=host,
                    services=services,
                    service_kwargs=service_kwargs,
                    security=security,
                    port=scheduler_port,
                    interface=interface,
                    protocol=protocol,
                    dashboard=dashboard_address is not None,
                    dashboard_address=dashboard_address,
                    blocked_handlers=blocked_handlers,
                ),
                scheduler_kwargs or {},
            ),
        }

        worker = {"cls": worker_class, "options": worker_kwargs}
        workers = {i: worker for i in range(n_workers)}

        super().__init__(
            name=name,
            scheduler=scheduler,
            workers=workers,
            worker=worker,
            loop=loop,
            asynchronous=asynchronous,
            silence_logs=silence_logs,
            security=security,
            scheduler_sync_interval=scheduler_sync_interval,
        )

    def start_worker(self, *args, **kwargs):
        raise NotImplementedError(
            "The `cluster.start_worker` function has been removed. "
            "Please see the `cluster.scale` method instead."
        )

    def _repr_html_(self, cluster_status=None):
        cluster_status = get_template("local_cluster.html.j2").render(
            status=self.status.name,
            processes=self.processes,
            cluster_status=cluster_status,
        )
        return super()._repr_html_(cluster_status=cluster_status)
LocalCluster
python
eventlet__eventlet
tests/mock.py
{ "start": 62225, "end": 62816 }
class ____:
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent

    def __call__(self, *args, **kwargs):
        m = self.create_mock()
        return m(*args, **kwargs)

    def create_mock(self):
        entry = self.name
        parent = self.parent
        m = parent._get_child_mock(name=entry, _new_name=entry, _new_parent=parent)
        setattr(parent, entry, m)
        _set_return_value(parent, m, entry)
        return m

    def __get__(self, obj, _type=None):
        return self.create_mock()
MagicProxy
python
matplotlib__matplotlib
lib/matplotlib/widgets.py
{ "start": 134668, "end": 137288 }
class ____(_SelectorWidget):
    """
    Selection curve of an arbitrary shape.

    For the selector to remain responsive you must keep a reference to it.

    The selected path can be used in conjunction with `~.Path.contains_point`
    to select data points from an image.

    In contrast to `Lasso`, `LassoSelector` is written with an interface
    similar to `RectangleSelector` and `SpanSelector`, and will continue to
    interact with the Axes until disconnected.

    Example usage::

        ax = plt.subplot()
        ax.plot(x, y)

        def onselect(verts):
            print(verts)
        lasso = LassoSelector(ax, onselect)

    Parameters
    ----------
    ax : `~matplotlib.axes.Axes`
        The parent Axes for the widget.
    onselect : function, optional
        Whenever the lasso is released, the *onselect* function is called and
        passed the vertices of the selected path.
    useblit : bool, default: True
        Whether to use blitting for faster drawing (if supported by the
        backend). See the tutorial :ref:`blitting` for details.
    props : dict, optional
        Properties with which the line is drawn, see `.Line2D` for valid
        properties. Default values are defined in ``mpl.rcParams``.
    button : `.MouseButton` or list of `.MouseButton`, optional
        The mouse buttons used for rectangle selection. Default is ``None``,
        which corresponds to all buttons.
    """

    def __init__(self, ax, onselect=None, *, useblit=True, props=None, button=None):
        super().__init__(ax, onselect, useblit=useblit, button=button)
        self.verts = None
        props = {
            **(props if props is not None else {}),
            # Note that self.useblit may be != useblit, if the canvas doesn't
            # support blitting.
            'animated': self.useblit, 'visible': False,
        }
        line = Line2D([], [], **props)
        self.ax.add_line(line)
        self._selection_artist = line

    def _press(self, event):
        self.verts = [self._get_data(event)]
        self._selection_artist.set_visible(True)

    def _release(self, event):
        if self.verts is not None:
            self.verts.append(self._get_data(event))
            self.onselect(self.verts)
        self._selection_artist.set_data([[], []])
        self._selection_artist.set_visible(False)
        self.verts = None

    def _onmove(self, event):
        if self.verts is None:
            return
        self.verts.append(self._get_data(event))
        self._selection_artist.set_data(list(zip(*self.verts)))
        self.update()
LassoSelector
python
celery__celery
celery/worker/autoscale.py
{ "start": 744, "end": 1670 }
class ____(bootsteps.StartStopStep):
    """Bootstep that starts the autoscaler thread/timer in the worker."""

    label = 'Autoscaler'
    conditional = True
    requires = (Pool,)

    def __init__(self, w, **kwargs):
        self.enabled = w.autoscale
        w.autoscaler = None

    def create(self, w):
        scaler = w.autoscaler = self.instantiate(
            w.autoscaler_cls,
            w.pool, w.max_concurrency, w.min_concurrency,
            worker=w,
            mutex=DummyLock() if w.use_eventloop else None,
        )
        return scaler if not w.use_eventloop else None

    def register_with_event_loop(self, w, hub):
        w.consumer.on_task_message.add(w.autoscaler.maybe_scale)
        hub.call_repeatedly(
            w.autoscaler.keepalive, w.autoscaler.maybe_scale,
        )

    def info(self, w):
        """Return `Autoscaler` info."""
        return {'autoscaler': w.autoscaler.info()}
WorkerComponent
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/triggers/test_emr.py
{ "start": 6992, "end": 7817 }
class ____:
    def test_serialization(self):
        application_id = "test_application_id"
        waiter_delay = 30
        waiter_max_attempts = 60
        aws_conn_id = "aws_default"

        trigger = EmrServerlessStartApplicationTrigger(
            application_id=application_id,
            waiter_delay=waiter_delay,
            waiter_max_attempts=waiter_max_attempts,
            aws_conn_id=aws_conn_id,
        )

        classpath, kwargs = trigger.serialize()
        assert classpath == "airflow.providers.amazon.aws.triggers.emr.EmrServerlessStartApplicationTrigger"
        assert kwargs == {
            "application_id": "test_application_id",
            "waiter_delay": 30,
            "waiter_max_attempts": 60,
            "aws_conn_id": "aws_default",
        }
TestEmrServerlessStartApplicationTrigger
python
tiangolo__fastapi
tests/test_openapi_separate_input_output_schemas.py
{ "start": 434, "end": 21216 }
class ____(BaseModel):
    name: str
    description: Optional[str] = None
    sub: Optional[SubItem] = None

    if PYDANTIC_V2:
        model_config = {"json_schema_serialization_defaults_required": True}


def get_app_client(separate_input_output_schemas: bool = True) -> TestClient:
    app = FastAPI(separate_input_output_schemas=separate_input_output_schemas)

    @app.post("/items/", responses={402: {"model": Item}})
    def create_item(item: Item) -> Item:
        return item

    @app.post("/items-list/")
    def create_item_list(item: List[Item]):
        return item

    @app.get("/items/")
    def read_items() -> List[Item]:
        return [
            Item(
                name="Portal Gun",
                description="Device to travel through the multi-rick-verse",
                sub=SubItem(subname="subname"),
            ),
            Item(name="Plumbus"),
        ]

    client = TestClient(app)
    return client


def test_create_item():
    client = get_app_client()
    client_no = get_app_client(separate_input_output_schemas=False)
    response = client.post("/items/", json={"name": "Plumbus"})
    response2 = client_no.post("/items/", json={"name": "Plumbus"})
    assert response.status_code == response2.status_code == 200, response.text
    assert (
        response.json()
        == response2.json()
        == {"name": "Plumbus", "description": None, "sub": None}
    )


def test_create_item_with_sub():
    client = get_app_client()
    client_no = get_app_client(separate_input_output_schemas=False)
    data = {
        "name": "Plumbus",
        "sub": {"subname": "SubPlumbus", "sub_description": "Sub WTF"},
    }
    response = client.post("/items/", json=data)
    response2 = client_no.post("/items/", json=data)
    assert response.status_code == response2.status_code == 200, response.text
    assert (
        response.json()
        == response2.json()
        == {
            "name": "Plumbus",
            "description": None,
            "sub": {"subname": "SubPlumbus", "sub_description": "Sub WTF", "tags": []},
        }
    )


def test_create_item_list():
    client = get_app_client()
    client_no = get_app_client(separate_input_output_schemas=False)
    data = [
        {"name": "Plumbus"},
        {
            "name": "Portal Gun",
            "description": "Device to travel through the multi-rick-verse",
        },
    ]
    response = client.post("/items-list/", json=data)
    response2 = client_no.post("/items-list/", json=data)
    assert response.status_code == response2.status_code == 200, response.text
    assert (
        response.json()
        == response2.json()
        == [
            {"name": "Plumbus", "description": None, "sub": None},
            {
                "name": "Portal Gun",
                "description": "Device to travel through the multi-rick-verse",
                "sub": None,
            },
        ]
    )


def test_read_items():
    client = get_app_client()
    client_no = get_app_client(separate_input_output_schemas=False)
    response = client.get("/items/")
    response2 = client_no.get("/items/")
    assert response.status_code == response2.status_code == 200, response.text
    assert (
        response.json()
        == response2.json()
        == [
            {
                "name": "Portal Gun",
                "description": "Device to travel through the multi-rick-verse",
                "sub": {"subname": "subname", "sub_description": None, "tags": []},
            },
            {"name": "Plumbus", "description": None, "sub": None},
        ]
    )


@needs_pydanticv2
def test_openapi_schema():
    client = get_app_client()
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == snapshot(
        {
            "openapi": "3.1.0",
            "info": {"title": "FastAPI", "version": "0.1.0"},
            "paths": {
                "/items/": {
                    "get": {
                        "summary": "Read Items",
                        "operationId": "read_items_items__get",
                        "responses": {
                            "200": {
                                "description": "Successful Response",
                                "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Item-Output"}, "type": "array", "title": "Response Read Items Items Get"}}},
                            }
                        },
                    },
                    "post": {
                        "summary": "Create Item",
                        "operationId": "create_item_items__post",
                        "requestBody": {
                            "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Item-Input"}}},
                            "required": True,
                        },
                        "responses": {
                            "200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Item-Output"}}}},
                            "402": {"description": "Payment Required", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Item-Output"}}}},
                            "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}},
                        },
                    },
                },
                "/items-list/": {
                    "post": {
                        "summary": "Create Item List",
                        "operationId": "create_item_list_items_list__post",
                        "requestBody": {
                            "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Item-Input"}, "type": "array", "title": "Item"}}},
                            "required": True,
                        },
                        "responses": {
                            "200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}},
                            "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}},
                        },
                    }
                },
            },
            "components": {
                "schemas": {
                    "HTTPValidationError": {"properties": {"detail": {"items": {"$ref": "#/components/schemas/ValidationError"}, "type": "array", "title": "Detail"}}, "type": "object", "title": "HTTPValidationError"},
                    "Item-Input": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Description"}, "sub": {"anyOf": [{"$ref": "#/components/schemas/SubItem-Input"}, {"type": "null"}]}}, "type": "object", "required": ["name"], "title": "Item"},
                    "Item-Output": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Description"}, "sub": {"anyOf": [{"$ref": "#/components/schemas/SubItem-Output"}, {"type": "null"}]}}, "type": "object", "required": ["name", "description", "sub"], "title": "Item"},
                    "SubItem-Input": {"properties": {"subname": {"type": "string", "title": "Subname"}, "sub_description": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Sub Description"}, "tags": {"items": {"type": "string"}, "type": "array", "title": "Tags", "default": []}}, "type": "object", "required": ["subname"], "title": "SubItem"},
                    "SubItem-Output": {"properties": {"subname": {"type": "string", "title": "Subname"}, "sub_description": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Sub Description"}, "tags": {"items": {"type": "string"}, "type": "array", "title": "Tags", "default": []}}, "type": "object", "required": ["subname", "sub_description", "tags"], "title": "SubItem"},
                    "ValidationError": {"properties": {"loc": {"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, "type": "array", "title": "Location"}, "msg": {"type": "string", "title": "Message"}, "type": {"type": "string", "title": "Error Type"}}, "type": "object", "required": ["loc", "msg", "type"], "title": "ValidationError"},
                }
            },
        }
    )


@needs_pydanticv2
def test_openapi_schema_no_separate():
    client = get_app_client(separate_input_output_schemas=False)
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/items/": {
                "get": {
                    "summary": "Read Items",
                    "operationId": "read_items_items__get",
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Item"}, "type": "array", "title": "Response Read Items Items Get"}}},
                        }
                    },
                },
                "post": {
                    "summary": "Create Item",
                    "operationId": "create_item_items__post",
                    "requestBody": {
                        "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Item"}}},
                        "required": True,
                    },
                    "responses": {
                        "200": {"description": "Successful Response", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Item"}}}},
                        "402": {"description": "Payment Required", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Item"}}}},
                        "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}},
                    },
                },
            },
            "/items-list/": {
                "post": {
                    "summary": "Create Item List",
                    "operationId": "create_item_list_items_list__post",
                    "requestBody": {
                        "content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/Item"}, "type": "array", "title": "Item"}}},
                        "required": True,
                    },
                    "responses": {
                        "200": {"description": "Successful Response", "content": {"application/json": {"schema": {}}}},
                        "422": {"description": "Validation Error", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPValidationError"}}}},
                    },
                }
            },
        },
        "components": {
            "schemas": {
                "HTTPValidationError": {"properties": {"detail": {"items": {"$ref": "#/components/schemas/ValidationError"}, "type": "array", "title": "Detail"}}, "type": "object", "title": "HTTPValidationError"},
                "Item": {"properties": {"name": {"type": "string", "title": "Name"}, "description": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Description"}, "sub": {"anyOf": [{"$ref": "#/components/schemas/SubItem"}, {"type": "null"}]}}, "type": "object", "required": ["name"], "title": "Item"},
                "SubItem": {"properties": {"subname": {"type": "string", "title": "Subname"}, "sub_description": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Sub Description"}, "tags": {"items": {"type": "string"}, "type": "array", "title": "Tags", "default": []}}, "type": "object", "required": ["subname"], "title": "SubItem"},
                "ValidationError": {"properties": {"loc": {"items": {"anyOf": [{"type": "string"}, {"type": "integer"}]}, "type": "array", "title": "Location"}, "msg": {"type": "string", "title": "Message"}, "type": {"type": "string", "title": "Error Type"}}, "type": "object", "required": ["loc", "msg", "type"], "title": "ValidationError"},
            }
        },
    }
Item
python
huggingface__transformers
src/transformers/models/lfm2/modular_lfm2.py
{ "start": 8467, "end": 11096 }
class ____(LlamaAttention):
    def __init__(self, config: Lfm2Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.out_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.q_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps)
        self.k_layernorm = Lfm2RMSNorm(self.head_dim, eps=config.norm_eps)
        del self.o_proj
        del self.attention_dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Lfm2HybridConvCache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_layernorm(self.q_proj(hidden_states).view(*hidden_shape)).transpose(1, 2)
        key_states = self.k_layernorm(self.k_proj(hidden_states).view(*hidden_shape)).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(*hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        output = self.out_proj(attn_output)
        return output, attn_weights
Lfm2Attention
python
xlwings__xlwings
xlwings/main.py
{ "start": 125466, "end": 132022 }
class ____(Collection):
    """
    A collection of all :meth:`picture <Picture>` objects on the specified sheet:

    >>> import xlwings as xw
    >>> xw.books['Book1'].sheets[0].pictures
    Pictures([<Picture 'Picture 1' in <Sheet [Book1]Sheet1>>, <Picture 'Picture 2' in <Sheet [Book1]Sheet1>>])

    .. versionadded:: 0.9.0
    """

    _wrap = Picture

    @property
    def parent(self):
        return Sheet(impl=self.impl.parent)

    def add(
        self,
        image,
        link_to_file=False,
        save_with_document=True,
        left=None,
        top=None,
        width=None,
        height=None,
        name=None,
        update=False,
        scale=None,
        format=None,
        anchor=None,
        export_options=None,
    ):
        """
        Adds a picture to the specified sheet.

        Arguments
        ---------
        image : str or path-like object or matplotlib.figure.Figure
            Either a filepath or a Matplotlib figure object.
        left : float, default None
            Left position in points, defaults to 0. If you use ``top``/``left``,
            you must not provide a value for ``anchor``.
        top : float, default None
            Top position in points, defaults to 0. If you use ``top``/``left``,
            you must not provide a value for ``anchor``.
        width : float, default None
            Width in points. Defaults to original width.
        height : float, default None
            Height in points. Defaults to original height.
        name : str, default None
            Excel picture name. Defaults to Excel standard name if not provided,
            e.g., 'Picture 1'.
        update : bool, default False
            Replace an existing picture with the same name. Requires ``name``
            to be set.
        scale : float, default None
            Scales your picture by the provided factor.
        format : str, default None
            Only used if image is a Matplotlib or Plotly plot. By default, the
            plot is inserted in the "png" format, but you may want to change
            this to a vector-based format like "svg" on Windows (may require
            Microsoft 365) or "eps" on macOS for better print quality. If you
            use ``'vector'``, it will be using ``'svg'`` on Windows and
            ``'eps'`` on macOS. To find out which formats your version of Excel
            supports, see:
            https://support.microsoft.com/en-us/topic/support-for-eps-images-has-been-turned-off-in-office-a069d664-4bcf-415e-a1b5-cbb0c334a840
        anchor: xw.Range, default None
            The xlwings Range object of where you want to insert the picture.
            If you use ``anchor``, you must not provide values for
            ``top``/``left``.

            .. versionadded:: 0.24.3
        export_options : dict, default None
            For Matplotlib plots, this dictionary is passed on to
            ``image.savefig()`` with the following defaults:
            ``{"bbox_inches": "tight", "dpi": 200}``, so if you want to leave
            the picture uncropped and increase dpi to 300, use:
            ``export_options={"dpi": 300}``. For Plotly, the options are passed
            to ``write_image()``.

            .. versionadded:: 0.27.7

        Returns
        -------
        Picture

        Examples
        --------

        1. Picture

        >>> import xlwings as xw
        >>> sht = xw.Book().sheets[0]
        >>> sht.pictures.add(r'C:\\path\\to\\file.png')
        <Picture 'Picture 1' in <Sheet [Book1]Sheet1>>

        2. Matplotlib

        >>> import matplotlib.pyplot as plt
        >>> fig = plt.figure()
        >>> plt.plot([1, 2, 3, 4, 5])
        >>> sht.pictures.add(fig, name='MyPlot', update=True)
        <Picture 'MyPlot' in <Sheet [Book1]Sheet1>>
        """
        if anchor:
            if top or left:
                raise ValueError(
                    "You must either provide 'anchor' or 'top'/'left', but not both."
                )
        if update:
            if name is None:
                raise ValueError("If update is true then name must be specified")
            else:
                try:
                    pic = self[name]
                    return pic.update(
                        image, format=format, export_options=export_options
                    )
                except KeyError:
                    pass
        if name and name in self.parent.pictures:
            raise ShapeAlreadyExists(
                f"'{name}' is already present on {self.parent.name}."
            )

        filename, is_temp_file = utils.process_image(
            image,
            format="png" if not format else format,
            export_options=export_options,
        )

        if not (link_to_file or save_with_document):
            raise Exception(
                "Arguments link_to_file and save_with_document cannot both be false"
            )

        if (
            (height and width is None)
            or (width and height is None)
            or (width is None and height is None)
        ):
            # If only height or width are provided, it will be scaled after adding it
            # with the original dimensions
            im_width, im_height = -1, -1
        else:
            im_width, im_height = width, height

        picture = Picture(
            impl=self.impl.add(
                filename,
                link_to_file,
                save_with_document,
                left if left else None,
                top if top else None,
                width=im_width,
                height=im_height,
                anchor=anchor,
            )
        )

        if (height and width is None) or (width and height is None):
            # If only height or width are provided, lock aspect ratio so the picture
            # won't be distorted
            picture.lock_aspect_ratio = True
            if height:
                picture.height = height
            else:
                picture.width = width

        if scale:
            self.parent.shapes[picture.name].scale_width(
                factor=scale, relative_to_original_size=True
            )
            self.parent.shapes[picture.name].scale_height(
                factor=scale, relative_to_original_size=True
            )

        if name is not None:
            picture.name = name

        # Cleanup temp file
        if is_temp_file:
            try:
                os.unlink(filename)
            except:  # noqa: E722
                pass
        return picture
Pictures
python
PrefectHQ__prefect
tests/server/models/test_work_queues.py
{ "start": 2208, "end": 2705 }
class ____:
    async def test_read_work_queue_by_id(self, session, work_queue):
        read_work_queue = await models.work_queues.read_work_queue(
            session=session, work_queue_id=work_queue.id
        )
        assert read_work_queue.name == work_queue.name

    async def test_read_work_queue_by_id_returns_none_if_does_not_exist(self, session):
        assert not await models.work_queues.read_work_queue(
            session=session, work_queue_id=uuid4()
        )
TestReadWorkQueue
python
huggingface__transformers
src/transformers/models/altclip/modeling_altclip.py
{ "start": 4100, "end": 8945 }
class ____(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = self.create_position_ids_from_input_ids(
                    input_ids, self.padding_idx, past_key_values_length
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        batch_size, seq_length = input_shape

        # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
        # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
        # issue #5664
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
                buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
                buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
                token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = embeddings + position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    @staticmethod
    def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.

        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)

    @staticmethod
    def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            x: torch.Tensor x:

        Returns: torch.Tensor
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx
AltRobertaEmbeddings
python
walkccc__LeetCode
solutions/1960. Maximum Product of the Length of Two Palindromic Substrings/1960.py
{ "start": 0, "end": 1009 }
class ____:
  def maxProduct(self, s: str) -> int:
    n = len(s)

    def manacher(s: str) -> list[int]:
      maxExtends = [0] * n
      leftToRight = [1] * n
      center = 0

      for i in range(n):
        r = center + maxExtends[center] - 1
        mirrorIndex = center - (i - center)
        extend = 1 if i > r else min(maxExtends[mirrorIndex], r - i + 1)
        while i - extend >= 0 and i + extend < n and s[i - extend] == s[i + extend]:
          leftToRight[i + extend] = 2 * extend + 1
          extend += 1
        maxExtends[i] = extend
        if i + maxExtends[i] >= r:
          center = i

      for i in range(1, n):
        leftToRight[i] = max(leftToRight[i], leftToRight[i - 1])

      return leftToRight

    # maxLeft[i] := the maximum odd length of palindromes in s[0..i]
    maxLeft = manacher(s)
    # maxRight[i] := the maximum odd length of palindromes in s[i..n - 1]
    maxRight = manacher(s[::-1])[::-1]
    return max(maxLeft[i - 1] * maxRight[i] for i in range(1, n))
Solution
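A minimal usage sketch for the row above (assumes ____ is filled with the target value Solution; the input is LeetCode 1960's first example):

# "ababbb" contains the disjoint odd-length palindromes "aba" and "bbb",
# so the maximum product of lengths is 3 * 3 = 9.
print(Solution().maxProduct("ababbb"))  # 9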
python
Lightning-AI__lightning
src/lightning/pytorch/utilities/migration/utils.py
{ "start": 3018, "end": 7951 }
class ____: """Registers legacy artifacts (classes, methods, etc.) that were removed but still need to be included for unpickling old checkpoints. The following patches apply. 1. ``lightning.pytorch.utilities.argparse._gpus_arg_default``: Applies to all checkpoints saved prior to version 1.2.8. See: https://github.com/Lightning-AI/pytorch-lightning/pull/6898 2. ``lightning.pytorch.utilities.argparse_utils``: A module that was deprecated in 1.2 and removed in 1.4, but still needs to be available for import for legacy checkpoints. 3. ``lightning.pytorch.utilities.enums._FaultTolerantMode``: This enum was removed in 2.0 but was pickled into older checkpoints. 4. In legacy versions of Lightning, callback classes got pickled into the checkpoint. These classes have a module import path under ``pytorch_lightning`` and must be redirected to the ``lightning.pytorch``. Example: with pl_legacy_patch(): torch.load("path/to/legacy/checkpoint.ckpt") """ def __enter__(self) -> "pl_legacy_patch": _lock.acquire() # `pl.utilities.argparse_utils` was renamed to `pl.utilities.argparse` legacy_argparse_module = ModuleType("lightning.pytorch.utilities.argparse_utils") sys.modules["lightning.pytorch.utilities.argparse_utils"] = legacy_argparse_module # `_gpus_arg_default` used to be imported from these locations legacy_argparse_module._gpus_arg_default = lambda x: x pl.utilities.argparse._gpus_arg_default = lambda x: x # `_FaultTolerantMode` was removed from the enums class _FaultTolerantMode(LightningEnum): DISABLED = "disabled" AUTOMATIC = "automatic" MANUAL = "manual" pl.utilities.enums._FaultTolerantMode = _FaultTolerantMode # Patch Unpickler to redirect `pytorch_lightning` imports self._old_unpickler = pickle.Unpickler pickle.Unpickler = _RedirectingUnpickler # type: ignore return self def __exit__( self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], exc_traceback: Optional[TracebackType], ) -> None: if hasattr(pl.utilities.argparse, "_gpus_arg_default"): delattr(pl.utilities.argparse, "_gpus_arg_default") del sys.modules["lightning.pytorch.utilities.argparse_utils"] if hasattr(pl.utilities.enums, "_FaultTolerantMode"): delattr(pl.utilities.enums, "_FaultTolerantMode") pickle.Unpickler = self._old_unpickler # type: ignore _lock.release() def _pl_migrate_checkpoint(checkpoint: _CHECKPOINT, checkpoint_path: Optional[_PATH] = None) -> _CHECKPOINT: """Applies Lightning version migrations to a checkpoint dictionary and prints infos for the user. This function is used by the Lightning Trainer when resuming from a checkpoint. """ old_version = _get_version(checkpoint) checkpoint, migrations = migrate_checkpoint(checkpoint) new_version = _get_version(checkpoint) if not migrations or checkpoint_path is None: # the checkpoint was already a new one, no migrations were needed return checkpoint # include the full upgrade command, including the path to the loaded file in the error message, # so user can copy-paste and run if they want # side-step bug: ValueError: path is on mount 'C:', start on mount 'D:' path_hint = os.path.relpath(checkpoint_path, os.getcwd()) if not _IS_WINDOWS else os.path.abspath(checkpoint_path) _log.info( f"Lightning automatically upgraded your loaded checkpoint from v{old_version} to v{new_version}." 
" To apply the upgrade to your files permanently, run" f" `python -m lightning.pytorch.utilities.upgrade_checkpoint {str(path_hint)}`" ) return checkpoint def _get_version(checkpoint: _CHECKPOINT) -> str: """Get the version of a Lightning checkpoint.""" return checkpoint["pytorch-lightning_version"] def _set_version(checkpoint: _CHECKPOINT, version: str) -> None: """Set the version of a Lightning checkpoint.""" checkpoint["pytorch-lightning_version"] = version def _set_legacy_version(checkpoint: _CHECKPOINT, version: str) -> None: """Set the legacy version of a Lightning checkpoint if a legacy version is not already set.""" checkpoint.setdefault("legacy_pytorch-lightning_version", version) def _should_upgrade(checkpoint: _CHECKPOINT, target: str, max_version: Optional[str] = None) -> bool: """Returns whether a checkpoint qualifies for an upgrade when the version is lower than the given target.""" target_version = Version(target) is_lte_max_version = max_version is None or target_version <= Version(max_version) return is_lte_max_version and Version(_get_version(checkpoint)) < target_version
pl_legacy_patch
python
pennersr__django-allauth
allauth/socialaccount/providers/github/provider.py
{ "start": 528, "end": 1625 }
class ____(OAuth2Provider):
    id = "github"
    name = "GitHub"
    account_class = GitHubAccount
    oauth2_adapter_class = GitHubOAuth2Adapter

    def get_default_scope(self):
        scope = []
        if app_settings.QUERY_EMAIL:
            scope.append("user:email")
        return scope

    def extract_uid(self, data):
        return str(data["id"])

    def extract_common_fields(self, data):
        return dict(
            email=data.get("email"),
            username=data.get("login"),
            name=data.get("name"),
        )

    def extract_extra_data(self, data):
        if "emails" in data:
            data = dict(data)
            data.pop("emails")
        return data

    def extract_email_addresses(self, data):
        ret = []
        for email in data.get("emails", []):
            ret.append(
                EmailAddress(
                    email=email["email"],
                    primary=email["primary"],
                    verified=email["verified"],
                )
            )
        return ret


provider_classes = [GitHubProvider]
GitHubProvider
python
django__django
tests/aggregation/models.py
{ "start": 509, "end": 1031 }
class ____(models.Model):
    isbn = models.CharField(max_length=9)
    name = models.CharField(max_length=255)
    pages = models.IntegerField()
    rating = models.FloatField()
    price = models.DecimalField(decimal_places=2, max_digits=6)
    authors = models.ManyToManyField(Author)
    contact = models.ForeignKey(Author, models.CASCADE, related_name="book_contact_set")
    publisher = models.ForeignKey(Publisher, models.CASCADE)
    pubdate = models.DateField()

    def __str__(self):
        return self.name
Book
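A short sketch of the kind of aggregation query this test model supports (assumes ____ is filled with the target value Book; the Avg/Count calls are standard Django ORM):

# Illustrative aggregation over the model above.
from django.db.models import Avg, Count

Book.objects.aggregate(Avg("price"))                 # {"price__avg": ...}
Book.objects.annotate(num_authors=Count("authors"))  # per-book author count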
python
django__django
tests/admin_changelist/tests.py
{ "start": 81565, "end": 97807 }
class ____(AdminSeleniumTestCase):
    available_apps = ["admin_changelist"] + AdminSeleniumTestCase.available_apps

    def setUp(self):
        User.objects.create_superuser(username="super", password="secret", email=None)

    def test_add_row_selection(self):
        """
        The status line for selected rows gets updated correctly (#22038).
        """
        from selenium.webdriver.common.by import By

        self.admin_login(username="super", password="secret")
        self.selenium.get(self.live_server_url + reverse("admin:auth_user_changelist"))

        form_id = "#changelist-form"

        # Test amount of rows in the Changelist
        rows = self.selenium.find_elements(
            By.CSS_SELECTOR, "%s #result_list tbody tr" % form_id
        )
        self.assertEqual(len(rows), 1)
        row = rows[0]

        selection_indicator = self.selenium.find_element(
            By.CSS_SELECTOR, "%s .action-counter" % form_id
        )
        all_selector = self.selenium.find_element(By.ID, "action-toggle")
        row_selector = self.selenium.find_element(
            By.CSS_SELECTOR,
            "%s #result_list tbody tr:first-child .action-select" % form_id,
        )

        # Test current selection
        self.assertEqual(selection_indicator.text, "0 of 1 selected")
        self.assertIs(all_selector.get_property("checked"), False)
        self.assertEqual(row.get_attribute("class"), "")

        # Select a row and check again
        row_selector.click()
        self.assertEqual(selection_indicator.text, "1 of 1 selected")
        self.assertIs(all_selector.get_property("checked"), True)
        self.assertEqual(row.get_attribute("class"), "selected")

        # Deselect a row and check again
        row_selector.click()
        self.assertEqual(selection_indicator.text, "0 of 1 selected")
        self.assertIs(all_selector.get_property("checked"), False)
        self.assertEqual(row.get_attribute("class"), "")

    def test_modifier_allows_multiple_section(self):
        """
        Selecting a row and then selecting another row whilst holding shift
        should select all rows in-between.
        """
        from selenium.webdriver.common.action_chains import ActionChains
        from selenium.webdriver.common.by import By
        from selenium.webdriver.common.keys import Keys

        Parent.objects.bulk_create([Parent(name="parent%d" % i) for i in range(5)])
        self.admin_login(username="super", password="secret")
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
        )
        checkboxes = self.selenium.find_elements(
            By.CSS_SELECTOR, "tr input.action-select"
        )
        self.assertEqual(len(checkboxes), 5)
        for c in checkboxes:
            self.assertIs(c.get_property("checked"), False)
        # Check first row. Hold-shift and check next-to-last row.
        checkboxes[0].click()
        ActionChains(self.selenium).key_down(Keys.SHIFT).click(checkboxes[-2]).key_up(
            Keys.SHIFT
        ).perform()
        for c in checkboxes[:-2]:
            self.assertIs(c.get_property("checked"), True)
        self.assertIs(checkboxes[-1].get_property("checked"), False)

    def test_selection_counter_is_synced_when_page_is_shown(self):
        from selenium.webdriver.common.by import By

        self.admin_login(username="super", password="secret")
        self.selenium.get(self.live_server_url + reverse("admin:auth_user_changelist"))
        form_id = "#changelist-form"
        first_row_checkbox_selector = (
            f"{form_id} #result_list tbody tr:first-child .action-select"
        )
        selection_indicator_selector = f"{form_id} .action-counter"
        selection_indicator = self.selenium.find_element(
            By.CSS_SELECTOR, selection_indicator_selector
        )
        row_checkbox = self.selenium.find_element(
            By.CSS_SELECTOR, first_row_checkbox_selector
        )
        # Select a row.
        row_checkbox.click()
        self.assertEqual(selection_indicator.text, "1 of 1 selected")
        # Go to another page and get back.
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
        )
        self.selenium.back()
        # The selection indicator is synced with the selected checkboxes.
        selection_indicator = self.selenium.find_element(
            By.CSS_SELECTOR, selection_indicator_selector
        )
        row_checkbox = self.selenium.find_element(
            By.CSS_SELECTOR, first_row_checkbox_selector
        )
        selected_rows = 1 if row_checkbox.is_selected() else 0
        self.assertEqual(selection_indicator.text, f"{selected_rows} of 1 selected")

    def test_select_all_across_pages(self):
        from selenium.webdriver.common.by import By

        Parent.objects.bulk_create([Parent(name="parent%d" % i) for i in range(101)])
        self.admin_login(username="super", password="secret")
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
        )

        selection_indicator = self.selenium.find_element(
            By.CSS_SELECTOR, ".action-counter"
        )
        select_all_indicator = self.selenium.find_element(
            By.CSS_SELECTOR, ".actions .all"
        )
        question = self.selenium.find_element(By.CSS_SELECTOR, ".actions > .question")
        clear = self.selenium.find_element(By.CSS_SELECTOR, ".actions > .clear")
        select_all = self.selenium.find_element(By.ID, "action-toggle")
        select_across = self.selenium.find_elements(By.NAME, "select_across")

        self.assertIs(question.is_displayed(), False)
        self.assertIs(clear.is_displayed(), False)
        self.assertIs(select_all.get_property("checked"), False)
        for hidden_input in select_across:
            self.assertEqual(hidden_input.get_property("value"), "0")
        self.assertIs(selection_indicator.is_displayed(), True)
        self.assertEqual(selection_indicator.text, "0 of 100 selected")
        self.assertIs(select_all_indicator.is_displayed(), False)

        select_all.click()
        self.assertIs(question.is_displayed(), True)
        self.assertIs(clear.is_displayed(), False)
        self.assertIs(select_all.get_property("checked"), True)
        for hidden_input in select_across:
            self.assertEqual(hidden_input.get_property("value"), "0")
        self.assertIs(selection_indicator.is_displayed(), True)
        self.assertEqual(selection_indicator.text, "100 of 100 selected")
        self.assertIs(select_all_indicator.is_displayed(), False)

        question.click()
        self.assertIs(question.is_displayed(), False)
        self.assertIs(clear.is_displayed(), True)
        self.assertIs(select_all.get_property("checked"), True)
        for hidden_input in select_across:
            self.assertEqual(hidden_input.get_property("value"), "1")
        self.assertIs(selection_indicator.is_displayed(), False)
        self.assertIs(select_all_indicator.is_displayed(), True)

        clear.click()
        self.assertIs(question.is_displayed(), False)
        self.assertIs(clear.is_displayed(), False)
        self.assertIs(select_all.get_property("checked"), False)
        for hidden_input in select_across:
            self.assertEqual(hidden_input.get_property("value"), "0")
        self.assertIs(selection_indicator.is_displayed(), True)
        self.assertEqual(selection_indicator.text, "0 of 100 selected")
        self.assertIs(select_all_indicator.is_displayed(), False)

    def test_actions_warn_on_pending_edits(self):
        from selenium.webdriver.common.by import By

        Parent.objects.create(name="foo")

        self.admin_login(username="super", password="secret")
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
        )

        name_input = self.selenium.find_element(By.ID, "id_form-0-name")
        name_input.clear()
        name_input.send_keys("bar")
        self.selenium.find_element(By.ID, "action-toggle").click()
        self.selenium.find_element(By.NAME, "index").click()  # Go
        alert = self.selenium.switch_to.alert
        try:
            self.assertEqual(
                alert.text,
                "You have unsaved changes on individual editable fields. If you "
                "run an action, your unsaved changes will be lost.",
            )
        finally:
            alert.dismiss()

    def test_save_with_changes_warns_on_pending_action(self):
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support.ui import Select

        Parent.objects.create(name="parent")

        self.admin_login(username="super", password="secret")
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
        )

        name_input = self.selenium.find_element(By.ID, "id_form-0-name")
        name_input.clear()
        name_input.send_keys("other name")
        Select(self.selenium.find_element(By.NAME, "action")).select_by_value(
            "delete_selected"
        )
        self.selenium.find_element(By.NAME, "_save").click()
        alert = self.selenium.switch_to.alert
        try:
            self.assertEqual(
                alert.text,
                "You have selected an action, but you haven’t saved your "
                "changes to individual fields yet. Please click OK to save. "
                "You’ll need to re-run the action.",
            )
        finally:
            alert.dismiss()

    def test_save_without_changes_warns_on_pending_action(self):
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support.ui import Select

        Parent.objects.create(name="parent")

        self.admin_login(username="super", password="secret")
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_changelist_parent_changelist")
        )

        Select(self.selenium.find_element(By.NAME, "action")).select_by_value(
            "delete_selected"
        )
        self.selenium.find_element(By.NAME, "_save").click()
        alert = self.selenium.switch_to.alert
        try:
            self.assertEqual(
                alert.text,
                "You have selected an action, and you haven’t made any "
                "changes on individual fields. You’re probably looking for "
                "the Go button rather than the Save button.",
            )
        finally:
            alert.dismiss()

    def test_collapse_filters(self):
        from selenium.webdriver.common.by import By

        self.admin_login(username="super", password="secret")
        self.selenium.get(self.live_server_url + reverse("admin:auth_user_changelist"))

        # The UserAdmin has 3 field filters by default: "staff status",
        # "superuser status", and "active".
        details = self.selenium.find_elements(By.CSS_SELECTOR, "details")
        # All filters are opened at first.
        for detail in details:
            self.assertTrue(detail.get_attribute("open"))
        # Collapse "staff' and "superuser" filters.
        for detail in details[:2]:
            summary = detail.find_element(By.CSS_SELECTOR, "summary")
            summary.click()
            self.assertFalse(detail.get_attribute("open"))
        # Filters are in the same state after refresh.
        self.selenium.refresh()
        self.assertFalse(
            self.selenium.find_element(
                By.CSS_SELECTOR, "[data-filter-title='staff status']"
            ).get_attribute("open")
        )
        self.assertFalse(
            self.selenium.find_element(
                By.CSS_SELECTOR, "[data-filter-title='superuser status']"
            ).get_attribute("open")
        )
        self.assertTrue(
            self.selenium.find_element(
                By.CSS_SELECTOR, "[data-filter-title='active']"
            ).get_attribute("open")
        )
        # Collapse a filter on another view (Bands).
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_changelist_band_changelist")
        )
        self.selenium.find_element(By.CSS_SELECTOR, "summary").click()
        # Go to Users view and then, back again to Bands view.
        self.selenium.get(self.live_server_url + reverse("admin:auth_user_changelist"))
        self.selenium.get(
            self.live_server_url + reverse("admin:admin_changelist_band_changelist")
        )
        # The filter remains in the same state.
        self.assertFalse(
            self.selenium.find_element(
                By.CSS_SELECTOR,
                "[data-filter-title='number of members']",
            ).get_attribute("open")
        )

    def test_collapse_filter_with_unescaped_title(self):
        from selenium.webdriver.common.by import By

        self.admin_login(username="super", password="secret")
        changelist_url = reverse("admin:admin_changelist_proxyuser_changelist")
        self.selenium.get(self.live_server_url + changelist_url)
        # Title is escaped.
        filter_title = self.selenium.find_element(
            By.CSS_SELECTOR, "[data-filter-title='It\\'s OK']"
        )
        filter_title.find_element(By.CSS_SELECTOR, "summary").click()
        self.assertFalse(filter_title.get_attribute("open"))
        # Filter is in the same state after refresh.
        self.selenium.refresh()
        self.assertFalse(
            self.selenium.find_element(
                By.CSS_SELECTOR, "[data-filter-title='It\\'s OK']"
            ).get_attribute("open")
        )

    def test_list_display_ordering(self):
        from selenium.webdriver.common.by import By

        parent_a = Parent.objects.create(name="Parent A")
        child_l = Child.objects.create(name="Child L", parent=None)
        child_m = Child.objects.create(name="Child M", parent=parent_a)
        GrandChild.objects.create(name="Grandchild X", parent=child_m)
        GrandChild.objects.create(name="Grandchild Y", parent=child_l)
        GrandChild.objects.create(name="Grandchild Z", parent=None)

        self.admin_login(username="super", password="secret")
        changelist_url = reverse("admin:admin_changelist_grandchild_changelist")
        self.selenium.get(self.live_server_url + changelist_url)

        def find_result_row_texts():
            table = self.selenium.find_element(By.ID, "result_list")
            # Drop header from the result list
            return [row.text for row in table.find_elements(By.TAG_NAME, "tr")][1:]

        def expected_from_queryset(qs):
            return [
                " ".join("-" if i is None else i for i in item)
                for item in qs.values_list(
                    "name", "parent__name", "parent__parent__name"
                )
            ]

        cases = [
            # Order ascending by `name`.
            ("th.sortable.column-name", ("name",)),
            # Order descending by `name`.
            ("th.sortable.column-name", ("-name",)),
            # Order ascending by `parent__name`.
            ("th.sortable.column-parent__name", ("parent__name", "-name")),
            # Order descending by `parent__name`.
            ("th.sortable.column-parent__name", ("-parent__name", "-name")),
            # Order ascending by `parent__parent__name`.
            (
                "th.sortable.column-parent__parent__name",
                ("parent__parent__name", "-parent__name", "-name"),
            ),
            # Order descending by `parent__parent__name`.
            (
                "th.sortable.column-parent__parent__name",
                ("-parent__parent__name", "-parent__name", "-name"),
            ),
        ]
        for css_selector, ordering in cases:
            with self.subTest(ordering=ordering):
                self.selenium.find_element(By.CSS_SELECTOR, css_selector).click()
                expected = expected_from_queryset(
                    GrandChild.objects.all().order_by(*ordering)
                )
                self.assertEqual(find_result_row_texts(), expected)
SeleniumTests
python
run-llama__llama_index
llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/llama_index/llms/cloudflare_ai_gateway/base.py
{ "start": 925, "end": 1039 }
class ____(Exception):
    """Base exception for Cloudflare AI Gateway errors."""

    pass
CloudflareAIGatewayError
python
boto__boto3
boto3/dynamodb/conditions.py
{ "start": 6497, "end": 6602 }
class ____(ConditionBase):
    expression_operator = 'OR'
    expression_format = '({0} {operator} {1})'
Or
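A minimal sketch of how this condition type arises in practice (assumes ____ is filled with the target value Or; boto3 overloads | on conditions, and the attribute names here are illustrative):

# OR-ing two attribute conditions yields an Or instance, rendered via
# expression_format as "(<cond1> OR <cond2>)" when the expression is built.
from boto3.dynamodb.conditions import Attr

cond = Attr("price").lt(100) | Attr("on_sale").eq(True)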
python
astropy__astropy
astropy/modeling/spline.py
{ "start": 587, "end": 6960 }
class ____(FittableModel):
    """Base class for spline models."""

    _knot_names = ()
    _coeff_names = ()

    optional_inputs = {}

    def __init__(
        self,
        knots=None,
        coeffs=None,
        degree=None,
        bounds=None,
        n_models=None,
        model_set_axis=None,
        name=None,
        meta=None,
    ):
        super().__init__(
            n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta
        )

        self._user_knots = False
        self._init_tck(degree)

        # Hack to allow an optional model argument
        self._create_optional_inputs()

        if knots is not None:
            self._init_spline(knots, coeffs, bounds)
        elif coeffs is not None:
            raise ValueError(
                "If one passes a coeffs vector one needs to also pass knots!"
            )

    @property
    def param_names(self):
        """
        Coefficient names generated based on the spline's degree and number of
        knots.
        """
        return tuple(list(self._knot_names) + list(self._coeff_names))

    @staticmethod
    def _optional_arg(arg):
        return f"_{arg}"

    def _create_optional_inputs(self):
        for arg in self.optional_inputs:
            attribute = self._optional_arg(arg)
            if hasattr(self, attribute):
                raise ValueError(
                    f"Optional argument {arg} already exists in this class!"
                )
            else:
                setattr(self, attribute, None)

    def _intercept_optional_inputs(self, **kwargs):
        new_kwargs = kwargs
        for arg in self.optional_inputs:
            if arg in kwargs:
                attribute = self._optional_arg(arg)
                if getattr(self, attribute) is None:
                    setattr(self, attribute, kwargs[arg])
                    del new_kwargs[arg]
                else:
                    raise RuntimeError(
                        f"{arg} has already been set, something has gone wrong!"
                    )
        return new_kwargs

    def evaluate(self, *args, **kwargs):
        """Extract the optional kwargs passed to call."""
        optional_inputs = kwargs
        for arg in self.optional_inputs:
            attribute = self._optional_arg(arg)
            if arg in kwargs:
                # Options passed in
                optional_inputs[arg] = kwargs[arg]
            elif getattr(self, attribute) is not None:
                # No options passed in and Options set
                optional_inputs[arg] = getattr(self, attribute)
                setattr(self, attribute, None)
            else:
                # No options passed in and No options set
                optional_inputs[arg] = self.optional_inputs[arg]

        return optional_inputs

    def __call__(self, *args, **kwargs):
        """
        Make model callable to model evaluation.
        """
        # Hack to allow an optional model argument
        kwargs = self._intercept_optional_inputs(**kwargs)
        return super().__call__(*args, **kwargs)

    def _create_parameter(self, name: str, index: int, attr: str, fixed=False):
        """
        Create a spline parameter linked to an attribute array.

        Parameters
        ----------
        name : str
            Name for the parameter
        index : int
            The index of the parameter in the array
        attr : str
            The name for the attribute array
        fixed : optional, bool
            If the parameter should be fixed or not
        """

        # Hack to allow parameters and attribute array to freely exchange values
        #   _getter forces reading value from attribute array
        #   _setter forces setting value to attribute array
        def _getter(value, model: "_Spline", index: int, attr: str):
            return getattr(model, attr)[index]

        def _setter(value, model: "_Spline", index: int, attr: str):
            getattr(model, attr)[index] = value
            return value

        getter = functools.partial(_getter, index=index, attr=attr)
        setter = functools.partial(_setter, index=index, attr=attr)

        default = getattr(self, attr)
        param = Parameter(
            name=name, default=default[index], fixed=fixed, getter=getter, setter=setter
        )
        # setter/getter wrapper for parameters in this case require the
        # parameter to have a reference back to its parent model
        param.model = self
        param.value = default[index]

        # Add parameter to model
        self.__dict__[name] = param

    def _create_parameters(self, base_name: str, attr: str, fixed=False):
        """
        Create a spline parameters linked to an attribute array for all
        elements in that array.

        Parameters
        ----------
        base_name : str
            Base name for the parameters
        attr : str
            The name for the attribute array
        fixed : optional, bool
            If the parameters should be fixed or not
        """
        names = []
        for index in range(len(getattr(self, attr))):
            name = f"{base_name}{index}"
            names.append(name)
            self._create_parameter(name, index, attr, fixed)
        return tuple(names)

    @abc.abstractmethod
    def _init_parameters(self):
        raise NotImplementedError("This needs to be implemented")

    @abc.abstractmethod
    def _init_data(self, knots, coeffs, bounds=None):
        raise NotImplementedError("This needs to be implemented")

    def _init_spline(self, knots, coeffs, bounds=None):
        self._init_data(knots, coeffs, bounds)
        self._init_parameters()
        # fill _parameters and related attributes
        self._initialize_parameters((), {})
        self._initialize_slices()
        # Calling this will properly fill the _parameter vector, which is
        # used directly sometimes without being properly filled.
        _ = self.parameters

    def _init_tck(self, degree):
        self._c = None
        self._t = None
        self._degree = degree

    def __getstate__(self):
        return {
            "t": self._t,
            "c": self._c,
            "k": self._degree,
        }

    def __setstate__(self, state):
        return self.__init__(knots=state["t"], coeffs=state["c"], degree=state["k"])
_Spline
python
python-poetry__poetry
src/poetry/utils/password_manager.py
{ "start": 442, "end": 588 }
class ____:
    username: str | None = dataclasses.field(default=None)
    password: str | None = dataclasses.field(default=None)
HTTPAuthCredential
python
fastai__fastai
fastai/metrics.py
{ "start": 17345, "end": 17880 }
class ____(Metric):
    "Dice coefficient metric for binary target in segmentation"
    def __init__(self, axis=1): self.axis = axis
    def reset(self): self.inter,self.union = 0,0
    def accumulate(self, learn):
        pred,targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
        self.inter += (pred*targ).float().sum().item()
        self.union += (pred+targ).float().sum().item()
    @property
    def value(self): return 2. * self.inter/self.union if self.union > 0 else None

# %% ../nbs/13b_metrics.ipynb 116
Dice
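A minimal standalone sketch of the formula the metric above accumulates, dice = 2 * |pred AND targ| / (|pred| + |targ|), on toy binary tensors (plain PyTorch, no fastai Learner involved):

import torch

pred = torch.tensor([1, 1, 0, 0])
targ = torch.tensor([1, 0, 0, 0])
inter = (pred * targ).float().sum()  # 1 overlapping positive
union = (pred + targ).float().sum()  # 2 + 1 = 3 positives in total
print((2.0 * inter / union).item())  # 0.666...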
python
FactoryBoy__factory_boy
tests/utils.py
{ "start": 996, "end": 1449 }
class ____(MultiModulePatcher):
    """A context processor changing the value of date.today()."""

    def __init__(self, target_date, *target_modules, **kwargs):
        self.target_date = target_date
        super().__init__(*target_modules, **kwargs)

    def _build_patcher(self, target_module):
        module_datetime = getattr(target_module, 'datetime')
        return alter_time.mock_date_today(self.target_date, module_datetime)
mocked_date_today
python
doocs__leetcode
solution/2800-2899/2831.Find the Longest Equal Subarray/Solution.py
{ "start": 0, "end": 341 }
class ____:
    def longestEqualSubarray(self, nums: List[int], k: int) -> int:
        cnt = Counter()
        l = 0
        mx = 0
        for r, x in enumerate(nums):
            cnt[x] += 1
            mx = max(mx, cnt[x])
            if r - l + 1 - mx > k:
                cnt[nums[l]] -= 1
                l += 1
        return mx
Solution
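An illustrative call for the sliding-window record above, using the class name from its target line. With nums = [1, 3, 2, 3, 1, 3] and k = 3, the window spanning the three 3s needs only two deletions, so the answer is 3:

from collections import Counter
from typing import List

nums, k = [1, 3, 2, 3, 1, 3], 3
print(Solution().longestEqualSubarray(nums, k))  # 3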
python
ansible__ansible
test/lib/ansible_test/_internal/util.py
{ "start": 26359, "end": 29873 }
class ____:
    """Manages color console output."""

    clear = '\033[0m'
    red = '\033[31m'
    green = '\033[32m'
    yellow = '\033[33m'
    blue = '\033[34m'
    purple = '\033[35m'
    cyan = '\033[36m'

    verbosity_colors = {
        0: None,
        1: green,
        2: blue,
        3: cyan,
    }

    def __init__(self) -> None:
        self.verbosity = 0
        self.color = sys.stdout.isatty()
        self.warnings: list[str] = []
        self.warnings_unique: set[str] = set()

        self.fd = sys.stderr  # default to stderr until config is initialized to avoid early messages going to stdout
        self.rows = 0
        self.columns = 0
        self.truncate = 0
        self.redact = True
        self.sensitive: set[str] = set()

        if os.isatty(0):
            self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2]

    def __warning(self, message: str) -> None:
        """Internal implementation for displaying a warning message."""
        self.print_message('WARNING: %s' % message, color=self.purple)

    def review_warnings(self) -> None:
        """Review all warnings which previously occurred."""
        if not self.warnings:
            return

        self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))

        for warning in self.warnings:
            self.__warning(warning)

    def warning(self, message: str, unique: bool = False, verbosity: int = 0) -> None:
        """Display a warning level message."""
        if verbosity > self.verbosity:
            return

        if unique:
            if message in self.warnings_unique:
                return

            self.warnings_unique.add(message)

        self.__warning(message)
        self.warnings.append(message)

    def notice(self, message: str) -> None:
        """Display a notice level message."""
        self.print_message('NOTICE: %s' % message, color=self.purple)

    def error(self, message: str) -> None:
        """Display an error level message."""
        self.print_message('ERROR: %s' % message, color=self.red)

    def fatal(self, message: str) -> None:
        """Display a fatal level message."""
        self.print_message('FATAL: %s' % message, color=self.red, stderr=True)

    def info(self, message: str, verbosity: int = 0, truncate: bool = False) -> None:
        """Display an info level message."""
        if self.verbosity >= verbosity:
            color = self.verbosity_colors.get(verbosity, self.yellow)
            self.print_message(message, color=color, truncate=truncate)

    def print_message(  # pylint: disable=locally-disabled, invalid-name
        self,
        message: str,
        color: t.Optional[str] = None,
        stderr: bool = False,
        truncate: bool = False,
    ) -> None:
        """Display a message."""
        if self.redact and self.sensitive:
            for item in self.sensitive:
                if not item:
                    continue

                message = message.replace(item, '*' * len(item))

        if truncate:
            if len(message) > self.truncate > 5:
                message = message[:self.truncate - 5] + ' ...'

        if color and self.color:
            # convert color resets in message to desired color
            message = message.replace(self.clear, color)
            message = '%s%s%s' % (color, message, self.clear)

        fd = sys.stderr if stderr else self.fd

        print(message, file=fd)
        fd.flush()
Display
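A small driving sketch for the console manager above (name taken from the record's target line); it shows the verbosity gate in info() and the de-duplication in warning():

display = Display()
display.info('always shown at the default verbosity')
display.info('only shown when verbosity >= 1', verbosity=1)
display.warning('reported once even if repeated', unique=True)
display.warning('reported once even if repeated', unique=True)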
python
python-attrs__attrs
typing-examples/mypy.py
{ "start": 6909, "end": 7017 }
class ____:
    x: int

    def __init__(self, x: int):
        self.x = x


@attr.define(order=True)
AutoDetect
python
plotly__plotly.py
plotly/graph_objs/surface/_colorbar.py
{ "start": 233, "end": 61470 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "surface" _path_str = "surface.colorbar" _valid_props = { "bgcolor", "bordercolor", "borderwidth", "dtick", "exponentformat", "labelalias", "len", "lenmode", "minexponent", "nticks", "orientation", "outlinecolor", "outlinewidth", "separatethousands", "showexponent", "showticklabels", "showtickprefix", "showticksuffix", "thickness", "thicknessmode", "tick0", "tickangle", "tickcolor", "tickfont", "tickformat", "tickformatstopdefaults", "tickformatstops", "ticklabeloverflow", "ticklabelposition", "ticklabelstep", "ticklen", "tickmode", "tickprefix", "ticks", "ticksuffix", "ticktext", "ticktextsrc", "tickvals", "tickvalssrc", "tickwidth", "title", "x", "xanchor", "xpad", "xref", "y", "yanchor", "ypad", "yref", } @property def bgcolor(self): """ Sets the color of padded area. The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["bgcolor"] @bgcolor.setter def bgcolor(self, val): self["bgcolor"] = val @property def bordercolor(self): """ Sets the axis line color. The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["bordercolor"] @bordercolor.setter def bordercolor(self, val): self["bordercolor"] = val @property def borderwidth(self): """ Sets the width (in px) or the border enclosing this color bar. The 'borderwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["borderwidth"] @borderwidth.setter def borderwidth(self, val): self["borderwidth"] = val @property def dtick(self): """ Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". 
To set ticks every 4 years, set `dtick` to "M48" The 'dtick' property accepts values of any type Returns ------- Any """ return self["dtick"] @dtick.setter def dtick(self, val): self["dtick"] = val @property def exponentformat(self): """ Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). *SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. The 'exponentformat' property is an enumeration that may be specified as: - One of the following enumeration values: ['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended'] Returns ------- Any """ return self["exponentformat"] @exponentformat.setter def exponentformat(self, val): self["exponentformat"] = val @property def labelalias(self): """ Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html- like tags or MathJax. The 'labelalias' property accepts values of any type Returns ------- Any """ return self["labelalias"] @labelalias.setter def labelalias(self, val): self["labelalias"] = val @property def len(self): """ Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. The 'len' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["len"] @len.setter def len(self, val): self["len"] = val @property def lenmode(self): """ Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. The 'lenmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels'] Returns ------- Any """ return self["lenmode"] @lenmode.setter def lenmode(self, val): self["lenmode"] = val @property def minexponent(self): """ Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". The 'minexponent' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["minexponent"] @minexponent.setter def minexponent(self, val): self["minexponent"] = val @property def nticks(self): """ Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". 
The 'nticks' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [0, 9223372036854775807] Returns ------- int """ return self["nticks"] @nticks.setter def nticks(self, val): self["nticks"] = val @property def orientation(self): """ Sets the orientation of the colorbar. The 'orientation' property is an enumeration that may be specified as: - One of the following enumeration values: ['h', 'v'] Returns ------- Any """ return self["orientation"] @orientation.setter def orientation(self, val): self["orientation"] = val @property def outlinecolor(self): """ Sets the axis line color. The 'outlinecolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["outlinecolor"] @outlinecolor.setter def outlinecolor(self, val): self["outlinecolor"] = val @property def outlinewidth(self): """ Sets the width (in px) of the axis line. The 'outlinewidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["outlinewidth"] @outlinewidth.setter def outlinewidth(self, val): self["outlinewidth"] = val @property def separatethousands(self): """ If "true", even 4-digit integers are separated The 'separatethousands' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["separatethousands"] @separatethousands.setter def separatethousands(self, val): self["separatethousands"] = val @property def showexponent(self): """ If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. The 'showexponent' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showexponent"] @showexponent.setter def showexponent(self, val): self["showexponent"] = val @property def showticklabels(self): """ Determines whether or not the tick labels are drawn. The 'showticklabels' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["showticklabels"] @showticklabels.setter def showticklabels(self, val): self["showticklabels"] = val @property def showtickprefix(self): """ If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. The 'showtickprefix' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showtickprefix"] @showtickprefix.setter def showtickprefix(self, val): self["showtickprefix"] = val @property def showticksuffix(self): """ Same as `showtickprefix` but for tick suffixes. 
The 'showticksuffix' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showticksuffix"] @showticksuffix.setter def showticksuffix(self, val): self["showticksuffix"] = val @property def thickness(self): """ Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. The 'thickness' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["thickness"] @thickness.setter def thickness(self, val): self["thickness"] = val @property def thicknessmode(self): """ Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. The 'thicknessmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels'] Returns ------- Any """ return self["thicknessmode"] @thicknessmode.setter def thicknessmode(self, val): self["thicknessmode"] = val @property def tick0(self): """ Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. The 'tick0' property accepts values of any type Returns ------- Any """ return self["tick0"] @tick0.setter def tick0(self, val): self["tick0"] = val @property def tickangle(self): """ Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. The 'tickangle' property is a angle (in degrees) that may be specified as a number between -180 and 180. Numeric values outside this range are converted to the equivalent value (e.g. 270 is converted to -90). Returns ------- int|float """ return self["tickangle"] @tickangle.setter def tickangle(self, val): self["tickangle"] = val @property def tickcolor(self): """ Sets the tick color. The 'tickcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["tickcolor"] @tickcolor.setter def tickcolor(self, val): self["tickcolor"] = val @property def tickfont(self): """ Sets the color bar's tick label font The 'tickfont' property is an instance of Tickfont that may be specified as: - An instance of :class:`plotly.graph_objs.surface.colorbar.Tickfont` - A dict of string/value properties that will be passed to the Tickfont constructor Returns ------- plotly.graph_objs.surface.colorbar.Tickfont """ return self["tickfont"] @tickfont.setter def tickfont(self, val): self["tickfont"] = val @property def tickformat(self): """ Sets the tick label formatting rule using d3 formatting mini- languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. 
And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" The 'tickformat' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["tickformat"] @tickformat.setter def tickformat(self, val): self["tickformat"] = val @property def tickformatstops(self): """ The 'tickformatstops' property is a tuple of instances of Tickformatstop that may be specified as: - A list or tuple of instances of plotly.graph_objs.surface.colorbar.Tickformatstop - A list or tuple of dicts of string/value properties that will be passed to the Tickformatstop constructor Returns ------- tuple[plotly.graph_objs.surface.colorbar.Tickformatstop] """ return self["tickformatstops"] @tickformatstops.setter def tickformatstops(self, val): self["tickformatstops"] = val @property def tickformatstopdefaults(self): """ When used in a template (as layout.template.data.surface.colorbar.tickformatstopdefaults), sets the default property values to use for elements of surface.colorbar.tickformatstops The 'tickformatstopdefaults' property is an instance of Tickformatstop that may be specified as: - An instance of :class:`plotly.graph_objs.surface.colorbar.Tickformatstop` - A dict of string/value properties that will be passed to the Tickformatstop constructor Returns ------- plotly.graph_objs.surface.colorbar.Tickformatstop """ return self["tickformatstopdefaults"] @tickformatstopdefaults.setter def tickformatstopdefaults(self, val): self["tickformatstopdefaults"] = val @property def ticklabeloverflow(self): """ Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. The 'ticklabeloverflow' property is an enumeration that may be specified as: - One of the following enumeration values: ['allow', 'hide past div', 'hide past domain'] Returns ------- Any """ return self["ticklabeloverflow"] @ticklabeloverflow.setter def ticklabeloverflow(self, val): self["ticklabeloverflow"] = val @property def ticklabelposition(self): """ Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". The 'ticklabelposition' property is an enumeration that may be specified as: - One of the following enumeration values: ['outside', 'inside', 'outside top', 'inside top', 'outside left', 'inside left', 'outside right', 'inside right', 'outside bottom', 'inside bottom'] Returns ------- Any """ return self["ticklabelposition"] @ticklabelposition.setter def ticklabelposition(self, val): self["ticklabelposition"] = val @property def ticklabelstep(self): """ Sets the spacing between tick labels as compared to the spacing between ticks. A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". 
The 'ticklabelstep' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 9223372036854775807] Returns ------- int """ return self["ticklabelstep"] @ticklabelstep.setter def ticklabelstep(self, val): self["ticklabelstep"] = val @property def ticklen(self): """ Sets the tick length (in px). The 'ticklen' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["ticklen"] @ticklen.setter def ticklen(self, val): self["ticklen"] = val @property def tickmode(self): """ Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). The 'tickmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['auto', 'linear', 'array'] Returns ------- Any """ return self["tickmode"] @tickmode.setter def tickmode(self, val): self["tickmode"] = val @property def tickprefix(self): """ Sets a tick label prefix. The 'tickprefix' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["tickprefix"] @tickprefix.setter def tickprefix(self, val): self["tickprefix"] = val @property def ticks(self): """ Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. The 'ticks' property is an enumeration that may be specified as: - One of the following enumeration values: ['outside', 'inside', ''] Returns ------- Any """ return self["ticks"] @ticks.setter def ticks(self, val): self["ticks"] = val @property def ticksuffix(self): """ Sets a tick label suffix. The 'ticksuffix' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["ticksuffix"] @ticksuffix.setter def ticksuffix(self, val): self["ticksuffix"] = val @property def ticktext(self): """ Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. The 'ticktext' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["ticktext"] @ticktext.setter def ticktext(self, val): self["ticktext"] = val @property def ticktextsrc(self): """ Sets the source reference on Chart Studio Cloud for `ticktext`. The 'ticktextsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["ticktextsrc"] @ticktextsrc.setter def ticktextsrc(self, val): self["ticktextsrc"] = val @property def tickvals(self): """ Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. The 'tickvals' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["tickvals"] @tickvals.setter def tickvals(self, val): self["tickvals"] = val @property def tickvalssrc(self): """ Sets the source reference on Chart Studio Cloud for `tickvals`. 
The 'tickvalssrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["tickvalssrc"] @tickvalssrc.setter def tickvalssrc(self, val): self["tickvalssrc"] = val @property def tickwidth(self): """ Sets the tick width (in px). The 'tickwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["tickwidth"] @tickwidth.setter def tickwidth(self, val): self["tickwidth"] = val @property def title(self): """ The 'title' property is an instance of Title that may be specified as: - An instance of :class:`plotly.graph_objs.surface.colorbar.Title` - A dict of string/value properties that will be passed to the Title constructor Returns ------- plotly.graph_objs.surface.colorbar.Title """ return self["title"] @title.setter def title(self, val): self["title"] = val @property def x(self): """ Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". The 'x' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["x"] @x.setter def x(self, val): self["x"] = val @property def xanchor(self): """ Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". The 'xanchor' property is an enumeration that may be specified as: - One of the following enumeration values: ['left', 'center', 'right'] Returns ------- Any """ return self["xanchor"] @xanchor.setter def xanchor(self, val): self["xanchor"] = val @property def xpad(self): """ Sets the amount of padding (in px) along the x direction. The 'xpad' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["xpad"] @xpad.setter def xpad(self, val): self["xpad"] = val @property def xref(self): """ Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. The 'xref' property is an enumeration that may be specified as: - One of the following enumeration values: ['container', 'paper'] Returns ------- Any """ return self["xref"] @xref.setter def xref(self, val): self["xref"] = val @property def y(self): """ Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". The 'y' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["y"] @y.setter def y(self, val): self["y"] = val @property def yanchor(self): """ Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". 
The 'yanchor' property is an enumeration that may be specified as: - One of the following enumeration values: ['top', 'middle', 'bottom'] Returns ------- Any """ return self["yanchor"] @yanchor.setter def yanchor(self, val): self["yanchor"] = val @property def ypad(self): """ Sets the amount of padding (in px) along the y direction. The 'ypad' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["ypad"] @ypad.setter def ypad(self, val): self["ypad"] = val @property def yref(self): """ Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. The 'yref' property is an enumeration that may be specified as: - One of the following enumeration values: ['container', 'paper'] Returns ------- Any """ return self["yref"] @yref.setter def yref(self, val): self["yref"] = val @property def _prop_descriptions(self): return """\ bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). *SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. labelalias Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html-like tags or MathJax. len Sets the length of the color bar This measure excludes the padding of both ends. 
That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. minexponent Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". orientation Sets the orientation of the colorbar. outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.surface.colorba r.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.data.surfac e.colorbar.tickformatstopdefaults), sets the default property values to use for elements of surface.colorbar.tickformatstops ticklabeloverflow Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. 
ticklabelposition Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". ticklabelstep Sets the spacing between tick labels as compared to the spacing between ticks. A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on Chart Studio Cloud for `ticktext`. tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on Chart Studio Cloud for `tickvals`. tickwidth Sets the tick width (in px). title :class:`plotly.graph_objects.surface.colorbar.Title` instance or dict with compatible properties x Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". xanchor Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". xpad Sets the amount of padding (in px) along the x direction. xref Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. y Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". yanchor Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". ypad Sets the amount of padding (in px) along the y direction. yref Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. 
""" def __init__( self, arg=None, bgcolor=None, bordercolor=None, borderwidth=None, dtick=None, exponentformat=None, labelalias=None, len=None, lenmode=None, minexponent=None, nticks=None, orientation=None, outlinecolor=None, outlinewidth=None, separatethousands=None, showexponent=None, showticklabels=None, showtickprefix=None, showticksuffix=None, thickness=None, thicknessmode=None, tick0=None, tickangle=None, tickcolor=None, tickfont=None, tickformat=None, tickformatstops=None, tickformatstopdefaults=None, ticklabeloverflow=None, ticklabelposition=None, ticklabelstep=None, ticklen=None, tickmode=None, tickprefix=None, ticks=None, ticksuffix=None, ticktext=None, ticktextsrc=None, tickvals=None, tickvalssrc=None, tickwidth=None, title=None, x=None, xanchor=None, xpad=None, xref=None, y=None, yanchor=None, ypad=None, yref=None, **kwargs, ): """ Construct a new ColorBar object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.surface.ColorBar` bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). *SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. labelalias Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html-like tags or MathJax. 
len Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. minexponent Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". orientation Sets the orientation of the colorbar. outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.surface.colorba r.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.data.surfac e.colorbar.tickformatstopdefaults), sets the default property values to use for elements of surface.colorbar.tickformatstops ticklabeloverflow Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. The default value for inside tick labels is *hide past domain*. 
In other cases the default is *hide past div*. ticklabelposition Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". ticklabelstep Sets the spacing between tick labels as compared to the spacing between ticks. A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on Chart Studio Cloud for `ticktext`. tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on Chart Studio Cloud for `tickvals`. tickwidth Sets the tick width (in px). title :class:`plotly.graph_objects.surface.colorbar.Title` instance or dict with compatible properties x Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". xanchor Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". xpad Sets the amount of padding (in px) along the x direction. xref Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. y Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". yanchor Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". ypad Sets the amount of padding (in px) along the y direction. yref Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. 
Returns ------- ColorBar """ super().__init__("colorbar") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.surface.ColorBar constructor must be a dict or an instance of :class:`plotly.graph_objs.surface.ColorBar`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("bgcolor", arg, bgcolor) self._set_property("bordercolor", arg, bordercolor) self._set_property("borderwidth", arg, borderwidth) self._set_property("dtick", arg, dtick) self._set_property("exponentformat", arg, exponentformat) self._set_property("labelalias", arg, labelalias) self._set_property("len", arg, len) self._set_property("lenmode", arg, lenmode) self._set_property("minexponent", arg, minexponent) self._set_property("nticks", arg, nticks) self._set_property("orientation", arg, orientation) self._set_property("outlinecolor", arg, outlinecolor) self._set_property("outlinewidth", arg, outlinewidth) self._set_property("separatethousands", arg, separatethousands) self._set_property("showexponent", arg, showexponent) self._set_property("showticklabels", arg, showticklabels) self._set_property("showtickprefix", arg, showtickprefix) self._set_property("showticksuffix", arg, showticksuffix) self._set_property("thickness", arg, thickness) self._set_property("thicknessmode", arg, thicknessmode) self._set_property("tick0", arg, tick0) self._set_property("tickangle", arg, tickangle) self._set_property("tickcolor", arg, tickcolor) self._set_property("tickfont", arg, tickfont) self._set_property("tickformat", arg, tickformat) self._set_property("tickformatstops", arg, tickformatstops) self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults) self._set_property("ticklabeloverflow", arg, ticklabeloverflow) self._set_property("ticklabelposition", arg, ticklabelposition) self._set_property("ticklabelstep", arg, ticklabelstep) self._set_property("ticklen", arg, ticklen) self._set_property("tickmode", arg, tickmode) self._set_property("tickprefix", arg, tickprefix) self._set_property("ticks", arg, ticks) self._set_property("ticksuffix", arg, ticksuffix) self._set_property("ticktext", arg, ticktext) self._set_property("ticktextsrc", arg, ticktextsrc) self._set_property("tickvals", arg, tickvals) self._set_property("tickvalssrc", arg, tickvalssrc) self._set_property("tickwidth", arg, tickwidth) self._set_property("title", arg, title) self._set_property("x", arg, x) self._set_property("xanchor", arg, xanchor) self._set_property("xpad", arg, xpad) self._set_property("xref", arg, xref) self._set_property("y", arg, y) self._set_property("yanchor", arg, yanchor) self._set_property("ypad", arg, ypad) self._set_property("yref", arg, yref) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
ColorBar
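A hedged construction sketch for the colorbar record above: plotly accepts the same properties as a plain dict on a surface trace, so the keys below map one-to-one onto the class's fields (the data is arbitrary):

import numpy as np
import plotly.graph_objects as go

fig = go.Figure(
    go.Surface(
        z=np.random.rand(10, 10),
        colorbar=dict(
            title=dict(text="height"),
            thickness=20,      # pixels, the default thicknessmode
            len=0.75,          # plot fraction, the default lenmode
            tickformat=".2f",  # d3-format mini-language, as documented above
        ),
    )
)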
python
catalyst-team__catalyst
examples/detection/criterion.py
{ "start": 85, "end": 4703 }
class ____(nn.Module):
    def __init__(self, num_classes, ignore_class=0):
        super().__init__()
        self.num_classes = num_classes
        self.ignore_class = ignore_class

    def _hard_negative_mining(self, cls_loss, pos):
        """Return negative indices that are 3x the number of positive indices.

        Args:
            cls_loss: (torch.Tensor) cross entropy loss between `cls_preds`
                and `cls_targets`.
                Expected shape [B, M] where B - batch, M - anchors.
            pos: (torch.Tensor) positive class mask.
                Expected shape [B, M] where B - batch, M - anchors.

        Return:
            (torch.Tensor) negative indices, sized [N, #anchors].
        """
        cls_loss = cls_loss * (pos.float() - 1)

        _, idx = cls_loss.sort(1)  # sort by negative losses
        _, rank = idx.sort(1)  # [B, M]

        num_neg = 3 * pos.sum(1)  # [B]
        neg = rank < num_neg[:, None]  # [B, M]
        return neg

    def forward(self, loc_preds, loc_targets, cls_preds, cls_targets):
        """Compute loss between (loc_preds, loc_targets) and (cls_preds, cls_targets).

        Loss:
            loss = SmoothL1Loss(loc_preds, loc_targets)
                 + CrossEntropyLoss(cls_preds, cls_targets).

        Args:
            loc_preds: (torch.Tensor) predicted locations.
                Expected shapes - [B, M, 4] where B - batch, M - anchors.
            loc_targets: (torch.Tensor) encoded target locations.
                Expected shapes - [B, M, 4] where B - batch, M - anchors.
            cls_preds: (torch.Tensor) predicted class confidences.
                Expected shapes - [B, M, CLASS] where B - batch, M - anchors,
                CLASS - number of classes.
            cls_targets: (torch.LongTensor) encoded target labels.
                Expected shapes - [B, M] where B - batch, M - anchors.

        Returns:
            regression loss and classification loss
        """
        pos = cls_targets != self.ignore_class  # not background
        batch_size = pos.size(0)
        num_pos = pos.sum().item()

        mask = pos.unsqueeze(2).expand_as(loc_preds)  # [B, M, 4]
        loc_loss = F.smooth_l1_loss(loc_preds[mask], loc_targets[mask], reduction="sum")

        cls_loss = F.cross_entropy(
            cls_preds.view(-1, self.num_classes), cls_targets.view(-1), reduction="none"
        )  # [B * M]
        cls_loss = cls_loss.view(batch_size, -1)
        cls_loss[cls_targets == self.ignore_class] = 0  # set ignored loss to 0
        neg = self._hard_negative_mining(cls_loss, pos)  # [B, M]
        cls_loss = cls_loss[pos | neg].sum()

        loc_loss = loc_loss / num_pos
        cls_loss = cls_loss / num_pos

        return loc_loss, cls_loss


def reg_loss(regr, gt_regr, mask):
    """L1 regression loss.

    Args:
        regr (torch.Tensor): tensor with HW regression predicted by model,
            should have shapes [batch, max_objects, dim]
        gt_regr (torch.Tensor): tensor with ground truth regression values,
            should have shapes [batch, max_objects, dim]
        mask (torch.Tensor): objects mask, should have shape [batch, max_objects]

    Returns:
        torch.Tensor with regression loss value
    """
    num = mask.float().sum()
    mask = mask.sum(1).unsqueeze(1).expand_as(gt_regr)

    regr = regr * mask
    gt_regr = gt_regr * mask

    # size_average=False is deprecated in recent PyTorch; reduction="sum" is equivalent
    regr_loss = F.smooth_l1_loss(regr, gt_regr, reduction="sum")
    regr_loss = regr_loss / (num + 1e-4)

    return regr_loss


def neg_loss(pred, gt):
    """Modified focal loss. Exactly the same as CornerNet.
    Runs faster and costs a little bit more memory.

    Args:
        pred (torch.Tensor): predicted center heatmaps,
            should have shapes [batch, c, h, w]
        gt (torch.Tensor): ground truth center heatmaps,
            should have shapes [batch, c, h, w]

    Returns:
        torch.Tensor with focal loss value.
    """
    pred = pred.unsqueeze(1).float()
    gt = gt.unsqueeze(1).float()

    positive_inds = gt.eq(1).float()
    negative_inds = gt.lt(1).float()
    negative_weights = torch.pow(1 - gt, 4)

    loss = 0

    positive_loss = torch.log(pred + 1e-12) * torch.pow(1 - pred, 3) * positive_inds
    negative_loss = (
        torch.log(1 - pred + 1e-12) * torch.pow(pred, 3) * negative_weights * negative_inds
    )

    num_pos = positive_inds.float().sum()
    positive_loss = positive_loss.sum()
    negative_loss = negative_loss.sum()

    if num_pos == 0:
        loss = loss - negative_loss
    else:
        loss = loss - (positive_loss + negative_loss) / num_pos

    return loss
SSDCriterion
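The double-argsort trick in `_hard_negative_mining` above is terse; a minimal sketch with one image and six anchors (illustrative values) shows what it selects:

import torch

# One image, six anchors, one positive -> keep 3 * 1 = 3 hardest negatives.
cls_loss = torch.tensor([[0.9, 0.1, 0.4, 0.7, 0.2, 0.3]])
pos = torch.tensor([[False, True, False, False, False, False]])

masked = cls_loss * (pos.float() - 1)   # positives -> 0, negatives -> -loss
_, idx = masked.sort(1)                 # most negative (largest loss) first
_, rank = idx.sort(1)                   # rank of each anchor by loss
neg = rank < (3 * pos.sum(1))[:, None]  # top-3 hardest negatives
print(neg)
# tensor([[ True, False,  True,  True, False, False]])  # losses 0.9, 0.4, 0.7

The positive anchor is never selected because its masked loss is 0, which sorts last.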
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-vectara/destination_vectara/config.py
{ "start": 209, "end": 705 }
class ____(BaseModel):
    client_id: str = Field(..., title="OAuth Client ID", description="OAuth2.0 client id", order=0)
    client_secret: str = Field(
        ..., title="OAuth Client Secret", description="OAuth2.0 client secret", airbyte_secret=True, order=1
    )

    class Config:
        title = "OAuth2.0 Credentials"
        schema_extra = {
            "description": "OAuth2.0 credentials used to authenticate admin actions (creating/deleting corpora)",
            "group": "auth",
        }
OAuth2
python
airbytehq__airbyte
airbyte-integrations/connectors/source-iterable/source_iterable/streams.py
{ "start": 18102, "end": 18189 }
class ____(IterableExportEventsStreamAdjustableRange):
    data_field = "smsSend"
SmsSend
python
run-llama__llama_index
llama-index-core/llama_index/core/node_parser/file/html.py
{ "start": 633, "end": 4691 }
class ____(NodeParser): """ HTML node parser. Splits a document into Nodes using custom HTML splitting logic. Args: include_metadata (bool): whether to include metadata in nodes include_prev_next_rel (bool): whether to include prev/next relationships """ tags: List[str] = Field( default=DEFAULT_TAGS, description="HTML tags to extract text from." ) @classmethod def from_defaults( cls, include_metadata: bool = True, include_prev_next_rel: bool = True, callback_manager: Optional[CallbackManager] = None, tags: Optional[List[str]] = DEFAULT_TAGS, ) -> "HTMLNodeParser": callback_manager = callback_manager or CallbackManager([]) return cls( include_metadata=include_metadata, include_prev_next_rel=include_prev_next_rel, callback_manager=callback_manager, tags=tags, ) @classmethod def class_name(cls) -> str: """Get class name.""" return "HTMLNodeParser" def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any, ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: nodes = self.get_nodes_from_node(node) all_nodes.extend(nodes) return all_nodes def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]: """Get nodes from document.""" try: from bs4 import BeautifulSoup, Tag except ImportError: raise ImportError("bs4 is required to read HTML files.") text = node.get_content(metadata_mode=MetadataMode.NONE) soup = BeautifulSoup(text, "html.parser") html_nodes = [] last_tag = None current_section = "" tags = soup.find_all(self.tags) for tag in tags: tag_text = self._extract_text_from_tag(tag) if isinstance(tag, Tag) and (tag.name == last_tag or last_tag is None): last_tag = tag.name current_section += f"{tag_text.strip()}\n" else: html_nodes.append( self._build_node_from_split( current_section.strip(), node, {"tag": last_tag} ) ) if isinstance(tag, Tag): last_tag = tag.name current_section = f"{tag_text}\n" if current_section: html_nodes.append( self._build_node_from_split( current_section.strip(), node, {"tag": last_tag} ) ) return html_nodes def _extract_text_from_tag( self, tag: Union["Tag", "NavigableString", "PageElement"] ) -> str: from bs4 import NavigableString, Tag, PageElement texts = [] if isinstance(tag, Tag): for elem in tag.children: if isinstance(elem, NavigableString): if elem.strip(): texts.append(elem.strip()) elif isinstance(elem, Tag): if elem.name in self.tags: continue else: texts.append(elem.get_text().strip()) elif isinstance(elem, PageElement): texts.append(elem.get_text().strip()) else: texts.append(tag.get_text().strip()) return "\n".join(texts) def _build_node_from_split( self, text_split: str, node: BaseNode, metadata: dict, ) -> TextNode: """Build node from single text split.""" node = build_nodes_from_splits([text_split], node, id_func=self.id_func)[0] if self.include_metadata: node.metadata = {**node.metadata, **metadata} return node
HTMLNodeParser
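A minimal usage sketch of the parser above, assuming the package's public exports (llama_index.core.Document and the node_parser module) are available. Consecutive tags of the same type are merged into one node, which the expected output below reflects:

from llama_index.core import Document
from llama_index.core.node_parser import HTMLNodeParser

html = "<h1>Title</h1><p>First paragraph.</p><p>Second paragraph.</p>"
parser = HTMLNodeParser.from_defaults(tags=["h1", "p"])
nodes = parser.get_nodes_from_documents([Document(text=html)])
for n in nodes:
    print(n.metadata["tag"], repr(n.text))
# h1 'Title'
# p 'First paragraph.\nSecond paragraph.'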
python
getsentry__sentry
src/sentry/search/events/builder/discover.py
{ "start": 919, "end": 6256 }
class ____(BaseQueryBuilder): """Builds a discover query""" uuid_fields = { "id", "trace", "profile.id", "replay.id", } span_id_fields = { "trace.span", "trace.parent_span", } duration_fields = {"transaction.duration", "span.duration"} def load_config( self, ) -> DatasetConfig: # Necessary until more classes inherit from BaseQueryBuilder instead if hasattr(self, "config_class") and self.config_class is not None: return super().load_config() self.config: DatasetConfig if self.dataset in [ Dataset.Discover, Dataset.Transactions, Dataset.Events, Dataset.IssuePlatform, ]: return DiscoverDatasetConfig(self) else: raise NotImplementedError(f"Data Set configuration not found for {self.dataset}.") def resolve_field(self, raw_field: str, alias: bool = False) -> Column: tag_match = constants.TAG_KEY_RE.search(raw_field) field = tag_match.group("tag") if tag_match else raw_field if field == "group_id": # We don't expose group_id publicly, so if a user requests it # we expect it is a custom tag. Convert it to tags[group_id] # and ensure it queries tag data # These maps are updated so the response can be mapped back to group_id self.tag_to_prefixed_map["group_id"] = "tags[group_id]" self.prefixed_to_tag_map["tags[group_id]"] = "group_id" raw_field = "tags[group_id]" return super().resolve_field(raw_field, alias) def resolve_projects(self) -> list[int]: if self.params.organization_id and in_rollout_group( "sentry.search.events.project.check_event", self.params.organization_id ): if self.dataset == Dataset.Discover: project_ids = [ proj.id for proj in self.params.projects if proj.flags.has_transactions or proj.first_event is not None ] elif self.dataset == Dataset.Events: project_ids = [ proj.id for proj in self.params.projects if proj.first_event is not None ] elif self.dataset in [Dataset.Transactions, Dataset.IssuePlatform]: project_ids = [ proj.id for proj in self.params.projects if proj.flags.has_transactions ] else: return super().resolve_projects() if len(project_ids) == 0: raise InvalidSearchQuery( "All the projects in your query haven't received data yet, so no query was ran" ) else: return project_ids else: return super().resolve_projects() def get_function_result_type( self, function: str, ) -> str | None: if function in constants.TREND_FUNCTION_TYPE_MAP: # HACK: Don't invalid query here if we don't recognize the function # this is cause non-snql tests still need to run and will check here # TODO: once non-snql is removed and trends has its own builder this # can be removed return constants.TREND_FUNCTION_TYPE_MAP.get(function) return super().get_function_result_type(function) def format_search_filter(self, term: event_search.SearchFilter) -> WhereType | None: """For now this function seems a bit redundant inside QueryFilter but most of the logic from the existing format_search_filter hasn't been converted over yet """ name = term.key.name converted_filter = self.convert_search_filter_to_condition( event_search.SearchFilter( # We want to use group_id elsewhere so shouldn't be removed from the dataset # but if a user has a tag with the same name we want to make sure that works event_search.SearchKey("tags[group_id]" if name == "group_id" else name), term.operator, term.value, ) ) return converted_filter if converted_filter else None def default_filter_converter( self, search_filter: event_search.SearchFilter ) -> WhereType | None: name = search_filter.key.name operator = search_filter.operator value = search_filter.value.value # Some fields aren't valid queries if name in 
constants.SKIP_FILTER_RESOLUTION: name = f"tags[{name}]" if name in constants.TIMESTAMP_FIELDS: if not self.start or not self.end: raise InvalidSearchQuery( f"Cannot query the {name} field without a valid date range" ) if ( operator in ["<", "<="] and value < self.start or operator in [">", ">="] and value > self.end ): raise InvalidSearchQuery( "Filter on timestamp is outside of the selected date range." ) return super().default_filter_converter(search_filter)
DiscoverQueryBuilder
python
ipython__ipython
docs/autogen_shortcuts.py
{ "start": 779, "end": 840 }
class ____:
    handler: Handler
    shortcut: Shortcut
Binding
python
facebookresearch__faiss
tests/test_contrib.py
{ "start": 7264, "end": 9450 }
class ____(unittest.TestCase):

    def test_precision_recall(self):
        Iref = [
            [1, 2, 3],
            [5, 6],
            [],
            []
        ]
        Inew = [
            [1, 2],
            [6, 7],
            [1],
            []
        ]
        lims_ref = np.cumsum([0] + [len(x) for x in Iref])
        Iref = np.hstack(Iref)
        lims_new = np.cumsum([0] + [len(x) for x in Inew])
        Inew = np.hstack(Inew)

        precision, recall = evaluation.range_PR(lims_ref, Iref, lims_new, Inew)

        self.assertEqual(precision, 0.6)
        self.assertEqual(recall, 0.6)

    def test_PR_multiple(self):
        metric = faiss.METRIC_L2
        ds = datasets.SyntheticDataset(32, 1000, 1000, 10)
        xq = ds.get_queries()
        xb = ds.get_database()

        # good for ~10k results
        threshold = 15

        index = faiss.IndexFlat(32, metric)
        index.add(xb)
        ref_lims, ref_D, ref_I = index.range_search(xq, threshold)

        # now make a slightly suboptimal index
        index2 = faiss.index_factory(32, "PCA16,Flat")
        index2.train(ds.get_train())
        index2.add(xb)

        # PCA reduces distances so will have more results
        new_lims, new_D, new_I = index2.range_search(xq, threshold)

        all_thr = np.array([5.0, 10.0, 12.0, 15.0])
        for mode in "overall", "average":
            ref_precisions = np.zeros_like(all_thr)
            ref_recalls = np.zeros_like(all_thr)

            for i, thr in enumerate(all_thr):
                lims2, _, I2 = evaluation.filter_range_results(
                    new_lims, new_D, new_I, thr)
                prec, recall = evaluation.range_PR(
                    ref_lims, ref_I, lims2, I2, mode=mode)
                ref_precisions[i] = prec
                ref_recalls[i] = recall

            precisions, recalls = evaluation.range_PR_multiple_thresholds(
                ref_lims, ref_I,
                new_lims, new_D, new_I,
                all_thr, mode=mode
            )

            np.testing.assert_array_almost_equal(ref_precisions, precisions)
            np.testing.assert_array_almost_equal(ref_recalls, recalls)
TestRangeEval
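The asserted 0.6 in test_precision_recall can be checked by hand: the reference lists hold 3 + 2 = 5 results in total and the new lists 2 + 2 + 1 = 5; the hits are {1, 2} in row 0 and {6} in row 1, i.e. 3 in total, so precision = recall = 3/5 = 0.6.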
python
pallets__jinja
src/jinja2/sandbox.py
{ "start": 14258, "end": 14864 }
class ____(Formatter):
    def __init__(self, env: Environment, **kwargs: t.Any) -> None:
        self._env = env
        super().__init__(**kwargs)

    def get_field(
        self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
    ) -> tuple[t.Any, str]:
        first, rest = formatter_field_name_split(field_name)
        obj = self.get_value(first, args, kwargs)
        for is_attr, i in rest:
            if is_attr:
                obj = self._env.getattr(obj, i)
            else:
                obj = self._env.getitem(obj, i)
        return obj, first
SandboxedFormatter
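The overridden get_field is what makes str.format safe inside the sandbox: attribute and item lookups are routed through the environment's getattr/getitem checks instead of plain getattr, so format strings cannot walk dunder attributes. A minimal sketch of the behavior this buys, which is expected to raise SecurityError because sandboxed attribute access rejects names starting with an underscore:

from jinja2.exceptions import SecurityError
from jinja2.sandbox import SandboxedEnvironment

env = SandboxedEnvironment()
tmpl = env.from_string("{{ '{0.__class__}'.format(42) }}")
try:
    tmpl.render()
except SecurityError as exc:
    print("blocked:", exc)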
python
PrefectHQ__prefect
src/prefect/utilities/schema_tools/hydration.py
{ "start": 2833, "end": 2932 }
class ____(KeyNotFound):
    @property
    def key(self) -> str:
        return "value"
ValueNotFound
python
keras-team__keras
keras/src/saving/saving_lib_test.py
{ "start": 34777, "end": 35056 }
class ____:
    def __init__(self, factor):
        self.factor = factor

    def __call__(self, inputs):
        return inputs * self.factor

    def get_config(self):
        return {"factor": self.factor}


@keras.saving.register_keras_serializable(package="Complex")
GrowthFactor
python
pytorch__pytorch
test/dynamo/test_repros.py
{ "start": 27793, "end": 29010 }
class ____(nn.Module):
    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation=nn.ReLU(),
        layer_norm_eps=1e-5,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.dropout = nn.Dropout(dropout)
        self.ff_block = FeedForwardLayer(d_model, dim_feedforward, activation, dropout)

    def forward(self, src, src_mask=None, src_key_padding_mask=None):
        x = src
        x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
        x = self.norm2(x + self._ff_block(x))
        return x

    # self-attention block
    def _sa_block(self, x, attn_mask, key_padding_mask):
        x = self.self_attn(
            x,
            x,
            x,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False,
        )[0]
        return self.dropout(x)

    # feed forward block
    def _ff_block(self, x):
        return self.ff_block(x)
TransformerEncoderLayer
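A minimal shape-check sketch for the layer above. It assumes the class and its FeedForwardLayer helper (defined elsewhere in the same test file) are in scope; the layout follows nn.MultiheadAttention's default (seq_len, batch, d_model):

import torch

layer = TransformerEncoderLayer(d_model=32, nhead=4, dim_feedforward=64)
src = torch.randn(10, 2, 32)  # (seq_len, batch, d_model)
out = layer(src)
print(out.shape)  # torch.Size([10, 2, 32]) -- residuals preserve the shape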
python
dagster-io__dagster
python_modules/libraries/dagster-fivetran/dagster_fivetran/translator.py
{ "start": 1383, "end": 1626 }
class ____(Enum):
    """Enum representing each setup state for a connector in Fivetran's ontology."""

    INCOMPLETE = "incomplete"
    CONNECTED = "connected"
    BROKEN = "broken"


@whitelist_for_serdes
@record
FivetranConnectorSetupStateType
python
rushter__MLAlgorithms
mla/neuralnet/layers/recurrent/rnn.py
{ "start": 257, "end": 3540 }
class ____(Layer, ParamMixin):
    """Vanilla RNN."""

    def __init__(
        self,
        hidden_dim,
        activation="tanh",
        inner_init="orthogonal",
        parameters=None,
        return_sequences=True,
    ):
        self.return_sequences = return_sequences
        self.hidden_dim = hidden_dim
        self.inner_init = get_initializer(inner_init)
        self.activation = get_activation(activation)
        self.activation_d = elementwise_grad(self.activation)
        if parameters is None:
            self._params = Parameters()
        else:
            self._params = parameters

        self.last_input = None
        self.states = None
        self.hprev = None
        self.input_dim = None

    def setup(self, x_shape):
        """
        Parameters
        ----------
        x_shape : np.array(batch size, time steps, input shape)
        """
        self.input_dim = x_shape[2]

        # Input -> Hidden
        self._params["W"] = self._params.init((self.input_dim, self.hidden_dim))
        # Bias
        self._params["b"] = np.full((self.hidden_dim,), self._params.initial_bias)
        # Hidden -> Hidden layer
        self._params["U"] = self.inner_init((self.hidden_dim, self.hidden_dim))

        # Init gradient arrays
        self._params.init_grad()

        self.hprev = np.zeros((x_shape[0], self.hidden_dim))

    def forward_pass(self, X):
        self.last_input = X
        n_samples, n_timesteps, input_shape = X.shape
        states = np.zeros((n_samples, n_timesteps + 1, self.hidden_dim))
        states[:, -1, :] = self.hprev.copy()
        p = self._params

        # NOTE: np.tanh is hardcoded here even though `activation` is
        # configurable; only the backward pass uses the configured activation
        # (via activation_d). At i = 0, states[:, i - 1, :] wraps around to the
        # last slot, which holds hprev.
        for i in range(n_timesteps):
            states[:, i, :] = np.tanh(
                np.dot(X[:, i, :], p["W"]) + np.dot(states[:, i - 1, :], p["U"]) + p["b"]
            )

        self.states = states
        self.hprev = states[:, n_timesteps - 1, :].copy()
        if self.return_sequences:
            return states[:, 0:-1, :]
        else:
            return states[:, -2, :]

    def backward_pass(self, delta):
        if len(delta.shape) == 2:
            delta = delta[:, np.newaxis, :]

        n_samples, n_timesteps, input_shape = delta.shape
        p = self._params

        # Temporal gradient arrays
        grad = {k: np.zeros_like(p[k]) for k in p.keys()}

        dh_next = np.zeros((n_samples, input_shape))
        output = np.zeros((n_samples, n_timesteps, self.input_dim))

        # Backpropagation through time
        for i in reversed(range(n_timesteps)):
            dhi = self.activation_d(self.states[:, i, :]) * (delta[:, i, :] + dh_next)

            grad["W"] += np.dot(self.last_input[:, i, :].T, dhi)
            grad["b"] += delta[:, i, :].sum(axis=0)
            grad["U"] += np.dot(self.states[:, i - 1, :].T, dhi)

            dh_next = np.dot(dhi, p["U"].T)

            d = np.dot(delta[:, i, :], p["U"].T)
            output[:, i, :] = np.dot(d, p["W"].T)

        # Change actual gradient arrays
        for k in grad.keys():
            self._params.update_grad(k, grad[k])
        return output

    def shape(self, x_shape):
        if self.return_sequences:
            return x_shape[0], x_shape[1], self.hidden_dim
        else:
            return x_shape[0], self.hidden_dim
RNN
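Stripped of the framework plumbing, the forward recurrence above is just h_t = tanh(x_t @ W + h_{t-1} @ U + b); a minimal numpy sketch with arbitrarily chosen shapes:

import numpy as np

rng = np.random.default_rng(0)
batch, steps, in_dim, hid = 2, 3, 4, 5
X = rng.normal(size=(batch, steps, in_dim))
W = rng.normal(size=(in_dim, hid))   # input -> hidden
U = rng.normal(size=(hid, hid))      # hidden -> hidden
b = np.zeros(hid)

h = np.zeros((batch, hid))           # hprev
for t in range(steps):
    h = np.tanh(X[:, t, :] @ W + h @ U + b)
print(h.shape)  # (2, 5) -- the last hidden state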
python
ray-project__ray
python/ray/data/_internal/execution/bundle_queue/fifo_bundle_queue.py
{ "start": 377, "end": 4610 }
class ____(BundleQueue):
    """A bundle queue that follows a first-in-first-out policy."""

    def __init__(self):
        # We manually implement a linked list because we need to remove elements
        # efficiently, and Python's built-in data structures have O(n) removal time.
        self._head: Optional[_Node] = None
        self._tail: Optional[_Node] = None
        # We use a dictionary to keep track of the nodes corresponding to each bundle.
        # This allows us to remove a bundle from the queue in O(1) time. We need a list
        # because a bundle can be added to the queue multiple times. Nodes in each list
        # are insertion-ordered.
        self._bundle_to_nodes: Dict["RefBundle", List[_Node]] = defaultdict(deque)

        self._nbytes = 0
        self._num_blocks = 0
        self._num_bundles = 0

    def __len__(self) -> int:
        return self._num_bundles

    def __contains__(self, bundle: "RefBundle") -> bool:
        return bundle in self._bundle_to_nodes

    def add(self, bundle: "RefBundle") -> None:
        """Add a bundle to the end (right) of the queue."""
        new_node = _Node(value=bundle, next=None, prev=self._tail)

        # Case 1: The queue is empty.
        if self._head is None:
            assert self._tail is None
            self._head = new_node
            self._tail = new_node
        # Case 2: The queue has at least one element.
        else:
            self._tail.next = new_node
            self._tail = new_node

        self._bundle_to_nodes[bundle].append(new_node)

        self._nbytes += bundle.size_bytes()
        self._num_blocks += len(bundle.block_refs)
        self._num_bundles += 1

    def get_next(self) -> "RefBundle":
        """Return the first (left) bundle in the queue."""
        # Case 1: The queue is empty.
        if not self._head:
            raise IndexError("You can't pop from an empty queue")

        bundle = self._head.value
        self.remove(bundle)

        return bundle

    def has_next(self) -> bool:
        return self._num_bundles > 0

    def peek_next(self) -> Optional["RefBundle"]:
        """Return the first (left) bundle in the queue without removing it."""
        if self._head is None:
            return None
        return self._head.value

    def remove(self, bundle: "RefBundle"):
        """Remove a bundle from the queue.

        If there are multiple instances of the bundle in the queue, this method
        only removes the first one.
        """
        # Case 1: The queue is empty.
        if bundle not in self._bundle_to_nodes:
            raise ValueError(f"The bundle {bundle} is not in the queue.")

        node = self._bundle_to_nodes[bundle].popleft()
        if not self._bundle_to_nodes[bundle]:
            del self._bundle_to_nodes[bundle]

        # Case 2: The bundle is the only element in the queue.
        if self._head is self._tail:
            self._head = None
            self._tail = None
        # Case 3: The bundle is the first element in the queue.
        elif node is self._head:
            self._head = node.next
            self._head.prev = None
        # Case 4: The bundle is the last element in the queue.
        elif node is self._tail:
            self._tail = node.prev
            self._tail.next = None
        # Case 5: The bundle is in the middle of the queue.
        else:
            node.prev.next = node.next
            node.next.prev = node.prev

        self._num_bundles -= 1
        self._num_blocks -= len(bundle)
        self._nbytes -= bundle.size_bytes()
        assert self._nbytes >= 0, (
            "Expected the total size of objects in the queue to be non-negative, but "
            f"got {self._nbytes} bytes instead."
        )

        return node.value

    def clear(self):
        self._head = None
        self._tail = None
        self._bundle_to_nodes.clear()
        self._num_bundles = 0
        self._num_blocks = 0
        self._nbytes = 0

    def estimate_size_bytes(self) -> int:
        return self._nbytes

    def num_blocks(self) -> int:
        return self._num_blocks

    def is_empty(self):
        return not self._bundle_to_nodes and self._head is None and self._tail is None
FIFOBundleQueue
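The point of the hand-rolled linked list above is O(1) removal of an arbitrary, possibly duplicated bundle, which list.remove or deque.remove cannot give. A generic sketch of the same idea, with strings standing in for RefBundles and dicts standing in for the _Node class:

from collections import defaultdict, deque

class FIFOQueue:
    """Toy FIFO with O(1) removal of an arbitrary, possibly duplicated item."""

    def __init__(self):
        self._head = self._tail = None
        self._index = defaultdict(deque)  # item -> insertion-ordered nodes

    def add(self, item):
        node = {"value": item, "prev": self._tail, "next": None}
        if self._tail is None:
            self._head = node
        else:
            self._tail["next"] = node
        self._tail = node
        self._index[item].append(node)

    def remove(self, item):
        node = self._index[item].popleft()  # O(1): oldest copy of `item`
        if not self._index[item]:
            del self._index[item]
        prev, nxt = node["prev"], node["next"]
        if prev is None:
            self._head = nxt
        else:
            prev["next"] = nxt
        if nxt is None:
            self._tail = prev
        else:
            nxt["prev"] = prev

q = FIFOQueue()
for x in "abab":
    q.add(x)
q.remove("b")  # unlinks only the first "b"

node = q._head
while node:
    print(node["value"], end=" ")  # prints: a a b
    node = node["next"]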
python
pytorch__pytorch
torch/utils/data/datapipes/dataframe/dataframes.py
{ "start": 10085, "end": 10395 }
class ____(Capture):
    def __init__(self, left, right, ctx) -> None:
        self.ctx = ctx
        self.left = left
        self.right = right

    def __str__(self) -> str:
        return f"{self.left} + {self.right}"

    def execute(self):
        return get_val(self.left) + get_val(self.right)
CaptureAdd
python
django__django
django/contrib/gis/gdal/raster/source.py
{ "start": 929, "end": 1740 }
class ____(list):
    indices = {
        "origin": (0, 3),
        "scale": (1, 5),
        "skew": (2, 4),
    }

    def __init__(self, raster, prop):
        x = raster.geotransform[self.indices[prop][0]]
        y = raster.geotransform[self.indices[prop][1]]
        super().__init__([x, y])
        self._raster = raster
        self._prop = prop

    @property
    def x(self):
        return self[0]

    @x.setter
    def x(self, value):
        gtf = self._raster.geotransform
        gtf[self.indices[self._prop][0]] = value
        self._raster.geotransform = gtf

    @property
    def y(self):
        return self[1]

    @y.setter
    def y(self, value):
        gtf = self._raster.geotransform
        gtf[self.indices[self._prop][1]] = value
        self._raster.geotransform = gtf
TransformPoint
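The indices table mirrors GDAL's 6-element geotransform, which maps pixel coordinates to georeferenced ones as x = gtf[0] + col*gtf[1] + row*gtf[2] and y = gtf[3] + col*gtf[4] + row*gtf[5], hence origin=(0, 3), scale=(1, 5), skew=(2, 4). A worked example with an illustrative north-up transform:

# (origin_x, pixel_width, x_skew, origin_y, y_skew, pixel_height)
gtf = [440720.0, 60.0, 0.0, 3751320.0, 0.0, -60.0]
col, row = 10, 20
x = gtf[0] + col * gtf[1] + row * gtf[2]  # 440720 + 600 + 0    = 441320.0
y = gtf[3] + col * gtf[4] + row * gtf[5]  # 3751320 + 0 - 1200  = 3750120.0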
python
dagster-io__dagster
python_modules/libraries/dagster-aws/dagster_aws/athena/resources.py
{ "start": 8093, "end": 9241 }
class ____(ConfigurableResource):
    workgroup: str = Field(
        default="primary",
        description=(
            "The Athena WorkGroup to use."
            " https://docs.aws.amazon.com/athena/latest/ug/manage-queries-control-costs-with-workgroups.html"
        ),
    )
    polling_interval: int = Field(
        default=5,
        description=(
            "Time in seconds between checks to see if a query execution is finished. 5 seconds"
            " by default. Must be non-negative."
        ),
    )
    max_polls: int = Field(
        default=120,
        description=(
            "Number of times to poll before timing out. 120 attempts by default. When coupled"
            " with the default polling_interval, queries will timeout after 10 minutes (120 * 5"
            " seconds). Must be greater than 0."
        ),
    )
    aws_access_key_id: Optional[str] = Field(
        default=None, description="AWS access key ID for authentication purposes."
    )
    aws_secret_access_key: Optional[str] = Field(
        default=None, description="AWS secret access key for authentication purposes."
    )
ResourceWithAthenaConfig
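With the defaults above, the total polling budget is max_polls * polling_interval = 120 * 5 = 600 s, the 10 minutes quoted in the field description; for example, max_polls=360 with polling_interval=10 would stretch that budget to one hour.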
python
scikit-learn__scikit-learn
sklearn/covariance/_shrunk_covariance.py
{ "start": 15767, "end": 23335 }
class ____(EmpiricalCovariance): """LedoitWolf Estimator. Ledoit-Wolf is a particular form of shrinkage, where the shrinkage coefficient is computed using O. Ledoit and M. Wolf's formula as described in "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices", Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2, February 2004, pages 365-411. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- store_precision : bool, default=True Specify if the estimated precision is stored. assume_centered : bool, default=False If True, data will not be centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If False (default), data will be centered before computation. block_size : int, default=1000 Size of blocks into which the covariance matrix will be split during its Ledoit-Wolf estimation. This is purely a memory optimization and does not affect results. Attributes ---------- covariance_ : ndarray of shape (n_features, n_features) Estimated covariance matrix. location_ : ndarray of shape (n_features,) Estimated location, i.e. the estimated mean. precision_ : ndarray of shape (n_features, n_features) Estimated pseudo inverse matrix. (stored only if store_precision is True) shrinkage_ : float Coefficient in the convex combination used for the computation of the shrunk estimate. Range is [0, 1]. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- EllipticEnvelope : An object for detecting outliers in a Gaussian distributed dataset. EmpiricalCovariance : Maximum likelihood covariance estimator. GraphicalLasso : Sparse inverse covariance estimation with an l1-penalized estimator. GraphicalLassoCV : Sparse inverse covariance with cross-validated choice of the l1 penalty. MinCovDet : Minimum Covariance Determinant (robust estimator of covariance). OAS : Oracle Approximating Shrinkage Estimator. ShrunkCovariance : Covariance estimator with shrinkage. Notes ----- The regularised covariance is: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features and shrinkage is given by the Ledoit and Wolf formula (see References) References ---------- "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices", Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2, February 2004, pages 365-411. Examples -------- >>> import numpy as np >>> from sklearn.covariance import LedoitWolf >>> real_cov = np.array([[.4, .2], ... [.2, .8]]) >>> np.random.seed(0) >>> X = np.random.multivariate_normal(mean=[0, 0], ... cov=real_cov, ... size=50) >>> cov = LedoitWolf().fit(X) >>> cov.covariance_ array([[0.4406, 0.1616], [0.1616, 0.8022]]) >>> cov.location_ array([ 0.0595 , -0.0075]) See also :ref:`sphx_glr_auto_examples_covariance_plot_covariance_estimation.py` and :ref:`sphx_glr_auto_examples_covariance_plot_lw_vs_oas.py` for more detailed examples. 
""" _parameter_constraints: dict = { **EmpiricalCovariance._parameter_constraints, "block_size": [Interval(Integral, 1, None, closed="left")], } def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000): super().__init__( store_precision=store_precision, assume_centered=assume_centered ) self.block_size = block_size @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Fit the Ledoit-Wolf shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ # Not calling the parent object to fit, to avoid computing the # covariance matrix (and potentially the precision) X = validate_data(self, X) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance, shrinkage = _ledoit_wolf( X - self.location_, assume_centered=True, block_size=self.block_size ) self.shrinkage_ = shrinkage self._set_covariance(covariance) return self # OAS estimator @validate_params( {"X": ["array-like"]}, prefer_skip_nested_validation=False, ) def oas(X, *, assume_centered=False): """Estimate covariance with the Oracle Approximating Shrinkage. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the covariance estimate. assume_centered : bool, default=False If True, data will not be centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data will be centered before computation. Returns ------- shrunk_cov : array-like of shape (n_features, n_features) Shrunk covariance. shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularised covariance is: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features), where mu = trace(cov) / n_features and shrinkage is given by the OAS formula (see [1]_). The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In the original article, formula (23) states that 2/p (p being the number of features) is multiplied by Trace(cov*cov) in both the numerator and denominator, but this operation is omitted because for a large p, the value of 2/p is so small that it doesn't affect the value of the estimator. References ---------- .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.", Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. <0907.4698>` Examples -------- >>> import numpy as np >>> from sklearn.covariance import oas >>> rng = np.random.RandomState(0) >>> real_cov = [[.8, .3], [.3, .4]] >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) >>> shrunk_cov, shrinkage = oas(X) >>> shrunk_cov array([[0.7533, 0.2763], [0.2763, 0.3964]]) >>> shrinkage np.float64(0.0195) """ estimator = OAS( assume_centered=assume_centered, ).fit(X) return estimator.covariance_, estimator.shrinkage_
LedoitWolf
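The regularised-covariance formula in the docstring can be verified directly against the fitted estimator. A small sketch using only the public sklearn API; it should print True:

import numpy as np
from sklearn.covariance import LedoitWolf, empirical_covariance

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[0.4, 0.2], [0.2, 0.8]], size=50)

lw = LedoitWolf().fit(X)
emp = empirical_covariance(X)
mu = np.trace(emp) / emp.shape[0]
manual = (1 - lw.shrinkage_) * emp + lw.shrinkage_ * mu * np.eye(emp.shape[0])
print(np.allclose(manual, lw.covariance_))  # True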
python
great-expectations__great_expectations
great_expectations/metrics/column/mean.py
{ "start": 128, "end": 179 }
class ____(MetricResult[float]): ...
ColumnMeanResult
python
getsentry__sentry
src/sentry/api/serializers/models/project.py
{ "start": 23611, "end": 23682 }
class ____(TypedDict, total=False):
    team: TeamResponseDict
_MaybeTeam
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/hooks/dlp.py
{ "start": 2052, "end": 67346 }
class ____(GoogleBaseHook): """ Hook for Google Cloud Data Loss Prevention (DLP) APIs. Cloud DLP allows clients to detect the presence of Personally Identifiable Information (PII) and other privacy-sensitive data in user-supplied, unstructured data streams, like text blocks or images. The service also includes methods for sensitive data redaction and scheduling of data scans on Google Cloud based data sets. :param gcp_conn_id: The connection ID to use when fetching connection info. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account. """ def __init__( self, gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__( gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain, **kwargs, ) self._client: DlpServiceClient | None = None def get_conn(self) -> DlpServiceClient: """ Provide a client for interacting with the Cloud DLP API. :return: Google Cloud DLP API Client """ if not self._client: self._client = DlpServiceClient(credentials=self.get_credentials(), client_info=CLIENT_INFO) return self._client def _project_deidentify_template_path(self, project_id, template_id): return f"{DlpServiceClient.common_project_path(project_id)}/deidentifyTemplates/{template_id}" def _project_stored_info_type_path(self, project_id, info_type_id): return f"{DlpServiceClient.common_project_path(project_id)}/storedInfoTypes/{info_type_id}" def _project_inspect_template_path(self, project_id, inspect_template_id): return f"{DlpServiceClient.common_project_path(project_id)}/inspectTemplates/{inspect_template_id}" @GoogleBaseHook.fallback_to_default_project_id def cancel_dlp_job( self, dlp_job_id: str, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> None: """ Start asynchronous cancellation on a long-running DLP job. :param dlp_job_id: ID of the DLP job resource to be cancelled. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. 
""" client = self.get_conn() if not dlp_job_id: raise AirflowException("Please provide the ID of the DLP job resource to be cancelled.") name = DlpServiceClient.dlp_job_path(project_id, dlp_job_id) client.cancel_dlp_job( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) def create_deidentify_template( self, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, deidentify_template: dict | DeidentifyTemplate | None = None, template_id: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> DeidentifyTemplate: """ Create a deidentify template to reuse frequently-used configurations for content, images, and storage. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param deidentify_template: (Optional) The de-identify template to create. :param template_id: (Optional) The template ID. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: parent = DlpServiceClient.common_organization_path(organization_id) elif project_id: parent = DlpServiceClient.common_project_path(project_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.create_deidentify_template( request={ "parent": parent, "deidentify_template": deidentify_template, "template_id": template_id, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def create_dlp_job( self, project_id: str = PROVIDE_PROJECT_ID, inspect_job: dict | InspectJobConfig | None = None, risk_job: dict | RiskAnalysisJobConfig | None = None, job_id: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), wait_until_finished: bool = True, time_to_sleep_in_seconds: int = 60, ) -> DlpJob: """ Create a new job to inspect storage or calculate risk metrics. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param inspect_job: (Optional) The configuration for the inspect job. :param risk_job: (Optional) The configuration for the risk job. :param job_id: (Optional) The job ID. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. :param wait_until_finished: (Optional) If true, it will keep polling the job state until it is set to DONE. 
:param time_to_sleep_in_seconds: (Optional) Time to sleep, in seconds, between active checks of the operation results. Defaults to 60. """ client = self.get_conn() parent = DlpServiceClient.common_project_path(project_id) job = client.create_dlp_job( request={ "parent": parent, "inspect_job": inspect_job, "risk_job": risk_job, "job_id": job_id, }, retry=retry, timeout=timeout, metadata=metadata, ) if wait_until_finished: pattern = re.compile(DLP_JOB_PATH_PATTERN, re.IGNORECASE) match = pattern.match(job.name) if match is not None: job_name = match.groupdict()["job"] else: raise AirflowException(f"Unable to retrieve DLP job's ID from {job.name}.") while wait_until_finished: job = self.get_dlp_job(dlp_job_id=job_name, project_id=project_id) self.log.info("DLP job %s state: %s.", job.name, job.state) if job.state == DlpJob.JobState.DONE: return job if job.state in [ DlpJob.JobState.PENDING, DlpJob.JobState.RUNNING, DlpJob.JobState.JOB_STATE_UNSPECIFIED, ]: time.sleep(time_to_sleep_in_seconds) else: raise AirflowException( "Stopped polling DLP job state. " f"DLP job {job.name} state: {DlpJob.JobState.Name(job.state)}." ) return job def create_inspect_template( self, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, inspect_template: InspectTemplate | None = None, template_id: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> InspectTemplate: """ Create an inspect template to reuse frequently used configurations for content, images, and storage. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param inspect_template: (Optional) The inspect template to create. :param template_id: (Optional) The template ID. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: parent = DlpServiceClient.common_organization_path(organization_id) elif project_id: parent = DlpServiceClient.common_project_path(project_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.create_inspect_template( request={ "parent": parent, "inspect_template": inspect_template, "template_id": template_id, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def create_job_trigger( self, project_id: str = PROVIDE_PROJECT_ID, job_trigger: dict | JobTrigger | None = None, trigger_id: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> JobTrigger: """ Create a job trigger to run DLP actions such as scanning storage for sensitive info on a set schedule. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. 
:param job_trigger: (Optional) The job trigger to create. :param trigger_id: (Optional) The job trigger ID. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() parent = DlpServiceClient.common_project_path(project_id) return client.create_job_trigger( request={ "parent": parent, "job_trigger": job_trigger, "trigger_id": trigger_id, }, retry=retry, timeout=timeout, metadata=metadata, ) def create_stored_info_type( self, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, config: dict | StoredInfoTypeConfig | None = None, stored_info_type_id: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> StoredInfoType: """ Create a pre-built stored info type to be used for inspection. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param config: (Optional) The config for the stored info type. :param stored_info_type_id: (Optional) The stored info type ID. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: parent = DlpServiceClient.common_organization_path(organization_id) elif project_id: parent = DlpServiceClient.common_project_path(project_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.create_stored_info_type( request={ "parent": parent, "config": config, "stored_info_type_id": stored_info_type_id, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def deidentify_content( self, project_id: str = PROVIDE_PROJECT_ID, deidentify_config: dict | DeidentifyConfig | None = None, inspect_config: dict | InspectConfig | None = None, item: dict | ContentItem | None = None, inspect_template_name: str | None = None, deidentify_template_name: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> DeidentifyContentResponse: """ De-identifies potentially sensitive info from a content item; limits input size and output size. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param deidentify_config: (Optional) Configuration for the de-identification of the content item. Items specified here will override the template referenced by the deidentify_template_name argument. :param inspect_config: (Optional) Configuration for the inspector. 
Items specified here will override the template referenced by the inspect_template_name argument. :param item: (Optional) The item to de-identify. Will be treated as text. :param inspect_template_name: (Optional) Optional template to use. Any configuration directly specified in inspect_config will override those set in the template. :param deidentify_template_name: (Optional) Optional template to use. Any configuration directly specified in deidentify_config will override those set in the template. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() parent = DlpServiceClient.common_project_path(project_id) return client.deidentify_content( request={ "parent": parent, "deidentify_config": deidentify_config, "inspect_config": inspect_config, "item": item, "inspect_template_name": inspect_template_name, "deidentify_template_name": deidentify_template_name, }, retry=retry, timeout=timeout, metadata=metadata, ) def delete_deidentify_template( self, template_id, organization_id=None, project_id=None, retry=DEFAULT, timeout=None, metadata=() ) -> None: """ Delete a deidentify template. :param template_id: The ID of deidentify template to be deleted. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not template_id: raise AirflowException("Please provide the ID of deidentify template to be deleted.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.deidentify_template_path(organization_id, template_id) elif project_id: name = self._project_deidentify_template_path(project_id, template_id) else: raise AirflowException("Please provide either organization_id or project_id.") client.delete_deidentify_template( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def delete_dlp_job( self, dlp_job_id: str, project_id: str, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> None: """ Delete a long-running DLP job. This method indicates that the client is no longer interested in the DLP job result. The job will be cancelled if possible. :param dlp_job_id: The ID of the DLP job resource to be cancelled. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. 
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not dlp_job_id: raise AirflowException("Please provide the ID of the DLP job resource to be cancelled.") name = DlpServiceClient.dlp_job_path(project_id, dlp_job_id) client.delete_dlp_job( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) def delete_inspect_template( self, template_id: str, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> None: """ Delete an inspect template. :param template_id: The ID of the inspect template to be deleted. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not template_id: raise AirflowException("Please provide the ID of the inspect template to be deleted.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.inspect_template_path(organization_id, template_id) elif project_id: name = self._project_inspect_template_path(project_id, template_id) else: raise AirflowException("Please provide either organization_id or project_id.") client.delete_inspect_template( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def delete_job_trigger( self, job_trigger_id: str, project_id: str, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> None: """ Delete a job trigger. :param job_trigger_id: The ID of the DLP job trigger to be deleted. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. 
""" client = self.get_conn() if not job_trigger_id: raise AirflowException("Please provide the ID of the DLP job trigger to be deleted.") name = DlpServiceClient.job_trigger_path(project_id, job_trigger_id) client.delete_job_trigger( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) def delete_stored_info_type( self, stored_info_type_id: str, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> None: """ Delete a stored info type. :param stored_info_type_id: The ID of the stored info type to be deleted. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not stored_info_type_id: raise AirflowException("Please provide the ID of the stored info type to be deleted.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.stored_info_type_path(organization_id, stored_info_type_id) elif project_id: name = self._project_stored_info_type_path(project_id, stored_info_type_id) else: raise AirflowException("Please provide either organization_id or project_id.") client.delete_stored_info_type( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) def get_deidentify_template( self, template_id: str, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> DeidentifyTemplate: """ Get a deidentify template. :param template_id: The ID of deidentify template to be read. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. 
""" client = self.get_conn() if not template_id: raise AirflowException("Please provide the ID of the deidentify template to be read.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.deidentify_template_path(organization_id, template_id) elif project_id: name = self._project_deidentify_template_path(project_id, template_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.get_deidentify_template( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def get_dlp_job( self, dlp_job_id: str, project_id: str, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> DlpJob: """ Get the latest state of a long-running Dlp Job. :param dlp_job_id: The ID of the DLP job resource to be read. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not dlp_job_id: raise AirflowException("Please provide the ID of the DLP job resource to be read.") name = DlpServiceClient.dlp_job_path(project_id, dlp_job_id) return client.get_dlp_job( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) def get_inspect_template( self, template_id: str, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> InspectTemplate: """ Get an inspect template. :param template_id: The ID of inspect template to be read. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. 
""" client = self.get_conn() if not template_id: raise AirflowException("Please provide the ID of the inspect template to be read.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.inspect_template_path(organization_id, template_id) elif project_id: name = self._project_inspect_template_path(project_id, template_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.get_inspect_template( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def get_job_trigger( self, job_trigger_id: str, project_id: str, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> JobTrigger: """ Get a DLP job trigger. :param job_trigger_id: The ID of the DLP job trigger to be read. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not job_trigger_id: raise AirflowException("Please provide the ID of the DLP job trigger to be read.") name = DlpServiceClient.job_trigger_path(project_id, job_trigger_id) return client.get_job_trigger( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) def get_stored_info_type( self, stored_info_type_id: str, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> StoredInfoType: """ Get a stored info type. :param stored_info_type_id: The ID of the stored info type to be read. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. 
""" client = self.get_conn() if not stored_info_type_id: raise AirflowException("Please provide the ID of the stored info type to be read.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.stored_info_type_path(organization_id, stored_info_type_id) elif project_id: name = self._project_stored_info_type_path(project_id, stored_info_type_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.get_stored_info_type( request={ "name": name, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def inspect_content( self, project_id: str, inspect_config: dict | InspectConfig | None = None, item: dict | ContentItem | None = None, inspect_template_name: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> InspectContentResponse: """ Find potentially sensitive info in content; limits input size, processing time, and output size. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param inspect_config: (Optional) Configuration for the inspector. Items specified here will override the template referenced by the inspect_template_name argument. :param item: (Optional) The item to de-identify. Will be treated as text. :param inspect_template_name: (Optional) Optional template to use. Any configuration directly specified in inspect_config will override those set in the template. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() parent = DlpServiceClient.common_project_path(project_id) return client.inspect_content( request={ "parent": parent, "inspect_config": inspect_config, "item": item, "inspect_template_name": inspect_template_name, }, retry=retry, timeout=timeout, metadata=metadata, ) def list_deidentify_templates( self, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, page_size: int | None = None, order_by: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> list[DeidentifyTemplate]: """ List deidentify templates. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param page_size: (Optional) The maximum number of resources contained in the underlying API response. :param order_by: (Optional) Optional comma separated list of fields to order by, followed by asc or desc postfix. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. 
:param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: parent = DlpServiceClient.common_organization_path(organization_id) elif project_id: parent = DlpServiceClient.common_project_path(project_id) else: raise AirflowException("Please provide either organization_id or project_id.") results = client.list_deidentify_templates( request={ "parent": parent, "page_size": page_size, "order_by": order_by, }, retry=retry, timeout=timeout, metadata=metadata, ) return list(results) @GoogleBaseHook.fallback_to_default_project_id def list_dlp_jobs( self, project_id: str, results_filter: str | None = None, page_size: int | None = None, job_type: str | None = None, order_by: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> list[DlpJob]: """ List DLP jobs that match the specified filter in the request. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param results_filter: (Optional) Filter used to specify a subset of results. :param page_size: (Optional) The maximum number of resources contained in the underlying API response. :param job_type: (Optional) The type of job. :param order_by: (Optional) Optional comma separated list of fields to order by, followed by asc or desc postfix. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() parent = DlpServiceClient.common_project_path(project_id) results = client.list_dlp_jobs( request={ "parent": parent, "filter": results_filter, "page_size": page_size, "type_": job_type, "order_by": order_by, }, retry=retry, timeout=timeout, metadata=metadata, ) return list(results) def list_info_types( self, language_code: str | None = None, results_filter: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> ListInfoTypesResponse: """ Return a list of the sensitive information types that the DLP API supports. :param language_code: (Optional) Optional BCP-47 language code for localized info type friendly names. If omitted, or if localized strings are not available, en-US strings will be returned. :param results_filter: (Optional) Filter used to specify a subset of results. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. 
""" client = self.get_conn() return client.list_info_types( request={ "language_code": language_code, "filter": results_filter, }, retry=retry, timeout=timeout, metadata=metadata, ) def list_inspect_templates( self, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, page_size: int | None = None, order_by: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> list[InspectTemplate]: """ List inspect templates. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param page_size: (Optional) The maximum number of resources contained in the underlying API response. :param order_by: (Optional) Optional comma separated list of fields to order by, followed by asc or desc postfix. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: parent = DlpServiceClient.common_organization_path(organization_id) elif project_id: parent = DlpServiceClient.common_project_path(project_id) else: raise AirflowException("Please provide either organization_id or project_id.") results = client.list_inspect_templates( request={ "parent": parent, "page_size": page_size, "order_by": order_by, }, retry=retry, timeout=timeout, metadata=metadata, ) return list(results) @GoogleBaseHook.fallback_to_default_project_id def list_job_triggers( self, project_id: str, page_size: int | None = None, order_by: str | None = None, results_filter: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> list[JobTrigger]: """ List job triggers. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param page_size: (Optional) The maximum number of resources contained in the underlying API response. :param order_by: (Optional) Optional comma separated list of fields to order by, followed by asc or desc postfix. :param results_filter: (Optional) Filter used to specify a subset of results. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. 
""" client = self.get_conn() parent = DlpServiceClient.common_project_path(project_id) results = client.list_job_triggers( request={ "parent": parent, "page_size": page_size, "order_by": order_by, "filter": results_filter, }, retry=retry, timeout=timeout, metadata=metadata, ) return list(results) def list_stored_info_types( self, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, page_size: int | None = None, order_by: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> list[StoredInfoType]: """ List stored info types. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param page_size: (Optional) The maximum number of resources contained in the underlying API response. :param order_by: (Optional) Optional comma separated list of fields to order by, followed by asc or desc postfix. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: parent = DlpServiceClient.common_organization_path(organization_id) elif project_id: parent = DlpServiceClient.common_project_path(project_id) else: raise AirflowException("Please provide either organization_id or project_id.") results = client.list_stored_info_types( request={ "parent": parent, "page_size": page_size, "order_by": order_by, }, retry=retry, timeout=timeout, metadata=metadata, ) return list(results) @GoogleBaseHook.fallback_to_default_project_id def redact_image( self, project_id: str, inspect_config: dict | InspectConfig | None = None, image_redaction_configs: None | (list[dict] | list[RedactImageRequest.ImageRedactionConfig]) = None, include_findings: bool | None = None, byte_item: dict | ByteContentItem | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> RedactImageResponse: """ Redacts potentially sensitive info from an image; limits input size, processing time, and output size. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param inspect_config: (Optional) Configuration for the inspector. Items specified here will override the template referenced by the inspect_template_name argument. :param image_redaction_configs: (Optional) The configuration for specifying what content to redact from images. list[google.cloud.dlp_v2.types.RedactImageRequest.ImageRedactionConfig] :param include_findings: (Optional) Whether the response should include findings along with the redacted image. :param byte_item: (Optional) The content must be PNG, JPEG, SVG or BMP. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. 
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() parent = DlpServiceClient.common_project_path(project_id) return client.redact_image( request={ "parent": parent, "inspect_config": inspect_config, "image_redaction_configs": image_redaction_configs, "include_findings": include_findings, "byte_item": byte_item, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def reidentify_content( self, project_id: str, reidentify_config: dict | DeidentifyConfig | None = None, inspect_config: dict | InspectConfig | None = None, item: dict | ContentItem | None = None, inspect_template_name: str | None = None, reidentify_template_name: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> ReidentifyContentResponse: """ Re-identifies content that has been de-identified. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param reidentify_config: (Optional) Configuration for the re-identification of the content item. :param inspect_config: (Optional) Configuration for the inspector. :param item: (Optional) The item to re-identify. Will be treated as text. :param inspect_template_name: (Optional) Optional template to use. Any configuration directly specified in inspect_config will override those set in the template. :param reidentify_template_name: (Optional) Optional template to use. References an instance of deidentify template. Any configuration directly specified in reidentify_config or inspect_config will override those set in the template. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() parent = DlpServiceClient.common_project_path(project_id) return client.reidentify_content( request={ "parent": parent, "reidentify_config": reidentify_config, "inspect_config": inspect_config, "item": item, "inspect_template_name": inspect_template_name, "reidentify_template_name": reidentify_template_name, }, retry=retry, timeout=timeout, metadata=metadata, ) def update_deidentify_template( self, template_id: str, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, deidentify_template: dict | DeidentifyTemplate | None = None, update_mask: dict | FieldMask | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> DeidentifyTemplate: """ Update the deidentify template. :param template_id: The ID of deidentify template to be updated. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param deidentify_template: New deidentify template value. 
:param update_mask: Mask to control which fields get updated. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not template_id: raise AirflowException("Please provide the ID of deidentify template to be updated.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.deidentify_template_path(organization_id, template_id) elif project_id: name = self._project_deidentify_template_path(project_id, template_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.update_deidentify_template( request={ "name": name, "deidentify_template": deidentify_template, "update_mask": update_mask, }, retry=retry, timeout=timeout, metadata=metadata, ) def update_inspect_template( self, template_id: str, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, inspect_template: dict | InspectTemplate | None = None, update_mask: dict | FieldMask | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> InspectTemplate: """ Update the inspect template. :param template_id: The ID of the inspect template to be updated. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param inspect_template: New inspect template value. :param update_mask: Mask to control which fields get updated. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not template_id: raise AirflowException("Please provide the ID of the inspect template to be updated.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.inspect_template_path(organization_id, template_id) elif project_id: name = self._project_inspect_template_path(project_id, template_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.update_inspect_template( request={ "name": name, "inspect_template": inspect_template, "update_mask": update_mask, }, retry=retry, timeout=timeout, metadata=metadata, ) @GoogleBaseHook.fallback_to_default_project_id def update_job_trigger( self, job_trigger_id: str, project_id: str, job_trigger: dict | JobTrigger | None = None, update_mask: dict | FieldMask | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> JobTrigger: """ Update a job trigger. :param job_trigger_id: The ID of the DLP job trigger to be updated. 
:param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param job_trigger: New job trigger value. :param update_mask: Mask to control which fields get updated. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if isinstance(job_trigger, dict): job_trigger = JobTrigger(**job_trigger) if isinstance(update_mask, dict): update_mask = FieldMask(**update_mask) if not job_trigger_id: raise AirflowException("Please provide the ID of the DLP job trigger to be updated.") name = DlpServiceClient.job_trigger_path(project_id, job_trigger_id) return client.update_job_trigger( name=name, job_trigger=job_trigger, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata, ) def update_stored_info_type( self, stored_info_type_id: str, organization_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, config: dict | StoredInfoTypeConfig | None = None, update_mask: dict | FieldMask | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), ) -> StoredInfoType: """ Update the stored info type by creating a new version. :param stored_info_type_id: The ID of the stored info type to be updated. :param organization_id: (Optional) The organization ID. Required to set this field if parent resource is an organization. :param project_id: (Optional) Google Cloud project ID where the DLP Instance exists. Only set this field if the parent resource is a project instead of an organization. :param config: Updated configuration for the stored info type. If not provided, a new version of the stored info type will be created with the existing configuration. :param update_mask: Mask to control which fields get updated. :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. """ client = self.get_conn() if not stored_info_type_id: raise AirflowException("Please provide the ID of the stored info type to be updated.") # Handle project_id from connection configuration project_id = project_id or self.project_id if organization_id: name = DlpServiceClient.stored_info_type_path(organization_id, stored_info_type_id) elif project_id: name = self._project_stored_info_type_path(project_id, stored_info_type_id) else: raise AirflowException("Please provide either organization_id or project_id.") return client.update_stored_info_type( request={ "name": name, "config": config, "update_mask": update_mask, }, retry=retry, timeout=timeout, metadata=metadata, )
CloudDLPHook
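A minimal usage sketch for the hook record above. The import path, connection ID, project ID, and sample payload are illustrative assumptions, not values taken from the source:

# Hedged sketch: calling CloudDLPHook.inspect_content with a made-up
# connection id, project id, and payload.
from airflow.providers.google.cloud.hooks.dlp import CloudDLPHook

hook = CloudDLPHook(gcp_conn_id="google_cloud_default")
response = hook.inspect_content(
    project_id="example-project",  # placeholder project
    item={"value": "Call me at (415) 555-0100"},
    inspect_config={"info_types": [{"name": "PHONE_NUMBER"}]},
)
for finding in response.result.findings:
    print(finding.info_type.name, finding.likelihood)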
python
pypa__pipenv
pipenv/patched/pip/_internal/exceptions.py
{ "start": 21256, "end": 24180 }
class ____(DiagnosticPipError): """The current environment is externally managed. This is raised when the current environment is externally managed, as defined by `PEP 668`_. The ``EXTERNALLY-MANAGED`` configuration is checked and displayed when the error is bubbled up to the user. :param error: The error message read from ``EXTERNALLY-MANAGED``. """ reference = "externally-managed-environment" def __init__(self, error: Optional[str]) -> None: if error is None: context = Text(_DEFAULT_EXTERNALLY_MANAGED_ERROR) else: context = Text(error) super().__init__( message="This environment is externally managed", context=context, note_stmt=( "If you believe this is a mistake, please contact your " "Python installation or OS distribution provider. " "You can override this, at the risk of breaking your Python " "installation or OS, by passing --break-system-packages." ), hint_stmt=Text("See PEP 668 for the detailed specification."), ) @staticmethod def _iter_externally_managed_error_keys() -> Iterator[str]: # LC_MESSAGES is in POSIX, but not the C standard. The most common # platform that does not implement this category is Windows, where # using other categories for console message localization is equally # unreliable, so we fall back to the locale-less vendor message. This # can always be re-evaluated when a vendor proposes a new alternative. try: category = locale.LC_MESSAGES except AttributeError: lang: Optional[str] = None else: lang, _ = locale.getlocale(category) if lang is not None: yield f"Error-{lang}" for sep in ("-", "_"): before, found, _ = lang.partition(sep) if not found: continue yield f"Error-{before}" yield "Error" @classmethod def from_config( cls, config: Union[pathlib.Path, str], ) -> "ExternallyManagedEnvironment": parser = configparser.ConfigParser(interpolation=None) try: parser.read(config, encoding="utf-8") section = parser["externally-managed"] for key in cls._iter_externally_managed_error_keys(): with contextlib.suppress(KeyError): return cls(section[key]) except KeyError: pass except (OSError, UnicodeDecodeError, configparser.ParsingError): from pipenv.patched.pip._internal.utils._log import VERBOSE exc_info = logger.isEnabledFor(VERBOSE) logger.warning("Failed to read %s", config, exc_info=exc_info) return cls(None)
ExternallyManagedEnvironment
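The from_config classmethod above boils down to a small configparser lookup. A self-contained, hedged sketch of that PEP 668 logic; the function name and file path are placeholders:

# Hedged sketch: read an EXTERNALLY-MANAGED file and prefer a
# locale-specific "Error-<lang>" key over the plain "Error" key.
import configparser
from typing import Optional


def read_externally_managed_error(path: str, lang: Optional[str] = None) -> Optional[str]:
    parser = configparser.ConfigParser(interpolation=None)
    parser.read(path, encoding="utf-8")
    if "externally-managed" not in parser:
        return None
    section = parser["externally-managed"]
    candidates = [f"Error-{lang}"] if lang else []
    candidates.append("Error")  # locale-less fallback, as in the class above
    for key in candidates:
        if key in section:
            return section[key]
    return None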
python
rq__rq
rq/worker.py
{ "start": 75895, "end": 76244 }
class ____(Worker): """ Modified version of Worker that dequeues jobs from the queues using a round-robin strategy. """ def reorder_queues(self, reference_queue): pos = self._ordered_queues.index(reference_queue) self._ordered_queues = self._ordered_queues[pos + 1 :] + self._ordered_queues[: pos + 1]
RoundRobinWorker
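The queue reordering above is a plain list rotation; a quick standalone illustration of what reorder_queues computes:

# After serving from the queue at index `pos`, the list is rotated so that
# queue moves to the back and the next queue gets priority.
queues = ["high", "default", "low"]
pos = queues.index("default")
rotated = queues[pos + 1:] + queues[:pos + 1]
assert rotated == ["low", "high", "default"]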
python
ray-project__ray
python/ray/serve/_private/test_utils.py
{ "start": 5774, "end": 6970 }
class ____: def __init__(self, deployment_name: str, app_name: str = SERVE_DEFAULT_APP_NAME): self._deployment_name = deployment_name self._app_name = app_name self._protocol = RequestProtocol.UNDEFINED self._running_replicas_populated = False self._initialized = False def is_initialized(self): return self._initialized def _init(self): if self._initialized: raise RuntimeError("already initialized") self._initialized = True def options(self, *args, **kwargs): return self def __eq__(self, dep: Tuple[str]): other_deployment_name, other_app_name = dep return ( self._deployment_name == other_deployment_name and self._app_name == other_app_name ) def _set_request_protocol(self, protocol: RequestProtocol): self._protocol = protocol def _get_or_create_router(self): pass def running_replicas_populated(self) -> bool: return self._running_replicas_populated def set_running_replicas_populated(self, val: bool): self._running_replicas_populated = val @serve.deployment
MockDeploymentHandle
python
dagster-io__dagster
python_modules/dagster-pipes/dagster_pipes/__init__.py
{ "start": 1833, "end": 2051 }
class ____(TypedDict): """Payload generated on startup of the external-side `PipesMessageWriter` containing arbitrary information about the external process. """ extras: Mapping[str, Any]
PipesOpenedData
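A hedged example of constructing and reading this TypedDict, assuming it is importable from dagster_pipes as the path above suggests; the extras payload is invented:

from dagster_pipes import PipesOpenedData

opened: PipesOpenedData = {"extras": {"hostname": "worker-1", "pid": 4242}}
print(opened["extras"]["hostname"])  # TypedDicts are plain dicts at runtime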
python
catalyst-team__catalyst
catalyst/metrics/_topk_metric.py
{ "start": 184, "end": 3480 }
class ____(ICallbackBatchMetric): """ Base class for `topk` metrics. Args: metric_name: name of the metric metric_function: metric calculation function topk: list of `topk` for metric@topk computing compute_on_call: if True, computes and returns metric value during metric call prefix: metric prefix suffix: metric suffix """ def __init__( self, metric_name: str, metric_function: Callable, topk: Iterable[int] = None, compute_on_call: bool = True, prefix: str = None, suffix: str = None, ): """Init TopKMetric""" super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix) self.metric_name = metric_name self.metric_function = metric_function self.topk = topk or (1,) self.metrics: List[AdditiveMetric] = [ AdditiveMetric() for _ in range(len(self.topk)) ] def reset(self) -> None: """Reset all fields""" for metric in self.metrics: metric.reset() def update(self, logits: torch.Tensor, targets: torch.Tensor) -> List[float]: """ Update metric value with value for new data and return intermediate metrics values. Args: logits (torch.Tensor): tensor of logits targets (torch.Tensor): tensor of targets Returns: list of metric@k values """ values = self.metric_function(logits, targets, topk=self.topk) values = [v.item() for v in values] for value, metric in zip(values, self.metrics): metric.update(value, len(targets)) return values def update_key_value( self, logits: torch.Tensor, targets: torch.Tensor ) -> Dict[str, float]: """ Update metric value with value for new data and return intermediate metrics values in key-value format. Args: logits (torch.Tensor): tensor of logits targets (torch.Tensor): tensor of targets Returns: dict of metric@k values """ values = self.update(logits=logits, targets=targets) output = { f"{self.prefix}{self.metric_name}{key:02d}{self.suffix}": value for key, value in zip(self.topk, values) } return output def compute(self) -> Any: """ Compute metric for all data Returns: list of mean values, list of std values """ means, stds = zip(*(metric.compute() for metric in self.metrics)) return means, stds def compute_key_value(self) -> Dict[str, float]: """ Compute metric for all data and return results in key-value format Returns: dict of metrics """ means, stds = self.compute() output_mean = { f"{self.prefix}{self.metric_name}{key:02d}{self.suffix}": value for key, value in zip(self.topk, means) } output_std = { f"{self.prefix}{self.metric_name}{key:02d}{self.suffix}/std": value for key, value in zip(self.topk, stds) } return {**output_mean, **output_std} __all__ = ["TopKMetric"]
TopKMetric
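TopKMetric above only aggregates; the per-batch math lives in the injected metric_function. A hedged sketch of one compatible implementation (top-k accuracy) on made-up tensors:

import torch


def accuracy_at_k(logits: torch.Tensor, targets: torch.Tensor, topk=(1,)):
    # Indices of the max(topk) highest logits per sample: (batch, max_k).
    _, pred = logits.topk(max(topk), dim=1)
    # Broadcast-compare against targets: (batch, max_k) booleans.
    correct = pred.eq(targets.view(-1, 1))
    # One value per k: fraction of samples with the target in the top k.
    return [correct[:, :k].any(dim=1).float().mean() for k in topk]


logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
targets = torch.tensor([1, 1])
print(accuracy_at_k(logits, targets, topk=(1, 2)))  # [tensor(0.5000), tensor(1.)]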
python
kamyu104__LeetCode-Solutions
Python/smallest-substring-with-identical-characters-i.py
{ "start": 57, "end": 1035 }
class ____(object):
    def minLength(self, s, numOps):
        """
        :type s: str
        :type numOps: int
        :rtype: int
        """
        def binary_search(left, right, check):
            while left <= right:
                mid = left + (right - left) // 2
                if check(mid):
                    right = mid - 1
                else:
                    left = mid + 1
            return left

        def lengths():
            # yield the length of each maximal run of equal characters
            cnt = 0
            for i in range(len(s)):
                cnt += 1
                if i + 1 == len(s) or s[i + 1] != s[i]:
                    yield cnt
                    cnt = 0

        def check(x):
            if x == 1:
                # cost to turn s into an alternating "0101..." or "1010..." string
                cnt = sum(int(c) != i % 2 for i, c in enumerate(s))
                return min(cnt, len(s) - cnt) <= numOps
            # each run of length l needs l // (x + 1) edits to cap runs at x
            return sum(l // (x + 1) for l in lengths()) <= numOps

        return binary_search(1, len(s), check)


# Time:  O(nlogn)
# Space: O(n)
# binary search, greedy
Solution
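The minLength solution above is an instance of binary search on the answer; here is that pattern isolated on a toy predicate:

# Generic first-true binary search: smallest x in [lo, hi] with check(x)
# True, assuming check is monotone (False ... False True ... True).
def first_true(lo: int, hi: int, check) -> int:
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if check(mid):
            hi = mid - 1
        else:
            lo = mid + 1
    return lo


assert first_true(1, 30, lambda x: x * x >= 30) == 6  # 5*5=25 < 30 <= 6*6=36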
python
pytorch__pytorch
torch/fx/passes/net_min_base.py
{ "start": 705, "end": 867 }
class ____(Exception): """ Raised if failed to split out a minimize module """ @compatibility(is_backward_compatible=False)
FxNetMinimizerBadModuleError
python
huggingface__transformers
examples/modular-transformers/modeling_dummy_bert.py
{ "start": 21811, "end": 22448 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.transform = DummyBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=True) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states @auto_docstring
DummyBertLMPredictionHead
python
weaviate__weaviate-python-client
weaviate/debug/executor.py
{ "start": 315, "end": 1908 }
class ____(Generic[ConnectionType]): def __init__(self, connection: ConnectionType): self._connection = connection def get_object_over_rest( self, collection: str, uuid: UUID, *, consistency_level: Optional[ConsistencyLevel] = None, node_name: Optional[str] = None, tenant: Optional[str] = None, ) -> executor.Result[Optional[DebugRESTObject]]: """Use the REST API endpoint /objects/{className}/{id} to retrieve an object directly from the database without search. The key difference between `debug.get_object_over_rest` and `query.fetch_object_by_id` is the underlying protocol. This method uses REST while that method uses gRPC. """ path = f"/objects/{collection}/{str(uuid)}" params: Dict[str, str] = {} if consistency_level is not None: params["consistency"] = consistency_level.value if node_name is not None: params["node_name"] = node_name if tenant is not None: params["tenant"] = tenant def resp(response: Response) -> Optional[DebugRESTObject]: if response.status_code == 404: return None return DebugRESTObject(**response.json()) return executor.execute( response_callback=resp, method=self._connection.get, path=path, params=params, error_msg="Object was not retrieved", status_codes=_ExpectedStatusCodes(ok_in=[200, 404], error="get object"), )
_DebugExecutor
python
walkccc__LeetCode
solutions/2899. Last Visited Integers/2899.py
{ "start": 0, "end": 308 }
class ____: def lastVisitedIntegers(self, words: list[str]) -> list[int]: ans = [] nums = [] k = 0 for word in words: if word == 'prev': k += 1 ans.append(-1 if k > len(nums) else nums[-k]) else: k = 0 nums.append(int(word)) return ans
Solution
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 147215, "end": 147598 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("field", "direction") field = sgqlc.types.Field( sgqlc.types.non_null(TeamOrderField), graphql_name="field" ) direction = sgqlc.types.Field( sgqlc.types.non_null(OrderDirection), graphql_name="direction" )
TeamOrder
python
faif__python-patterns
patterns/behavioral/observer.py
{ "start": 1609, "end": 1930 }
class ____(Subject): def __init__(self, name: str = "") -> None: super().__init__() self.name = name self._data = 0 @property def data(self) -> int: return self._data @data.setter def data(self, value: int) -> None: self._data = value self.notify()
Data
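Data above inherits attach/notify from a Subject base defined earlier in the original file and not shown here; a hedged, self-contained stand-in showing the intended wiring:

# Minimal stand-in Subject plus a concrete observer; the original Subject
# in the patterns repo may differ in detail.
class Subject:
    def __init__(self) -> None:
        self._observers = []

    def attach(self, observer) -> None:
        self._observers.append(observer)

    def notify(self) -> None:
        for observer in self._observers:
            observer.update(self)


class Printer:
    def update(self, subject) -> None:
        print(f"{subject.name} -> {subject.data}")


# With such a base in place, the Data class above works like this:
#   data = Data("temperature")
#   data.attach(Printer())
#   data.data = 21   # the setter calls notify(), printing "temperature -> 21"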
python
neetcode-gh__leetcode
python/0739-daily-temperatures.py
{ "start": 0, "end": 403 }
class ____: def dailyTemperatures(self, temperatures: List[int]) -> List[int]: res = [0] * len(temperatures) stack = [] # pair: [temp, index] for i, t in enumerate(temperatures): while stack and t > stack[-1][0]: stackT, stackInd = stack.pop() res[stackInd] = i - stackInd stack.append((t, i)) return res
Solution
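A quick sanity check of the monotonic-stack solution above on the classic example (assumes the Solution class and its List import are in scope):

temps = [73, 74, 75, 71, 69, 72, 76, 73]
assert Solution().dailyTemperatures(temps) == [1, 1, 4, 2, 1, 1, 0, 0]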
python
pandas-dev__pandas
asv_bench/benchmarks/reshape.py
{ "start": 1514, "end": 2503 }
class ____: params = ["datetime64[ns, US/Pacific]", "Period[s]"] param_names = ["dtype"] def setup(self, dtype): lev = pd.Index(list("ABCDEFGHIJ")) ri = pd.Index(range(1000)) mi = MultiIndex.from_product([lev, ri], names=["foo", "bar"]) index = date_range("2016-01-01", periods=10000, freq="s", tz="US/Pacific") if dtype == "Period[s]": index = index.tz_localize(None).to_period("s") ser = pd.Series(index, index=mi) df = ser.unstack("bar") # roundtrips -> df.stack().equals(ser) self.ser = ser self.df = df def time_stack(self, dtype): self.df.stack() def time_unstack_fast(self, dtype): # last level -> doesn't have to make copies self.ser.unstack("bar") def time_unstack_slow(self, dtype): # first level -> must make copies self.ser.unstack("foo") def time_transpose(self, dtype): self.df.T
ReshapeExtensionDtype
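A hedged mini-example of the stack/unstack round-trip this benchmark measures, on a toy frame rather than the 10,000-row benchmark data:

import pandas as pd

mi = pd.MultiIndex.from_product([list("AB"), [0, 1]], names=["foo", "bar"])
ser = pd.Series(range(4), index=mi)
df = ser.unstack("bar")        # the "bar" level becomes the columns
assert df.stack().equals(ser)  # stack() round-trips back to the Series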
python
Pylons__pyramid
tests/test_url.py
{ "start": 53446, "end": 53657 }
class ____: pregenerator = None name = 'route' def __init__(self, result='/1/2/3'): self.result = result def generate(self, kw): self.kw = kw return self.result
DummyRoute
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/hooks/base_aws.py
{ "start": 19483, "end": 44687 }
class ____(BaseHook, Generic[BaseAwsConnection]):
    """
    Generic class for interacting with AWS.

    This class provides a thin wrapper around the boto3 Python library.

    :param aws_conn_id: The Airflow connection used for AWS credentials.
        If this is None or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then default boto3 configuration would be used (and must be
        maintained on each worker node).
    :param verify: Whether or not to verify SSL certificates.
        See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
    :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :param client_type: Reference to :external:py:meth:`boto3.client service_name \
        <boto3.session.Session.client>`, e.g. 'emr', 'batch', 's3', etc.
        Mutually exclusive with ``resource_type``.
    :param resource_type: Reference to :external:py:meth:`boto3.resource service_name \
        <boto3.session.Session.resource>`, e.g. 's3', 'ec2', 'dynamodb', etc.
        Mutually exclusive with ``client_type``.
    :param config: Configuration for botocore client. See:
        https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
    """

    conn_name_attr = "aws_conn_id"
    default_conn_name = "aws_default"
    conn_type = "aws"
    hook_name = "Amazon Web Services"

    def __init__(
        self,
        aws_conn_id: str | None = default_conn_name,
        verify: bool | str | None = None,
        region_name: str | None = None,
        client_type: str | None = None,
        resource_type: str | None = None,
        config: Config | dict[str, Any] | None = None,
    ) -> None:
        super().__init__()
        self.aws_conn_id = aws_conn_id
        self.client_type = client_type
        self.resource_type = resource_type

        self._region_name = region_name
        if isinstance(config, dict):
            config = Config(**config)
        self._config = config
        self._verify = verify

    @classmethod
    @return_on_error("Unknown")
    def _get_provider_version(cls) -> str:
        """Check the Providers Manager for the package version."""
        manager = ProvidersManager()
        hook = manager.hooks[cls.conn_type]
        if not hook:
            # This gets caught immediately, but without it MyPy complains
            # Item "None" of "Optional[HookInfo]" has no attribute "package_name"
            # on the following line and static checks fail.
            raise ValueError(f"Hook info for {cls.conn_type} not found in the Provider Manager.")
        return manager.providers[hook.package_name].version

    @staticmethod
    def _find_operator_class_name(target_function_name: str) -> str | None:
        """
        Given a frame off the stack, return the name of the class that made the call.

        This method may raise a ValueError or an IndexError. The caller is
        responsible for catching and handling those.
        """
        stack = inspect.stack()
        # Find the index of the most recent frame which called the provided function name
        # and pull that frame off the stack.
        target_frames = [frame for frame in stack if frame.function == target_function_name]
        if target_frames:
            target_frame = target_frames[0][0]
        else:
            return None
        # Get the local variables for that frame.
        frame_variables = target_frame.f_locals["self"]
        # Get the class object for that frame.
        frame_class_object = frame_variables.__class__
        # Return the name of the class object.
return frame_class_object.__name__ @staticmethod def _find_executor_class_name() -> str | None: """Inspect the call stack looking for any executor classes and returning the first found.""" stack = inspect.stack() # Fetch class objects on all frames, looking for one containing an executor (since it # will inherit from BaseExecutor) for frame in stack: classes = [] for name, obj in frame[0].f_globals.items(): if inspect.isclass(obj): classes.append(name) if "BaseExecutor" in classes: return classes[-1] return None @return_on_error("Unknown") def _get_caller(self, target_function_name: str = "execute") -> str: """Try to determine the caller of this hook. Whether that be an AWS Operator, Sensor or Executor.""" caller = self._find_operator_class_name(target_function_name) if caller == "BaseSensorOperator": # If the result is a BaseSensorOperator, then look for whatever last called "poke". caller = self._find_operator_class_name("poke") if not caller: # Check if we can find an executor caller = self._find_executor_class_name() return caller if caller else "Unknown" @staticmethod @return_on_error("00000000-0000-0000-0000-000000000000") def _generate_dag_key() -> str: """ Generate a DAG key. The Object Identifier (OID) namespace is used to salt the dag_id value. That salted value is used to generate a SHA-1 hash which, by definition, can not (reasonably) be reversed. No personal data can be inferred or extracted from the resulting UUID. """ return generate_uuid(os.environ.get("AIRFLOW_CTX_DAG_ID")) @staticmethod @return_on_error("Unknown") def _get_airflow_version() -> str: """Fetch and return the current Airflow version.""" # This can be a circular import under specific configurations. # Importing locally to either avoid or catch it if it does happen. from airflow import __version__ as airflow_version return airflow_version def _generate_user_agent_extra_field(self, existing_user_agent_extra: str) -> str: user_agent_extra_values = [ f"Airflow/{self._get_airflow_version()}", f"AmPP/{self._get_provider_version()}", f"Caller/{self._get_caller()}", f"DagRunKey/{self._generate_dag_key()}", existing_user_agent_extra or "", ] return " ".join(user_agent_extra_values).strip() @cached_property def conn_config(self) -> AwsConnectionWrapper: """Get the Airflow Connection object and wrap it in helper (cached).""" connection = None if self.aws_conn_id: try: connection = self.get_connection(self.aws_conn_id) except AirflowNotFoundException: self.log.warning( "Unable to find AWS Connection ID '%s', switching to empty.", self.aws_conn_id ) # In the TaskSDK's BaseHook, it only retrieves the connection via task-sdk. Since the AWS system testing infrastructure # doesn't use task-sdk, this leads to an error which we handle below. except ImportError as e: if "SUPERVISOR_COMMS" in str(e): self.log.exception(e) else: raise return AwsConnectionWrapper( conn=connection, region_name=self._region_name, botocore_config=self._config, verify=self._verify, ) def _resolve_service_name(self, is_resource_type: bool = False) -> str: """Resolve service name based on type or raise an error.""" if exactly_one(self.client_type, self.resource_type): # It is possible to write simple conditions, however it make mypy unhappy. 
if self.client_type: if is_resource_type: raise LookupError("Requested `resource_type`, but `client_type` was set instead.") return self.client_type if self.resource_type: if not is_resource_type: raise LookupError("Requested `client_type`, but `resource_type` was set instead.") return self.resource_type raise ValueError( f"Either client_type={self.client_type!r} or " f"resource_type={self.resource_type!r} must be provided, not both." ) @property def service_name(self) -> str: """Extracted botocore/boto3 service name from hook parameters.""" return self._resolve_service_name(is_resource_type=bool(self.resource_type)) @property def service_config(self) -> dict: """Config for hook-specific service from AWS Connection.""" return self.conn_config.get_service_config(service_name=self.service_name) @property def region_name(self) -> str | None: """AWS Region Name read-only property.""" return self.conn_config.region_name @property def config(self) -> Config: """Configuration for botocore client read-only property.""" return self.conn_config.botocore_config or botocore.config.Config() @property def verify(self) -> bool | str | None: """Verify or not SSL certificates boto3 client/resource read-only property.""" return self.conn_config.verify @cached_property def account_id(self) -> str: """Return associated AWS Account ID.""" return ( self.get_session(region_name=self.region_name) .client( service_name="sts", endpoint_url=self.conn_config.get_service_endpoint_url("sts"), config=self.config, verify=self.verify, ) .get_caller_identity()["Account"] ) def get_session(self, region_name: str | None = None, deferrable: bool = False) -> boto3.session.Session: """Get the underlying boto3.session.Session(region_name=region_name).""" return SessionFactory( conn=self.conn_config, region_name=region_name, config=self.config ).create_session(deferrable=deferrable) def _get_config(self, config: Config | None = None) -> Config: """ No AWS Operators use the config argument to this method. Keep backward compatibility with other users who might use it. """ if config is None: config = deepcopy(self.config) # ignore[union-attr] is required for this block to appease MyPy # because the user_agent_extra field is generated at runtime. 
user_agent_config = Config( user_agent_extra=self._generate_user_agent_extra_field( existing_user_agent_extra=config.user_agent_extra ) ) return config.merge(user_agent_config) def get_client_type( self, region_name: str | None = None, config: Config | None = None, deferrable: bool = False, ) -> boto3.client: """Get the underlying boto3 client using boto3 session.""" service_name = self._resolve_service_name(is_resource_type=False) session = self.get_session(region_name=region_name, deferrable=deferrable) endpoint_url = self.conn_config.get_service_endpoint_url(service_name=service_name) if not isinstance(session, boto3.session.Session): return session.create_client( service_name=service_name, endpoint_url=endpoint_url, config=self._get_config(config), verify=self.verify, ) return session.client( service_name=service_name, endpoint_url=endpoint_url, config=self._get_config(config), verify=self.verify, ) def get_resource_type( self, region_name: str | None = None, config: Config | None = None, ) -> boto3.resource: """Get the underlying boto3 resource using boto3 session.""" service_name = self._resolve_service_name(is_resource_type=True) session = self.get_session(region_name=region_name) return session.resource( service_name=service_name, endpoint_url=self.conn_config.get_service_endpoint_url(service_name=service_name), config=self._get_config(config), verify=self.verify, ) @cached_property def conn(self) -> BaseAwsConnection: """ Get the underlying boto3 client/resource (cached). :return: boto3.client or boto3.resource """ if self.client_type: return self.get_client_type(region_name=self.region_name) return self.get_resource_type(region_name=self.region_name) @property def async_conn(self): """ [DEPRECATED] Get an aiobotocore client to use for async operations. This property is deprecated. Accessing it in an async context will cause the event loop to block. Use the async method `get_async_conn` instead. """ warnings.warn( "The property `async_conn` is deprecated. Accessing it in an async context will cause the event loop to block. " "Use the async method `get_async_conn` instead.", AirflowProviderDeprecationWarning, stacklevel=2, ) return self._get_async_conn() async def get_async_conn(self): """Get an aiobotocore client to use for async operations.""" # We have to wrap the call `self.get_client_type` in another call `_get_async_conn`, # because one of its arguments `self.region_name` is a `@property` decorated function # calling the cached property `self.conn_config` at the end. return await sync_to_async(self._get_async_conn)() def _get_async_conn(self): if not self.client_type: raise ValueError("client_type must be specified.") return self.get_client_type(region_name=self.region_name, deferrable=True) @cached_property def _client(self) -> botocore.client.BaseClient: conn = self.conn if isinstance(conn, botocore.client.BaseClient): return conn return conn.meta.client @property def conn_client_meta(self) -> ClientMeta: """Get botocore client metadata from Hook connection (cached).""" return self._client.meta @property def conn_region_name(self) -> str: """Get actual AWS Region Name from Hook connection (cached).""" return self.conn_client_meta.region_name @property def conn_partition(self) -> str: """Get associated AWS Region Partition from Hook connection (cached).""" return self.conn_client_meta.partition def get_conn(self) -> BaseAwsConnection: """ Get the underlying boto3 client/resource (cached). Implemented so that caching works as intended. 
        It exists for compatibility with subclasses that rely on a super().get_conn() method.

        :return: boto3.client or boto3.resource
        """
        # Compat shim
        return self.conn

    def get_credentials(self, region_name: str | None = None) -> ReadOnlyCredentials:
        """
        Get the underlying `botocore.Credentials` object.

        This contains the following authentication attributes: access_key, secret_key and token.
        By using this method, secret_key and token will also be masked in task logs.
        """
        # Credentials are refreshable, so accessing your access key and
        # secret key separately can lead to a race condition.
        # See https://stackoverflow.com/a/36291428/8283373
        creds = self.get_session(region_name=region_name).get_credentials().get_frozen_credentials()
        mask_secret(creds.secret_key)
        if creds.token:
            mask_secret(creds.token)
        return creds

    def expand_role(self, role: str, region_name: str | None = None) -> str:
        """
        Get the Amazon Resource Name (ARN) for the role.

        If IAM role is already an IAM role ARN, the value is returned unchanged.

        :param role: IAM role name or ARN
        :param region_name: Optional region name to get credentials for
        :return: IAM role ARN
        """
        if "/" in role:
            return role
        session = self.get_session(region_name=region_name)
        _client = session.client(
            service_name="iam",
            endpoint_url=self.conn_config.get_service_endpoint_url("iam"),
            config=self.config,
            verify=self.verify,
        )
        return _client.get_role(RoleName=role)["Role"]["Arn"]

    @staticmethod
    def retry(should_retry: Callable[[Exception], bool]):
        """Repeat requests in response to exceeding a temporary quota limit."""

        def retry_decorator(fun: Callable):
            @wraps(fun)
            def decorator_f(self, *args, **kwargs):
                retry_args = getattr(self, "retry_args", None)
                if retry_args is None:
                    return fun(self, *args, **kwargs)
                multiplier = retry_args.get("multiplier", 1)
                min_limit = retry_args.get("min", 1)
                max_limit = retry_args.get("max", 1)
                stop_after_delay = retry_args.get("stop_after_delay", 10)
                tenacity_before_logger = tenacity.before_log(self.log, logging.INFO) if self.log else None
                tenacity_after_logger = tenacity.after_log(self.log, logging.INFO) if self.log else None
                default_kwargs = {
                    "wait": tenacity.wait_exponential(multiplier=multiplier, max=max_limit, min=min_limit),
                    "retry": tenacity.retry_if_exception(should_retry),
                    "stop": tenacity.stop_after_delay(stop_after_delay),
                    "before": tenacity_before_logger,
                    "after": tenacity_after_logger,
                }
                return tenacity.retry(**default_kwargs)(fun)(self, *args, **kwargs)

            return decorator_f

        return retry_decorator

    @classmethod
    def get_ui_field_behaviour(cls) -> dict[str, Any]:
        """Return custom UI field behaviour for AWS Connection."""
        return {
            "hidden_fields": ["host", "schema", "port"],
            "relabeling": {
                "login": "AWS Access Key ID",
                "password": "AWS Secret Access Key",
            },
            "placeholders": {
                "login": "AKIAIOSFODNN7EXAMPLE",
                "password": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
                "extra": json.dumps(
                    {
                        "region_name": "us-east-1",
                        "session_kwargs": {"profile_name": "default"},
                        "config_kwargs": {"retries": {"mode": "standard", "max_attempts": 10}},
                        "role_arn": "arn:aws:iam::123456789098:role/role-name",
                        "assume_role_method": "assume_role",
                        "assume_role_kwargs": {"RoleSessionName": "airflow"},
                        "aws_session_token": "AQoDYXdzEJr...EXAMPLETOKEN",
                        "endpoint_url": "http://localhost:4566",
                    },
                    indent=2,
                ),
            },
        }

    def test_connection(self):
        """
        Test the AWS connection by calling the AWS STS (Security Token Service) GetCallerIdentity API.

        ..
seealso:: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html """ try: session = self.get_session() conn_info = session.client( service_name="sts", endpoint_url=self.conn_config.get_service_endpoint_url("sts", sts_test_connection=True), ).get_caller_identity() metadata = conn_info.pop("ResponseMetadata", {}) if metadata.get("HTTPStatusCode") != 200: try: return False, json.dumps(metadata) except TypeError: return False, str(metadata) conn_info["credentials_method"] = session.get_credentials().method conn_info["region_name"] = session.region_name return True, ", ".join(f"{k}={v!r}" for k, v in conn_info.items()) except Exception as e: return False, f"{type(e).__name__!r} error occurred while testing connection: {e}" @cached_property def waiter_path(self) -> os.PathLike[str] | None: filename = self.client_type if self.client_type else self.resource_type path = Path(__file__).parents[1].joinpath(f"waiters/{filename}.json").resolve() return path if path.exists() else None def get_waiter( self, waiter_name: str, parameters: dict[str, str] | None = None, config_overrides: dict[str, Any] | None = None, deferrable: bool = False, client=None, ) -> Waiter: """ Get a waiter by name. First checks if there is a custom waiter with the provided waiter_name and uses that if it exists, otherwise it will check the service client for a waiter that matches the name and pass that through. If `deferrable` is True, the waiter will be an AIOWaiter, generated from the client that is passed as a parameter. If `deferrable` is True, `client` must be provided. :param waiter_name: The name of the waiter. The name should exactly match the name of the key in the waiter model file (typically this is CamelCase). :param parameters: will scan the waiter config for the keys of that dict, and replace them with the corresponding value. If a custom waiter has such keys to be expanded, they need to be provided here. Note: cannot be used if parameters are included in config_overrides :param config_overrides: will update values of provided keys in the waiter's config. Only specified keys will be updated. :param deferrable: If True, the waiter is going to be an async custom waiter. An async client must be provided in that case. :param client: The client to use for the waiter's operations """ from airflow.providers.amazon.aws.waiters.base_waiter import BaseBotoWaiter if deferrable and not client: raise ValueError("client must be provided for a deferrable waiter.") if parameters is not None and config_overrides is not None and "acceptors" in config_overrides: raise ValueError('parameters must be None when "acceptors" is included in config_overrides') # Currently, the custom waiter doesn't work with resource_type, only client_type is supported. client = client or self._client if self.waiter_path and (waiter_name in self._list_custom_waiters()): # Technically if waiter_name is in custom_waiters then self.waiter_path must # exist but MyPy doesn't like the fact that self.waiter_path could be None. with open(self.waiter_path) as config_file: config: dict = json.loads(config_file.read()) if config_overrides is not None: config["waiters"][waiter_name].update(config_overrides) config = self._apply_parameters_value(config, waiter_name, parameters) return BaseBotoWaiter(client=client, model_config=config, deferrable=deferrable).waiter( waiter_name ) # If there is no custom waiter found for the provided name, # then try checking the service's official waiters. 
return client.get_waiter(waiter_name) @staticmethod def _apply_parameters_value(config: dict, waiter_name: str, parameters: dict[str, str] | None) -> dict: """Replace potential jinja templates in acceptors definition.""" # only process the waiter we're going to use to not raise errors for missing params for other waiters. acceptors = config["waiters"][waiter_name]["acceptors"] for a in acceptors: arg = a["argument"] template = jinja2.Template(arg, autoescape=False, undefined=jinja2.StrictUndefined) try: a["argument"] = template.render(parameters or {}) except jinja2.UndefinedError as e: raise AirflowException( f"Parameter was not supplied for templated waiter's acceptor '{arg}'", e ) return config def list_waiters(self) -> list[str]: """Return a list containing the names of all waiters for the service, official and custom.""" return [*self._list_official_waiters(), *self._list_custom_waiters()] def _list_official_waiters(self) -> list[str]: return self._client.waiter_names def _list_custom_waiters(self) -> list[str]: if not self.waiter_path: return [] with open(self.waiter_path) as config_file: model_config = json.load(config_file) return WaiterModel(model_config).waiter_names
AwsGenericHook
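A hedged sketch of how a concrete service hook is typically derived from the generic hook above; the subclass name, pinned service, and connection ID are illustrative:

from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook


class S3LikeHook(AwsGenericHook):
    """Hypothetical wrapper that pins client_type to one AWS service."""

    def __init__(self, **kwargs) -> None:
        kwargs["client_type"] = "s3"
        super().__init__(**kwargs)


hook = S3LikeHook(aws_conn_id="aws_default")
# hook.conn is now a cached boto3 S3 client, e.g. hook.conn.list_buckets()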
python
wandb__wandb
wandb/jupyter.py
{ "start": 9088, "end": 15829 }
class ____: def __init__(self, settings: wandb.Settings) -> None: self.outputs: dict[int, Any] = {} self.settings = settings self.shell = IPython.get_ipython() def save_display(self, exc_count, data_with_metadata): self.outputs[exc_count] = self.outputs.get(exc_count, []) # byte values such as images need to be encoded in base64 # otherwise nbformat.v4.new_output will throw a NotebookValidationError data = data_with_metadata["data"] b64_data = {} for key in data: val = data[key] if isinstance(val, bytes): b64_data[key] = b64encode(val).decode("utf-8") else: b64_data[key] = val self.outputs[exc_count].append( {"data": b64_data, "metadata": data_with_metadata["metadata"]} ) def probe_ipynb(self): """Return notebook as dict or None.""" relpath = self.settings.x_jupyter_path if relpath: if os.path.exists(relpath): with open(relpath) as json_file: data = json.load(json_file) return data colab_ipynb = attempt_colab_load_ipynb() if colab_ipynb: return colab_ipynb kaggle_ipynb = attempt_kaggle_load_ipynb() if kaggle_ipynb and len(kaggle_ipynb["cells"]) > 0: return kaggle_ipynb return def save_ipynb(self) -> bool: if not self.settings.save_code: logger.info("not saving jupyter notebook") return False ret = False try: ret = self._save_ipynb() except Exception: wandb.termerror("Failed to save notebook.") logger.exception("Problem saving notebook.") return ret def _save_ipynb(self) -> bool: relpath = self.settings.x_jupyter_path logger.info("looking for notebook: %s", relpath) if relpath: if os.path.exists(relpath): shutil.copy( relpath, os.path.join( self.settings._tmp_code_dir, os.path.basename(relpath) ), ) return True # TODO: likely only save if the code has changed colab_ipynb = attempt_colab_load_ipynb() if colab_ipynb: try: jupyter_metadata = ( notebook_metadata_from_jupyter_servers_and_kernel_id() ) nb_name = jupyter_metadata["name"] except Exception: nb_name = "colab.ipynb" if not nb_name.endswith(".ipynb"): nb_name += ".ipynb" with open( os.path.join( self.settings._tmp_code_dir, nb_name, ), "w", encoding="utf-8", ) as f: f.write(json.dumps(colab_ipynb)) return True kaggle_ipynb = attempt_kaggle_load_ipynb() if kaggle_ipynb and len(kaggle_ipynb["cells"]) > 0: with open( os.path.join( self.settings._tmp_code_dir, kaggle_ipynb["metadata"]["name"] ), "w", encoding="utf-8", ) as f: f.write(json.dumps(kaggle_ipynb)) return True return False def save_history(self, run: wandb.Run): """This saves all cell executions in the current session as a new notebook.""" try: from nbformat import v4, validator, write # type: ignore except ImportError: wandb.termerror( "The nbformat package was not found." " It is required to save notebook history." ) return # TODO: some tests didn't patch ipython properly? if self.shell is None: return cells = [] hist = list(self.shell.history_manager.get_range(output=True)) if len(hist) <= 1 or not self.settings.save_code: logger.info("not saving jupyter history") return try: for _, execution_count, exc in hist: if exc[1]: # TODO: capture stderr? 
outputs = [ v4.new_output(output_type="stream", name="stdout", text=exc[1]) ] else: outputs = [] if self.outputs.get(execution_count): for out in self.outputs[execution_count]: outputs.append( v4.new_output( output_type="display_data", data=out["data"], metadata=out["metadata"] or {}, ) ) cells.append( v4.new_code_cell( execution_count=execution_count, source=exc[0], outputs=outputs ) ) if hasattr(self.shell, "kernel"): language_info = self.shell.kernel.language_info else: language_info = {"name": "python", "version": sys.version} logger.info("saving %i cells to _session_history.ipynb", len(cells)) nb = v4.new_notebook( cells=cells, metadata={ "kernelspec": { "display_name": f"Python {sys.version_info[0]}", "name": f"python{sys.version_info[0]}", "language": "python", }, "language_info": language_info, }, ) state_path = os.path.join("code", "_session_history.ipynb") run._set_config_wandb("session_history", state_path) filesystem.mkdir_exists_ok(os.path.join(self.settings.files_dir, "code")) with open( os.path.join(self.settings._tmp_code_dir, "_session_history.ipynb"), "w", encoding="utf-8", ) as f: write(nb, f, version=4) with open( os.path.join(self.settings.files_dir, state_path), "w", encoding="utf-8", ) as f: write(nb, f, version=4) except (OSError, validator.NotebookValidationError): wandb.termerror("Unable to save notebook session history.") logger.exception("Unable to save notebook session history.")
Notebook
python
automl__auto-sklearn
test/test_pipeline/components/regression/test_mlp.py
{ "start": 187, "end": 2944 }
class ____(BaseRegressionComponentTest):
    # NOTE: `default_boston`
    #
    #   Github runners seem to nondeterministically fail `test_boston`
    #   meaning 'default_irish_proba_places' needs to be set.
    #   There are known differences to occur on different platforms.
    #   https://github.com/scikit-learn/scikit-learn/issues/13108#issuecomment-461696681
    #
    #   We are assuming results are deterministic on a given platform as locally
    #   there is no randomness, i.e. performing the same test 100 times yields the
    #   same predictions 100 times.
    #
    #   Github runners indicate that they run on Microsoft Azure with DS2-v2.
    #   https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#cloud-hosts-for-github-hosted-runners
    #
    #   These seem to have consistent CPUs, so I'm unsure why this randomly
    #   fails only sometimes on Github runners.
    #
    #   Edit: If changing, please track which values were failing
    #
    #   It seems there are consistently different values for boston, so:
    #   * include two values for n_iter in 'boston_iterative_n_iter'
    #       known-values = [236, 331, 327]
    #
    #   * decreased places from 6 -> 5 in 'default_boston_{sparse,_iterative_sparse}'
    #     to check for iterations and expanded the default places for checking
    #       known-values = [-0.10972947168054104, -0.10973142976866268]
    #
    #   * decreased places from 3 -> 1 in 'default_boston_places'
    #       known-values = [0.29521793994422807, 0.2750079862455884]
    #
    #   * Include two values for 'boston_n_calls'
    #       known-values = [8, 9]
    __test__ = True

    res: Dict[str, Any] = {}
    res["default_boston"] = 0.2750079862455884
    res["default_boston_places"] = 1
    res["boston_n_calls"] = [8, 9]
    res["boston_iterative_n_iter"] = [236, 331, 327]
    res["default_boston_iterative"] = res["default_boston"]
    res["default_boston_iterative_places"] = 1
    res["default_boston_sparse"] = -0.10972947168054104
    res["default_boston_sparse_places"] = 5
    res["default_boston_iterative_sparse"] = res["default_boston_sparse"]
    res["default_boston_iterative_sparse_places"] = res["default_boston_sparse_places"]
    res["default_diabetes"] = 0.35917389841850555
    res["diabetes_n_calls"] = 9
    res["diabetes_iterative_n_iter"] = 435
    res["default_diabetes_iterative"] = res["default_diabetes"]
    res["default_diabetes_sparse"] = 0.25573903970369427
    res["default_diabetes_iterative_sparse"] = res["default_diabetes_sparse"]

    sk_mod = sklearn.neural_network.MLPRegressor
    module = MLPRegressor

    step_hyperparameter = {
        "name": "n_iter_",
        "value": module.get_max_iter(),
    }
MLPComponentTest
python
skorch-dev__skorch
skorch/tests/test_probabilistic.py
{ "start": 2177, "end": 3071 }
class ____(gpytorch.models.ApproximateGP): """GP regression for variational inference""" def __init__(self, inducing_points, eps=1e-6): variational_distribution = gpytorch.variational.CholeskyVariationalDistribution( inducing_points.size(0)) variational_strategy = gpytorch.variational.VariationalStrategy( self, inducing_points, variational_distribution, learn_inducing_locations=True, ) super().__init__(variational_strategy) self.mean_module = gpytorch.means.ConstantMean() self.covar_module = gpytorch.kernels.ScaleKernel( gpytorch.kernels.RBFKernel(eps=eps)) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
VariationalRegressionModule
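A hedged usage sketch for this variational GP: approximate GPs in gpytorch are trained against a likelihood through the variational ELBO. The data sizes and toy function below are invented for illustration.

import torch
import gpytorch

inducing_points = torch.linspace(0, 1, 10).unsqueeze(-1)  # 10 inducing points in 1-D
model = VariationalRegressionModule(inducing_points)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
# num_data must be the size of the full training set for correct ELBO scaling.
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=100)

x = torch.rand(100, 1)
y = torch.sin(6 * x).squeeze(-1) + 0.1 * torch.randn(100)
loss = -mll(model(x), y)  # minimize the negative ELBO
loss.backward()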
python
getsentry__sentry
src/sentry/seer/breakpoints.py
{ "start": 402, "end": 810 }
class ____(TypedDict): project: str # For legacy reasons, the group name is always # transaction even when working with functions. transaction: str aggregate_range_1: float aggregate_range_2: float unweighted_t_value: float unweighted_p_value: float trend_percentage: float absolute_percentage_change: float trend_difference: float breakpoint: int
BreakpointData
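TypedDicts are plain dicts at runtime, so a breakpoint payload is just built literally. All numbers below are invented, and the field meanings in the comments are inferred from the names, not confirmed by the source.

# A BreakpointData-shaped payload; values are illustrative only.
example = {
    "project": "42",
    "transaction": "/api/0/organizations/",
    "aggregate_range_1": 100.0,   # assumed: aggregate before the breakpoint
    "aggregate_range_2": 180.0,   # assumed: aggregate after the breakpoint
    "unweighted_t_value": -5.2,
    "unweighted_p_value": 0.0004,
    "trend_percentage": 1.8,
    "absolute_percentage_change": 1.8,
    "trend_difference": 80.0,
    "breakpoint": 1_680_000_000,  # assumed: unix timestamp of the change point
}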
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 287486, "end": 288547 }
class ____(sgqlc.types.Input): """Choose which status checks must pass before branches can be merged into a branch that matches this rule. When enabled, commits must first be pushed to another branch, then merged or pushed directly to a branch that matches this rule after status checks have passed. """ __schema__ = github_schema __field_names__ = ("required_status_checks", "strict_required_status_checks_policy") required_status_checks = sgqlc.types.Field( sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null("StatusCheckConfigurationInput"))), graphql_name="requiredStatusChecks", ) """Status checks that are required.""" strict_required_status_checks_policy = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="strictRequiredStatusChecksPolicy") """Whether pull requests targeting a matching branch must be tested with the latest code. This setting will not take effect unless at least one status check is enabled. """
RequiredStatusChecksParametersInput
python
huggingface__transformers
tests/quantization/quark_integration/test_quark.py
{ "start": 1454, "end": 5915 }
class ____(unittest.TestCase):
    reference_model_name = "unsloth/Meta-Llama-3.1-8B-Instruct"
    quantized_model_name = "amd/Llama-3.1-8B-Instruct-w-int8-a-int8-sym-test"

    input_text = "Today I am in Paris and"

    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Today I am in Paris and I am not in Paris, France\nToday I am in Paris, Illinois")
    EXPECTED_OUTPUTS.add("Today I am in Paris and I am enjoying the city of light. I am not just any ordinary Paris")
    EXPECTED_OUTPUTS.add("Today I am in Paris and I am enjoying my day off! The sun is shining, the birds are")
    EXPECTED_OUTPUTS.add("Today I am in Paris and I'm here to tell you about it. It's a beautiful day,")
    EXPECTED_OUTPUTS.add("Today I am in Paris and I am not in Paris at all! I am not in Paris, but")

    EXPECTED_RELATIVE_DIFFERENCE = 1.66

    device_map = None

    @classmethod
    def setUpClass(cls):
        """
        Set up reference & quantized model
        """
        cls.model_fp16 = AutoModelForCausalLM.from_pretrained(
            cls.reference_model_name, dtype=torch.float16, device_map=cls.device_map
        )
        cls.mem_fp16 = cls.model_fp16.get_memory_footprint()

        cls.tokenizer = AutoTokenizer.from_pretrained(cls.reference_model_name, use_fast=True)

        cls.quantized_model = AutoModelForCausalLM.from_pretrained(
            cls.quantized_model_name,
            dtype=torch.float16,
            device_map=cls.device_map,
        )

    def tearDown(self):
        r"""
        TearDown function needs to be called at the end of each test to free the accelerator memory and cache,
        and to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
        """
        cleanup(torch_device, gc_collect=True)

    def test_memory_footprint(self):
        mem_quantized = self.quantized_model.get_memory_footprint()

        self.assertTrue(self.mem_fp16 / mem_quantized > self.EXPECTED_RELATIVE_DIFFERENCE)

    def test_device_and_dtype_assignment(self):
        r"""
        Test that trying to cast (or assign a device to) a model after quantization throws an error.
        Also checks that other models are cast correctly.
        """
        # This should work
        if self.device_map is None:
            _ = self.quantized_model.to(0)

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.quantized_model.to(torch.float16)

    def test_original_dtype(self):
        r"""
        A simple test to check if the model successfully stores the original dtype
        """
        self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype"))
        self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
        self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16)
        self.assertTrue(isinstance(self.quantized_model.model.layers[0].mlp.gate_proj, QParamsLinear))

    def check_inference_correctness(self, model):
        r"""
        Test the generation quality of the quantized model and see that we are matching the expected output.
        Given that we are operating on small numbers + the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
""" # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") gen_config = GenerationConfig( max_new_tokens=15, min_new_tokens=15, use_cache=True, num_beams=1, do_sample=False, ) # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), generation_config=gen_config) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: self.check_inference_correctness(self.quantized_model) @require_accelerate @require_torch_multi_gpu @require_quark
QuarkTest
python
modin-project__modin
modin/core/execution/dispatching/factories/factories.py
{ "start": 23286, "end": 23617 }
class ____(BaseFactory): @classmethod @doc(_doc_factory_prepare_method, io_module_name="``PandasOnUnidistIO``") def prepare(cls): from modin.core.execution.unidist.implementations.pandas_on_unidist.io import ( PandasOnUnidistIO, ) cls.io_cls = PandasOnUnidistIO
PandasOnUnidistFactory
python
keras-team__keras
keras/src/quantizers/quantizers.py
{ "start": 3586, "end": 5728 }
class ____(Quantizer): def __init__( self, axis, value_range=(-127, 127), epsilon=backend.epsilon(), output_dtype="int8", ): Quantizer.__init__(self, output_dtype=output_dtype) if isinstance(axis, int): axis = (axis,) self.axis = tuple(axis) self.value_range = value_range self.epsilon = epsilon def __call__(self, x): quantized_x, scale = abs_max_quantize( x, self.axis, self.value_range, self.output_dtype, self.epsilon ) return quantized_x, scale def get_config(self): return { "axis": self.axis, "value_range": self.value_range, "epsilon": self.epsilon, "output_dtype": self.output_dtype, } def adjust_and_nudge(min_range, max_range, num_bits, narrow_range): """Adjusts and nudges the quantization range for better accuracy.""" # Use higher precision for the computation. compute_dtype = backend.result_type(min_range.dtype, "float32") min_range = ops.cast(min_range, compute_dtype) max_range = ops.cast(max_range, compute_dtype) quant_max = (1 << num_bits) - 1 quant_min = 0 if not narrow_range else 1 diff_range = ops.subtract(max_range, min_range) # Calculate the scale and ensure it's positive scale = ops.divide(diff_range, quant_max - quant_min) # Re-calculate the inverse to avoid loss of precision inv_scale = ops.divide(quant_max - quant_min, diff_range) # Calculate the zero point from the min range zero_point_from_min = quant_min - ops.divide(min_range, scale) # Ensure zero point is within valid range [0, quant_max] zero_point = ops.clip(zero_point_from_min, quant_min, quant_max) # Nudge zero point if it's very close to an integer nudged_zero_point = ops.round(zero_point) # Calculate nudged limits nudged_min = ops.multiply(ops.subtract(quant_min, nudged_zero_point), scale) nudged_max = ops.multiply(ops.subtract(quant_max, nudged_zero_point), scale) return nudged_min, nudged_max, scale, inv_scale
AbsMaxQuantizer
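A plain-NumPy sketch of the abs-max scheme the quantizer above delegates to `abs_max_quantize`: scale by the per-axis maximum absolute value, then round and clip into the integer value range. This is illustrative only, not the Keras implementation.

import numpy as np

def abs_max_quantize_np(x, axis, value_range=(-127, 127), epsilon=1e-7):
    # The largest |x| along the reduction axis maps to value_range[1].
    amax = np.maximum(np.max(np.abs(x), axis=axis, keepdims=True), epsilon)
    scale = amax / value_range[1]
    q = np.clip(np.round(x / scale), *value_range).astype(np.int8)
    return q, scale

x = np.array([[0.5, -2.0, 1.0]])
q, scale = abs_max_quantize_np(x, axis=1)
print(q)          # [[  32 -127   64]]
print(q * scale)  # dequantized values, approximately x again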
python
google__pytype
pytype/pattern_matching.py
{ "start": 4767, "end": 7526 }
class ____: """Tracks a set of match options.""" def __init__(self, match_var, ctx): self.match_var: cfg.Variable = match_var self.ctx = ctx self.options: _OptionSet = _OptionSet() self.could_contain_anything: bool = False # The types of the match var within each case branch self.cases: dict[int, _OptionSet] = collections.defaultdict(_OptionSet) self.is_valid: bool = True for d in match_var.data: if isinstance(d, abstract.Unsolvable): self.is_valid = False self.could_contain_anything = True elif isinstance(d, abstract.Instance): self.options.add_instance(d) else: self.options.add_type(d) @property def is_complete(self) -> bool: return self.options.is_complete def get_narrowed_match_var(self, node) -> cfg.Variable: if self.could_contain_anything: return self.match_var.AssignToNewVariable(node) else: narrowed = [] for opt in self.options: if not opt.is_empty: narrowed.append(opt.typ.instantiate(node)) return self.ctx.join_variables(node, narrowed) def cover(self, line, var) -> list[_Value]: ret = [] for d in var.data: if isinstance(d, abstract.Instance): ret += self.options.cover_instance(d) self.cases[line].add_instance(d) else: ret += self.options.cover_type(d) self.cases[line].add_type(d) return ret def cover_from_cmp(self, line, case_var) -> list[_Value]: """Cover cases based on a CMP match.""" ret = [] # If we compare `match_var == constant`, add the type of `constant` to the # current case so that instantiate_case_var can retrieve it. for d in case_var.data: if isinstance(d, abstract.Unsolvable): # Set the case type to Any and invalidate the tracker; we do not know # what we have matched against. ret += self.options.cover_type(d) self.invalidate() elif isinstance(d, abstract.Instance): ret += self.options.cover_instance(d) self.cases[line].add_instance(d) if isinstance(d, abstract.ConcreteValue) and d.pyval is None: # Need to special-case `case None` since it's compiled differently. ret += self.options.cover_type(d.cls) else: # We do not handle whatever case this is; just invalidate the tracker # TODO(mdemello): This is probably an error in the user's code; we # should figure out a way to report it. self.invalidate() return ret def cover_from_none(self, line) -> list[_Value]: cls = self.ctx.convert.none_type self.cases[line].add_type(cls) return self.options.cover_type(cls) def invalidate(self): self.is_valid = False
_OptionTracker
python
apache__airflow
providers/apache/kafka/tests/unit/apache/kafka/sensors/test_kafka.py
{ "start": 1140, "end": 4883 }
class ____: """ Test Sensors """ @pytest.fixture(autouse=True) def setup_connections(self, create_connection_without_db): create_connection_without_db( Connection( conn_id="kafka_d", conn_type="kafka", extra=json.dumps( {"socket.timeout.ms": 10, "bootstrap.servers": "localhost:9092", "group.id": "test_group"} ), ) ) def test_await_message_good(self): sensor = AwaitMessageSensor( kafka_config_id="kafka_d", topics=["test"], task_id="test", apply_function=_return_true ) # execute marks the task as deferred with pytest.raises(TaskDeferred): sensor.execute(context={}) def test_await_execute_complete(self): sensor = AwaitMessageSensor( kafka_config_id="kafka_d", topics=["test"], task_id="test", apply_function=_return_true ) assert sensor.execute_complete(context={}, event="test") == "test" def test_await_message_trigger_event(self): sensor = AwaitMessageTriggerFunctionSensor( kafka_config_id="kafka_d", topics=["test"], task_id="test", apply_function=_return_true, event_triggered_function=_return_true, ) # task should immediately come out of deferred with pytest.raises(TaskDeferred): sensor.execute(context={}) def test_await_message_trigger_event_execute_complete(self): sensor = AwaitMessageTriggerFunctionSensor( kafka_config_id="kafka_d", topics=["test"], task_id="test", apply_function=_return_true, event_triggered_function=_return_true, ) # task should immediately come out of deferred with pytest.raises(TaskDeferred): sensor.execute_complete(context={}) def test_await_message_with_timeout_parameter(self): """Test that AwaitMessageSensor accepts timeout parameter.""" sensor = AwaitMessageSensor( kafka_config_id="kafka_d", topics=["test"], task_id="test", apply_function=_return_true, timeout=600, # This should now work without errors ) assert sensor.timeout == 600 def test_await_message_with_soft_fail_parameter(self): """Test that AwaitMessageSensor accepts soft_fail parameter.""" sensor = AwaitMessageSensor( kafka_config_id="kafka_d", topics=["test"], task_id="test", apply_function=_return_true, soft_fail=True, # This should now work without errors ) assert sensor.soft_fail is True def test_await_message_trigger_function_with_timeout_parameter(self): """Test that AwaitMessageTriggerFunctionSensor accepts timeout parameter.""" sensor = AwaitMessageTriggerFunctionSensor( kafka_config_id="kafka_d", topics=["test"], task_id="test", apply_function=_return_true, event_triggered_function=_return_true, timeout=600, ) assert sensor.timeout == 600 def test_await_message_trigger_function_with_soft_fail_parameter(self): """Test that AwaitMessageTriggerFunctionSensor accepts soft_fail parameter.""" sensor = AwaitMessageTriggerFunctionSensor( kafka_config_id="kafka_d", topics=["test"], task_id="test", apply_function=_return_true, event_triggered_function=_return_true, soft_fail=True, ) assert sensor.soft_fail is True
TestSensors
python
altair-viz__altair
altair/vegalite/v6/schema/channels.py
{ "start": 316739, "end": 323310 }
class ____(DatumChannelMixin, core.DatumDef): """ LatitudeDatum schema wrapper. Parameters ---------- bandPosition : float Relative position on a band of a stacked, binned, time unit, or band scale. For example, the marks will be positioned at the beginning of the band if set to ``0``, and at the middle of the band if set to ``0.5``. datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None A constant value in data domain. title : str, :class:`Text`, Sequence[str], None A title for the field. If ``null``, the title will be removed. **Default value:** derived from the field's name and transformation function (``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function, the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the field is binned or has a time unit applied, the applied function is shown in parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``). Otherwise, the title is simply the field name. **Notes**: 1) You can customize the default field title format by providing the `fieldTitle <https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle function via the compile function's options <https://vega.github.io/vega-lite/usage/compile.html#field-title>`__. 2) If both field definition's ``title`` and axis, header, or legend ``title`` are defined, axis/header/legend title will be used. type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson'] The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or ``"nominal"``) for the encoded field or constant value (``datum``). It can also be a ``"geojson"`` type for encoding `'geoshape' <https://vega.github.io/vega-lite/docs/geoshape.html>`__. Vega-Lite automatically infers data types in many cases as discussed below. However, type is required for a field if: (1) the field is not nominal and the field encoding has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal scale for a field with ``bin`` or ``timeUnit``. **Default value:** 1) For a data ``field``, ``"nominal"`` is the default data type unless the field encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or ``timeUnit`` that satisfies the following criteria: * ``"quantitative"`` is the default type if (1) the encoded field contains ``bin`` or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is ``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__. * ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit`` or (2) the specified scale type is a time or utc scale * ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort order <https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__, (2) the specified scale type is an ordinal/point/band scale, or (3) the encoding channel is ``order``. 
2) For a constant value in data domain (``datum``): * ``"quantitative"`` if the datum is a number * ``"nominal"`` if the datum is a string * ``"temporal"`` if the datum is `a date time object <https://vega.github.io/vega-lite/docs/datetime.html>`__ **Note:** * Data ``type`` describes the semantics of the data rather than the primitive data types (number, string, etc.). The same primitive data type can have different types of measurement. For example, numeric data can represent quantitative, ordinal, or nominal data. * Data values for a temporal field can be either a date-time string (e.g., ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a timestamp number (e.g., ``1552199579097``). * When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the ``type`` property can be either ``"quantitative"`` (for using a linear bin scale) or `"ordinal" (for using an ordinal bin scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. * When using with `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal" (for using an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. * When using with `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property refers to the post-aggregation data type. For example, we can calculate count ``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct", "field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``. * Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have ``type`` as they must have exactly the same type as their primary channels (e.g., ``x``, ``y``). **See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__ documentation. """ _class_is_valid_at_instantiation = False _encoding_name = "latitude" @overload def bandPosition(self, _: float, /) -> LatitudeDatum: ... @overload def title(self, _: str | Sequence[str] | None, /) -> LatitudeDatum: ... @overload def type(self, _: Type_T, /) -> LatitudeDatum: ... def __init__( self, datum, bandPosition: Optional[float] = Undefined, title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined, type: Optional[SchemaBase | Type_T] = Undefined, **kwds, ): super().__init__( datum=datum, bandPosition=bandPosition, title=title, type=type, **kwds ) @with_property_setters
LatitudeDatum
python
Farama-Foundation__Gymnasium
gymnasium/envs/toy_text/blackjack.py
{ "start": 1279, "end": 12532 }
class ____(gym.Env):
    """
    Blackjack is a card game where the goal is to beat the dealer by obtaining cards
    that sum to closer to 21 (without going over 21) than the dealer's cards.

    ## Description
    The game starts with the dealer having one face up and one face down card,
    while the player has two face up cards. All cards are drawn from an infinite deck
    (i.e. with replacement).

    The card values are:
    - Face cards (Jack, Queen, King) have a point value of 10.
    - Aces can either count as 11 (called a 'usable ace') or 1.
    - Numerical cards (2-10) have a value equal to their number.

    The player's total is the sum of the cards they hold. The player can request
    additional cards (hit) until they decide to stop (stick) or exceed 21 (bust,
    immediate loss).

    After the player sticks, the dealer reveals their facedown card, and draws cards
    until their sum is 17 or greater. If the dealer goes bust, the player wins.

    If neither the player nor the dealer busts, the outcome (win, lose, draw) is
    decided by whose sum is closer to 21.

    This environment corresponds to the version of the blackjack problem
    described in Example 5.1 in Reinforcement Learning: An Introduction
    by Sutton and Barto [<a href="#blackjack_ref">1</a>].

    ## Action Space
    The action shape is `(1,)` in the range `{0, 1}` indicating
    whether to stick or hit.

    - 0: Stick
    - 1: Hit

    ## Observation Space
    The observation consists of a 3-tuple containing: the player's current sum,
    the value of the dealer's one showing card (1-10 where 1 is ace),
    and whether the player holds a usable ace (0 or 1).

    The observation is returned as `(int(), int(), int())`.

    ## Starting State
    The starting state is initialised with the following values.

    | Observation               | Values          |
    |---------------------------|-----------------|
    | Player current sum        |  4, 5, ..., 21  |
    | Dealer showing card value |  1, 2, ..., 10  |
    | Usable Ace                |  0, 1           |

    ## Rewards
    - win game: +1
    - lose game: -1
    - draw game: 0
    - win game with natural blackjack:
    +1.5 (if <a href="#nat">natural</a> is True)
    +1 (if <a href="#nat">natural</a> is False)

    ## Episode End
    The episode ends if the following happens:

    - Termination:
    1. The player hits and the sum of hand exceeds 21.
    2. The player sticks.

    An ace will always be counted as usable (11) unless it busts the player.

    ## Information
    No additional information is returned.

    ## Arguments

    ```python
    import gymnasium as gym
    gym.make('Blackjack-v1', natural=False, sab=False)
    ```

    <a id="nat"></a>`natural=False`: Whether to give an additional reward for
    starting with a natural blackjack, i.e. starting with an ace and ten (sum is 21).

    <a id="sab"></a>`sab=False`: Whether to follow the exact rules outlined in the book by
    Sutton and Barto. If `sab` is `True`, the keyword argument `natural` will be ignored.
    If the player achieves a natural blackjack and the dealer does not, the player
    will win (i.e. get a reward of +1). The reverse rule does not apply.
    If both the player and the dealer get a natural, it will be a draw (i.e. reward 0).

    ## References
    <a id="blackjack_ref"></a>[1] R. Sutton and A. Barto, “Reinforcement Learning:
    An Introduction” 2020. [Online].
    Available: [http://www.incompleteideas.net/book/RLbook2020.pdf](http://www.incompleteideas.net/book/RLbook2020.pdf)

    ## Version History
    * v1: Fix the natural handling in Blackjack
    * v0: Initial version release
    """

    metadata = {
        "render_modes": ["human", "rgb_array"],
        "render_fps": 4,
    }

    def __init__(self, render_mode: str | None = None, natural=False, sab=False):
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Tuple(
            (spaces.Discrete(32), spaces.Discrete(11), spaces.Discrete(2))
        )

        # Flag to pay out 1.5 on a "natural" blackjack win, like casino rules
        # Ref: http://www.bicyclecards.com/how-to-play/blackjack/
        self.natural = natural

        # Flag for full agreement with the (Sutton and Barto, 2018) definition. Overrides self.natural
        self.sab = sab

        self.render_mode = render_mode

    def step(self, action):
        assert self.action_space.contains(action)
        if action:  # hit: add a card to the player's hand and return
            self.player.append(draw_card(self.np_random))
            if is_bust(self.player):
                terminated = True
                reward = -1.0
            else:
                terminated = False
                reward = 0.0
        else:  # stick: play out the dealer's hand, and score
            terminated = True
            while sum_hand(self.dealer) < 17:
                self.dealer.append(draw_card(self.np_random))
            reward = cmp(score(self.player), score(self.dealer))
            if self.sab and is_natural(self.player) and not is_natural(self.dealer):
                # Player automatically wins. Rules consistent with S&B
                reward = 1.0
            elif (
                not self.sab
                and self.natural
                and is_natural(self.player)
                and reward == 1.0
            ):
                # Natural gives extra points, but doesn't autowin. Legacy implementation
                reward = 1.5

        if self.render_mode == "human":
            self.render()
        # truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
        return self._get_obs(), reward, terminated, False, {}

    def _get_obs(self):
        # Optimization: Compute sum and usable ace in one pass per step, not two.
        player_sum, player_usable_ace = _hand_sum_and_usable_ace(self.player)
        return (player_sum, self.dealer[0], player_usable_ace)

    def reset(
        self,
        seed: int | None = None,
        options: dict | None = None,
    ):
        super().reset(seed=seed)
        self.dealer = draw_hand(self.np_random)
        self.player = draw_hand(self.np_random)

        _, dealer_card_value, _ = self._get_obs()

        suits = ["C", "D", "H", "S"]
        self.dealer_top_card_suit = self.np_random.choice(suits)

        if dealer_card_value == 1:
            self.dealer_top_card_value_str = "A"
        elif dealer_card_value == 10:
            self.dealer_top_card_value_str = self.np_random.choice(["J", "Q", "K"])
        else:
            self.dealer_top_card_value_str = str(dealer_card_value)

        if self.render_mode == "human":
            self.render()
        return self._get_obs(), {}

    def render(self):
        if self.render_mode is None:
            assert self.spec is not None
            gym.logger.warn(
                "You are calling render method without specifying any render mode. "
                "You can specify the render_mode at initialization, "
                f'e.g. 
gym.make("{self.spec.id}", render_mode="rgb_array")' ) return try: import pygame except ImportError as e: raise DependencyNotInstalled( 'pygame is not installed, run `pip install "gymnasium[toy-text]"`' ) from e player_sum, dealer_card_value, usable_ace = self._get_obs() screen_width, screen_height = 600, 500 card_img_height = screen_height // 3 card_img_width = int(card_img_height * 142 / 197) spacing = screen_height // 20 bg_color = (7, 99, 36) white = (255, 255, 255) if not hasattr(self, "screen"): pygame.init() if self.render_mode == "human": pygame.display.init() self.screen = pygame.display.set_mode((screen_width, screen_height)) else: pygame.font.init() self.screen = pygame.Surface((screen_width, screen_height)) if not hasattr(self, "clock"): self.clock = pygame.time.Clock() self.screen.fill(bg_color) def get_image(path): cwd = os.path.dirname(__file__) image = pygame.image.load(os.path.join(cwd, path)) return image def get_font(path, size): cwd = os.path.dirname(__file__) font = pygame.font.Font(os.path.join(cwd, path), size) return font small_font = get_font( os.path.join("font", "Minecraft.ttf"), screen_height // 15 ) dealer_text = small_font.render( "Dealer: " + str(dealer_card_value), True, white ) dealer_text_rect = self.screen.blit(dealer_text, (spacing, spacing)) def scale_card_img(card_img): return pygame.transform.scale(card_img, (card_img_width, card_img_height)) dealer_card_img = scale_card_img( get_image( os.path.join( "img", f"{self.dealer_top_card_suit}{self.dealer_top_card_value_str}.png", ) ) ) dealer_card_rect = self.screen.blit( dealer_card_img, ( screen_width // 2 - card_img_width - spacing // 2, dealer_text_rect.bottom + spacing, ), ) hidden_card_img = scale_card_img(get_image(os.path.join("img", "Card.png"))) self.screen.blit( hidden_card_img, ( screen_width // 2 + spacing // 2, dealer_text_rect.bottom + spacing, ), ) player_text = small_font.render("Player", True, white) player_text_rect = self.screen.blit( player_text, (spacing, dealer_card_rect.bottom + 1.5 * spacing) ) large_font = get_font(os.path.join("font", "Minecraft.ttf"), screen_height // 6) player_sum_text = large_font.render(str(player_sum), True, white) player_sum_text_rect = self.screen.blit( player_sum_text, ( screen_width // 2 - player_sum_text.get_width() // 2, player_text_rect.bottom + spacing, ), ) if usable_ace: usable_ace_text = small_font.render("usable ace", True, white) self.screen.blit( usable_ace_text, ( screen_width // 2 - usable_ace_text.get_width() // 2, player_sum_text_rect.bottom + spacing // 2, ), ) if self.render_mode == "human": pygame.event.pump() pygame.display.update() self.clock.tick(self.metadata["render_fps"]) else: return np.transpose( np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2) ) def close(self): if hasattr(self, "screen"): import pygame pygame.display.quit() pygame.quit() # Pixel art from Mariia Khmelnytska (https://www.123rf.com/photo_104453049_stock-vector-pixel-art-playing-cards-standart-deck-vector-set.html)
BlackjackEnv
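A short interaction sketch using the standard Gymnasium loop (assuming `gymnasium` is installed and the environment is registered as `Blackjack-v1`, as the docstring above states). The stick-on-17 policy is just a placeholder.

import gymnasium as gym

env = gym.make("Blackjack-v1", natural=False, sab=False)
obs, info = env.reset(seed=0)  # obs = (player_sum, dealer_showing, usable_ace)
terminated = truncated = False
while not (terminated or truncated):
    action = 0 if obs[0] >= 17 else 1  # naive policy: stick on 17 or more
    obs, reward, terminated, truncated, info = env.step(action)
print("episode reward:", reward)
env.close()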
python
huggingface__transformers
src/transformers/models/esm/modeling_esm.py
{ "start": 2541, "end": 4414 }
class ____(torch.nn.Module): """ Rotary position embeddings based on those in [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation matrices which depend on their relative positions. """ inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim: int): super().__init__() # Generate and save the inverse frequency buffer (non trainable) inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) self.register_buffer("inv_freq", inv_freq) self._seq_len_cached = None self._cos_cached = None self._sin_cached = None def _update_cos_sin_tables(self, x, seq_dimension=2): seq_len = x.shape[seq_dimension] # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if seq_len != self._seq_len_cached or self._cos_cached.device != x.device: self._seq_len_cached = seq_len t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self._cos_cached = emb.cos()[None, None, :, :] self._sin_cached = emb.sin()[None, None, :, :] return self._cos_cached, self._sin_cached def forward(self, q: torch.Tensor, k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2) return ( apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached).to(dtype=q.dtype), apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached).to(dtype=k.dtype), )
RotaryEmbedding
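The module above depends on an `apply_rotary_pos_emb` helper defined elsewhere in the file. A common formulation of it (assumed here, not copied from the source) rotates the two halves of the channel dimension:

import torch

def rotate_half(x: torch.Tensor) -> torch.Tensor:
    # Swap the two halves of the last dimension, negating the second half.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # cos/sin were built for the cached sequence length; trim to x's length.
    cos = cos[:, :, : x.shape[-2], :]
    sin = sin[:, :, : x.shape[-2], :]
    return (x * cos) + (rotate_half(x) * sin)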
python
python-markdown__markdown
markdown/blockprocessors.py
{ "start": 21971, "end": 23336 }
class ____(BlockProcessor):
    """ Process Horizontal Rules. """

    # Python's `re` module doesn't officially support atomic grouping. However, you can fake it.
    # See https://stackoverflow.com/a/13577411/866026
    RE = r'^[ ]{0,3}(?=(?P<atomicgroup>(-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,}))(?P=atomicgroup)[ ]*$'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(RE, re.MULTILINE)

    def test(self, parent: etree.Element, block: str) -> bool:
        m = self.SEARCH_RE.search(block)
        if m:
            # Save match object on class instance so we can use it later.
            self.match = m
            return True
        return False

    def run(self, parent: etree.Element, blocks: list[str]) -> None:
        block = blocks.pop(0)
        match = self.match
        # Check for lines in block before `hr`.
        prelines = block[:match.start()].rstrip('\n')
        if prelines:
            # Recursively parse lines before `hr` so they get parsed first.
            self.parser.parseBlocks(parent, [prelines])
        # Create the `hr` element.
        etree.SubElement(parent, 'hr')
        # Check for lines in block after `hr`.
        postlines = block[match.end():].lstrip('\n')
        if postlines:
            # Add lines after `hr` to master blocks for later parsing.
            blocks.insert(0, postlines)
HRProcessor
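A small demonstration of the lookahead trick the `RE` comment references: the group is matched inside `(?=...)`, and the backreference then consumes exactly the captured text. Because Python's lookarounds are not re-entered on backtracking, the engine cannot give back characters, which is the same effect as an atomic group.

import re

# A plain greedy group backtracks: a+ gives back one 'a' so 'ab' can match.
print(re.search(r'a+ab', 'aaab'))                   # <re.Match ... 'aaab'>

# The faked atomic group cannot give anything back once captured.
print(re.search(r'(?=(?P<g>a+))(?P=g)ab', 'aaab'))  # None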
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/output/base.py
{ "start": 6087, "end": 8348 }
class ____(Output): """ For testing. An output class that doesn't render anything. """ def fileno(self) -> int: "There is no sensible default for fileno()." raise NotImplementedError def encoding(self) -> str: return "utf-8" def write(self, data: str) -> None: pass def write_raw(self, data: str) -> None: pass def set_title(self, title: str) -> None: pass def clear_title(self) -> None: pass def flush(self) -> None: pass def erase_screen(self) -> None: pass def enter_alternate_screen(self) -> None: pass def quit_alternate_screen(self) -> None: pass def enable_mouse_support(self) -> None: pass def disable_mouse_support(self) -> None: pass def erase_end_of_line(self) -> None: pass def erase_down(self) -> None: pass def reset_attributes(self) -> None: pass def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None: pass def disable_autowrap(self) -> None: pass def enable_autowrap(self) -> None: pass def cursor_goto(self, row: int = 0, column: int = 0) -> None: pass def cursor_up(self, amount: int) -> None: pass def cursor_down(self, amount: int) -> None: pass def cursor_forward(self, amount: int) -> None: pass def cursor_backward(self, amount: int) -> None: pass def hide_cursor(self) -> None: pass def show_cursor(self) -> None: pass def set_cursor_shape(self, cursor_shape: CursorShape) -> None: pass def reset_cursor_shape(self) -> None: pass def ask_for_cpr(self) -> None: pass def bell(self) -> None: pass def enable_bracketed_paste(self) -> None: pass def disable_bracketed_paste(self) -> None: pass def scroll_buffer_to_prompt(self) -> None: pass def get_size(self) -> Size: return Size(rows=40, columns=80) def get_rows_below_cursor_position(self) -> int: return 40 def get_default_color_depth(self) -> ColorDepth: return ColorDepth.DEPTH_1_BIT
DummyOutput
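Typical use is in tests, handing the dummy to a session so nothing is rendered. A sketch, assuming the usual `DummyInput` companion from prompt_toolkit:

from prompt_toolkit.input import DummyInput
from prompt_toolkit.output import DummyOutput
from prompt_toolkit.shortcuts import PromptSession

# Nothing is drawn and no real terminal is required, which suits unit tests.
session = PromptSession(input=DummyInput(), output=DummyOutput())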
python
pypa__hatch
tests/env/plugin/test_interface.py
{ "start": 30079, "end": 32197 }
class ____: def test_default(self, isolation, isolated_data_dir, platform, global_application): config = {"project": {"name": "my_app", "version": "0.0.1"}} project = Project(isolation, config=config) environment = MockEnvironment( isolation, project.metadata, "default", project.config.envs["default"], {}, isolated_data_dir, isolated_data_dir, platform, 0, global_application, ) assert environment.description == environment.description == "" def test_not_string(self, isolation, isolated_data_dir, platform, global_application): config = { "project": {"name": "my_app", "version": "0.0.1"}, "tool": {"hatch": {"envs": {"default": {"description": 9000}}}}, } project = Project(isolation, config=config) environment = MockEnvironment( isolation, project.metadata, "default", project.config.envs["default"], {}, isolated_data_dir, isolated_data_dir, platform, 0, global_application, ) with pytest.raises(TypeError, match="Field `tool.hatch.envs.default.description` must be a string"): _ = environment.description def test_correct(self, isolation, isolated_data_dir, platform, global_application): description = "foo" config = { "project": {"name": "my_app", "version": "0.0.1"}, "tool": {"hatch": {"envs": {"default": {"description": description}}}}, } project = Project(isolation, config=config) environment = MockEnvironment( isolation, project.metadata, "default", project.config.envs["default"], {}, isolated_data_dir, isolated_data_dir, platform, 0, global_application, ) assert environment.description is description
TestDescription
python
walkccc__LeetCode
solutions/2011. Final Value of Variable After Performing Operations/2011.py
{ "start": 0, "end": 141 }
class ____: def finalValueAfterOperations(self, operations: list[str]) -> int: return sum(op[1] == '+' or -1 for op in operations)
Solution
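The one-liner leans on short-circuiting: `op[1]` is the middle character of each token (`++X`, `X++`, `--X`, `X--`), so `op[1] == '+'` is `True` (which `sum` counts as 1) for increments, and `or -1` supplies -1 otherwise. The same logic spelled out, as an illustrative rewrite:

def final_value(operations: list[str]) -> int:
    # Index 1 is '+' for both ++X and X++, and '-' for both --X and X--.
    return sum(1 if op[1] == '+' else -1 for op in operations)

assert final_value(["--X", "X++", "X++"]) == 1
assert final_value(["++X", "++X", "X++"]) == 3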
python
pandas-dev__pandas
pandas/core/methods/selectn.py
{ "start": 978, "end": 1913 }
class ____(Generic[NDFrameT]): def __init__( self, obj: NDFrameT, n: int, keep: Literal["first", "last", "all"] ) -> None: self.obj = obj self.n = n self.keep = keep if self.keep not in ("first", "last", "all"): raise ValueError('keep must be either "first", "last" or "all"') def compute(self, method: str) -> NDFrameT: raise NotImplementedError @final def nlargest(self) -> NDFrameT: return self.compute("nlargest") @final def nsmallest(self) -> NDFrameT: return self.compute("nsmallest") @final @staticmethod def is_valid_dtype_n_method(dtype: DtypeObj) -> bool: """ Helper function to determine if dtype is valid for nsmallest/nlargest methods """ if is_numeric_dtype(dtype): return not is_complex_dtype(dtype) return needs_i8_conversion(dtype)
SelectN
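How the `keep` argument behaves at the public API level (standard pandas semantics; the data is illustrative):

import pandas as pd

s = pd.Series([3, 2, 2, 1])
print(s.nlargest(2))                # keep='first': the earlier duplicate 2 wins the tie
print(s.nlargest(2, keep='all'))    # every value tied at the cutoff is kept: 3 rows
print(s.nsmallest(2, keep='last'))  # the later duplicate 2 wins the tie instead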
python
python-pillow__Pillow
src/PIL/ImageFont.py
{ "start": 6088, "end": 26820 }
class ____: """FreeType font wrapper (requires _imagingft service)""" font: Font font_bytes: bytes def __init__( self, font: StrOrBytesPath | BinaryIO, size: float = 10, index: int = 0, encoding: str = "", layout_engine: Layout | None = None, ) -> None: # FIXME: use service provider instead if isinstance(core, DeferredError): raise core.ex if size <= 0: msg = f"font size must be greater than 0, not {size}" raise ValueError(msg) self.path = font self.size = size self.index = index self.encoding = encoding if layout_engine not in (Layout.BASIC, Layout.RAQM): layout_engine = Layout.BASIC if core.HAVE_RAQM: layout_engine = Layout.RAQM elif layout_engine == Layout.RAQM and not core.HAVE_RAQM: warnings.warn( "Raqm layout was requested, but Raqm is not available. " "Falling back to basic layout." ) layout_engine = Layout.BASIC self.layout_engine = layout_engine def load_from_bytes(f: IO[bytes]) -> None: self.font_bytes = f.read() self.font = core.getfont( "", size, index, encoding, self.font_bytes, layout_engine ) if is_path(font): font = os.fspath(font) if sys.platform == "win32": font_bytes_path = font if isinstance(font, bytes) else font.encode() try: font_bytes_path.decode("ascii") except UnicodeDecodeError: # FreeType cannot load fonts with non-ASCII characters on Windows # So load it into memory first with open(font, "rb") as f: load_from_bytes(f) return self.font = core.getfont( font, size, index, encoding, layout_engine=layout_engine ) else: load_from_bytes(cast(IO[bytes], font)) def __getstate__(self) -> list[Any]: return [self.path, self.size, self.index, self.encoding, self.layout_engine] def __setstate__(self, state: list[Any]) -> None: path, size, index, encoding, layout_engine = state FreeTypeFont.__init__(self, path, size, index, encoding, layout_engine) def getname(self) -> tuple[str | None, str | None]: """ :return: A tuple of the font family (e.g. Helvetica) and the font style (e.g. Bold) """ return self.font.family, self.font.style def getmetrics(self) -> tuple[int, int]: """ :return: A tuple of the font ascent (the distance from the baseline to the highest outline point) and descent (the distance from the baseline to the lowest outline point, a negative value) """ return self.font.ascent, self.font.descent def getlength( self, text: str | bytes, mode: str = "", direction: str | None = None, features: list[str] | None = None, language: str | None = None, ) -> float: """ Returns length (in pixels with 1/64 precision) of given text when rendered in font with provided direction, features, and language. This is the amount by which following text should be offset. Text bounding box may extend past the length in some fonts, e.g. when using italics or accents. The result is returned as a float; it is a whole number if using basic layout. Note that the sum of two lengths may not equal the length of a concatenated string due to kerning. If you need to adjust for kerning, include the following character and subtract its length. 
For example, instead of :: hello = font.getlength("Hello") world = font.getlength("World") hello_world = hello + world # not adjusted for kerning assert hello_world == font.getlength("HelloWorld") # may fail use :: hello = font.getlength("HelloW") - font.getlength("W") # adjusted for kerning world = font.getlength("World") hello_world = hello + world # adjusted for kerning assert hello_world == font.getlength("HelloWorld") # True or disable kerning with (requires libraqm) :: hello = draw.textlength("Hello", font, features=["-kern"]) world = draw.textlength("World", font, features=["-kern"]) hello_world = hello + world # kerning is disabled, no need to adjust assert hello_world == draw.textlength("HelloWorld", font, features=["-kern"]) .. versionadded:: 8.0.0 :param text: Text to measure. :param mode: Used by some graphics drivers to indicate what mode the driver prefers; if empty, the renderer may return either mode. Note that the mode is always a string, to simplify C-level implementations. :param direction: Direction of the text. It can be 'rtl' (right to left), 'ltr' (left to right) or 'ttb' (top to bottom). Requires libraqm. :param features: A list of OpenType font features to be used during text layout. This is usually used to turn on optional font features that are not enabled by default, for example 'dlig' or 'ss01', but can be also used to turn off default font features for example '-liga' to disable ligatures or '-kern' to disable kerning. To get all supported features, see https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist Requires libraqm. :param language: Language of the text. Different languages may use different glyph shapes or ligatures. This parameter tells the font which language the text is in, and to apply the correct substitutions as appropriate, if available. It should be a `BCP 47 language code <https://www.w3.org/International/articles/language-tags/>`_ Requires libraqm. :return: Either width for horizontal text, or height for vertical text. """ _string_length_check(text) return self.font.getlength(text, mode, direction, features, language) / 64 def getbbox( self, text: str | bytes, mode: str = "", direction: str | None = None, features: list[str] | None = None, language: str | None = None, stroke_width: float = 0, anchor: str | None = None, ) -> tuple[float, float, float, float]: """ Returns bounding box (in pixels) of given text relative to given anchor when rendered in font with provided direction, features, and language. Use :py:meth:`getlength()` to get the offset of following text with 1/64 pixel precision. The bounding box includes extra margins for some fonts, e.g. italics or accents. .. versionadded:: 8.0.0 :param text: Text to render. :param mode: Used by some graphics drivers to indicate what mode the driver prefers; if empty, the renderer may return either mode. Note that the mode is always a string, to simplify C-level implementations. :param direction: Direction of the text. It can be 'rtl' (right to left), 'ltr' (left to right) or 'ttb' (top to bottom). Requires libraqm. :param features: A list of OpenType font features to be used during text layout. This is usually used to turn on optional font features that are not enabled by default, for example 'dlig' or 'ss01', but can be also used to turn off default font features for example '-liga' to disable ligatures or '-kern' to disable kerning. To get all supported features, see https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist Requires libraqm. 
:param language: Language of the text. Different languages may use different glyph shapes or ligatures. This parameter tells the font which language the text is in, and to apply the correct substitutions as appropriate, if available. It should be a `BCP 47 language code <https://www.w3.org/International/articles/language-tags/>`_ Requires libraqm. :param stroke_width: The width of the text stroke. :param anchor: The text anchor alignment. Determines the relative location of the anchor to the text. The default alignment is top left, specifically ``la`` for horizontal text and ``lt`` for vertical text. See :ref:`text-anchors` for details. :return: ``(left, top, right, bottom)`` bounding box """ _string_length_check(text) size, offset = self.font.getsize( text, mode, direction, features, language, anchor ) left, top = offset[0] - stroke_width, offset[1] - stroke_width width, height = size[0] + 2 * stroke_width, size[1] + 2 * stroke_width return left, top, left + width, top + height def getmask( self, text: str | bytes, mode: str = "", direction: str | None = None, features: list[str] | None = None, language: str | None = None, stroke_width: float = 0, anchor: str | None = None, ink: int = 0, start: tuple[float, float] | None = None, ) -> Image.core.ImagingCore: """ Create a bitmap for the text. If the font uses antialiasing, the bitmap should have mode ``L`` and use a maximum value of 255. If the font has embedded color data, the bitmap should have mode ``RGBA``. Otherwise, it should have mode ``1``. :param text: Text to render. :param mode: Used by some graphics drivers to indicate what mode the driver prefers; if empty, the renderer may return either mode. Note that the mode is always a string, to simplify C-level implementations. .. versionadded:: 1.1.5 :param direction: Direction of the text. It can be 'rtl' (right to left), 'ltr' (left to right) or 'ttb' (top to bottom). Requires libraqm. .. versionadded:: 4.2.0 :param features: A list of OpenType font features to be used during text layout. This is usually used to turn on optional font features that are not enabled by default, for example 'dlig' or 'ss01', but can be also used to turn off default font features for example '-liga' to disable ligatures or '-kern' to disable kerning. To get all supported features, see https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist Requires libraqm. .. versionadded:: 4.2.0 :param language: Language of the text. Different languages may use different glyph shapes or ligatures. This parameter tells the font which language the text is in, and to apply the correct substitutions as appropriate, if available. It should be a `BCP 47 language code <https://www.w3.org/International/articles/language-tags/>`_ Requires libraqm. .. versionadded:: 6.0.0 :param stroke_width: The width of the text stroke. .. versionadded:: 6.2.0 :param anchor: The text anchor alignment. Determines the relative location of the anchor to the text. The default alignment is top left, specifically ``la`` for horizontal text and ``lt`` for vertical text. See :ref:`text-anchors` for details. .. versionadded:: 8.0.0 :param ink: Foreground ink for rendering in RGBA mode. .. versionadded:: 8.0.0 :param start: Tuple of horizontal and vertical offset, as text may render differently when starting at fractional coordinates. .. versionadded:: 9.4.0 :return: An internal PIL storage memory instance as defined by the :py:mod:`PIL.Image.core` interface module. 
""" return self.getmask2( text, mode, direction=direction, features=features, language=language, stroke_width=stroke_width, anchor=anchor, ink=ink, start=start, )[0] def getmask2( self, text: str | bytes, mode: str = "", direction: str | None = None, features: list[str] | None = None, language: str | None = None, stroke_width: float = 0, anchor: str | None = None, ink: int = 0, start: tuple[float, float] | None = None, *args: Any, **kwargs: Any, ) -> tuple[Image.core.ImagingCore, tuple[int, int]]: """ Create a bitmap for the text. If the font uses antialiasing, the bitmap should have mode ``L`` and use a maximum value of 255. If the font has embedded color data, the bitmap should have mode ``RGBA``. Otherwise, it should have mode ``1``. :param text: Text to render. :param mode: Used by some graphics drivers to indicate what mode the driver prefers; if empty, the renderer may return either mode. Note that the mode is always a string, to simplify C-level implementations. .. versionadded:: 1.1.5 :param direction: Direction of the text. It can be 'rtl' (right to left), 'ltr' (left to right) or 'ttb' (top to bottom). Requires libraqm. .. versionadded:: 4.2.0 :param features: A list of OpenType font features to be used during text layout. This is usually used to turn on optional font features that are not enabled by default, for example 'dlig' or 'ss01', but can be also used to turn off default font features for example '-liga' to disable ligatures or '-kern' to disable kerning. To get all supported features, see https://learn.microsoft.com/en-us/typography/opentype/spec/featurelist Requires libraqm. .. versionadded:: 4.2.0 :param language: Language of the text. Different languages may use different glyph shapes or ligatures. This parameter tells the font which language the text is in, and to apply the correct substitutions as appropriate, if available. It should be a `BCP 47 language code <https://www.w3.org/International/articles/language-tags/>`_ Requires libraqm. .. versionadded:: 6.0.0 :param stroke_width: The width of the text stroke. .. versionadded:: 6.2.0 :param anchor: The text anchor alignment. Determines the relative location of the anchor to the text. The default alignment is top left, specifically ``la`` for horizontal text and ``lt`` for vertical text. See :ref:`text-anchors` for details. .. versionadded:: 8.0.0 :param ink: Foreground ink for rendering in RGBA mode. .. versionadded:: 8.0.0 :param start: Tuple of horizontal and vertical offset, as text may render differently when starting at fractional coordinates. .. versionadded:: 9.4.0 :return: A tuple of an internal PIL storage memory instance as defined by the :py:mod:`PIL.Image.core` interface module, and the text offset, the gap between the starting coordinate and the first marking """ _string_length_check(text) if start is None: start = (0, 0) def fill(width: int, height: int) -> Image.core.ImagingCore: size = (width, height) Image._decompression_bomb_check(size) return Image.core.fill("RGBA" if mode == "RGBA" else "L", size) return self.font.render( text, fill, mode, direction, features, language, stroke_width, kwargs.get("stroke_filled", False), anchor, ink, start, ) def font_variant( self, font: StrOrBytesPath | BinaryIO | None = None, size: float | None = None, index: int | None = None, encoding: str | None = None, layout_engine: Layout | None = None, ) -> FreeTypeFont: """ Create a copy of this FreeTypeFont object, using any specified arguments to override the settings. 
Parameters are identical to the parameters used to initialize this object. :return: A FreeTypeFont object. """ if font is None: try: font = BytesIO(self.font_bytes) except AttributeError: font = self.path return FreeTypeFont( font=font, size=self.size if size is None else size, index=self.index if index is None else index, encoding=self.encoding if encoding is None else encoding, layout_engine=layout_engine or self.layout_engine, ) def get_variation_names(self) -> list[bytes]: """ :returns: A list of the named styles in a variation font. :exception OSError: If the font is not a variation font. """ names = self.font.getvarnames() return [name.replace(b"\x00", b"") for name in names] def set_variation_by_name(self, name: str | bytes) -> None: """ :param name: The name of the style. :exception OSError: If the font is not a variation font. """ names = self.get_variation_names() if not isinstance(name, bytes): name = name.encode() index = names.index(name) + 1 if index == getattr(self, "_last_variation_index", None): # When the same name is set twice in a row, # there is an 'unknown freetype error' # https://savannah.nongnu.org/bugs/?56186 return self._last_variation_index = index self.font.setvarname(index) def get_variation_axes(self) -> list[Axis]: """ :returns: A list of the axes in a variation font. :exception OSError: If the font is not a variation font. """ axes = self.font.getvaraxes() for axis in axes: if axis["name"]: axis["name"] = axis["name"].replace(b"\x00", b"") return axes def set_variation_by_axes(self, axes: list[float]) -> None: """ :param axes: A list of values for each axis. :exception OSError: If the font is not a variation font. """ self.font.setvaraxes(axes)
FreeTypeFont
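Typical use goes through the public `ImageFont.truetype` entry point; the font path below is illustrative and must point at a real TrueType file on your system.

from PIL import ImageFont

font = ImageFont.truetype("DejaVuSans.ttf", size=24)  # illustrative path
print(font.getname())            # (family, style)
print(font.getmetrics())         # (ascent, descent)
print(font.getlength("Hello"))   # advance width in pixels (1/64 precision)
print(font.getbbox("Hello"))     # (left, top, right, bottom)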
python
run-llama__llama_index
llama-index-core/llama_index/core/readers/file/base.py
{ "start": 5011, "end": 5595 }
class ____: """ Default file metadata function wrapper which stores the fs. Allows for pickling of the function. """ def __init__(self, fs: fsspec.AbstractFileSystem | None = None): self.fs = fs or get_default_fs() def __call__(self, file_path: str) -> dict: return default_file_metadata_func(file_path, self.fs) def get_default_fs() -> fsspec.AbstractFileSystem: return LocalFileSystem() def is_default_fs(fs: fsspec.AbstractFileSystem) -> bool: return isinstance(fs, LocalFileSystem) and not fs.auto_mkdir
_DefaultFileMetadataFunc
python
Pylons__pyramid
tests/test_traversal.py
{ "start": 35919, "end": 38339 }
class ____(unittest.TestCase): def setUp(self): cleanUp() def tearDown(self): cleanUp() def _callFUT(self, resource, request): from pyramid.traversal import virtual_root return virtual_root(resource, request) def _registerTraverser(self, traverser): from pyramid.threadlocal import get_current_registry reg = get_current_registry() from zope.interface import Interface from pyramid.interfaces import ITraverser reg.registerAdapter(traverser, (Interface,), ITraverser) def test_virtual_root_no_virtual_root_path(self): root = DummyContext() root.__name__ = None root.__parent__ = None one = DummyContext() one.__name__ = 'one' one.__parent__ = root request = DummyRequest() result = self._callFUT(one, request) self.assertEqual(result, root) def test_virtual_root_no_virtual_root_path_with_root_on_request(self): context = DummyContext() context.__parent__ = None request = DummyRequest() request.root = DummyContext() result = self._callFUT(context, request) self.assertEqual(result, request.root) def test_virtual_root_with_virtual_root_path(self): from pyramid.interfaces import VH_ROOT_KEY root = DummyContext() root.__parent__ = None context = DummyContext() context.__name__ = 'one' context.__parent__ = root traversed_to = DummyContext() environ = {VH_ROOT_KEY: '/one'} request = DummyRequest(environ) traverser = make_traverser({'context': traversed_to, 'view_name': ''}) self._registerTraverser(traverser) result = self._callFUT(context, request) self.assertEqual(result, traversed_to) self.assertEqual(root.request.environ['PATH_INFO'], '/one') def test_default(self): context = DummyContext() request = _makeRequest() request.environ['PATH_INFO'] = '/' result = self._callFUT(context, request) self.assertEqual(result, context) def test_default_no_registry_on_request(self): context = DummyContext() request = _makeRequest() del request.registry request.environ['PATH_INFO'] = '/' result = self._callFUT(context, request) self.assertEqual(result, context)
TestVirtualRoot
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/tokens.py
{ "start": 9944, "end": 10185 }
class ____(Token): __slots__ = ('value',) id = '<tag>' def __init__(self, value, start_mark, end_mark): # type: (Any, Any, Any) -> None Token.__init__(self, start_mark, end_mark) self.value = value
TagToken
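A sketch of constructing the token directly, assuming the module above is importable; in the YAML scanner the value is a (handle, suffix) pair, and the marks record source positions (None here for brevity).

tok = TagToken(value=("!!", "str"), start_mark=None, end_mark=None)
print(tok.id)     # '<tag>'
print(tok.value)  # ('!!', 'str')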
python
plotly__plotly.py
plotly/graph_objs/funnel/marker/colorbar/_tickformatstop.py
{ "start": 233, "end": 8544 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "funnel.marker.colorbar" _path_str = "funnel.marker.colorbar.tickformatstop" _valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"} @property def dtickrange(self): """ range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" The 'dtickrange' property is an info array that may be specified as: * a list or tuple of 2 elements where: (0) The 'dtickrange[0]' property accepts values of any type (1) The 'dtickrange[1]' property accepts values of any type Returns ------- list """ return self["dtickrange"] @dtickrange.setter def dtickrange(self, val): self["dtickrange"] = val @property def enabled(self): """ Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. The 'enabled' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["enabled"] @enabled.setter def enabled(self, val): self["enabled"] = val @property def name(self): """ When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. The 'name' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["name"] @name.setter def name(self, val): self["name"] = val @property def templateitemname(self): """ Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. The 'templateitemname' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["templateitemname"] @templateitemname.setter def templateitemname(self, val): self["templateitemname"] = val @property def value(self): """ string - dtickformat for described zoom level, the same as "tickformat" The 'value' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["value"] @value.setter def value(self, val): self["value"] = val @property def _prop_descriptions(self): return """\ dtickrange range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" enabled Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. name When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. 
templateitemname Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. value string - dtickformat for described zoom level, the same as "tickformat" """ def __init__( self, arg=None, dtickrange=None, enabled=None, name=None, templateitemname=None, value=None, **kwargs, ): """ Construct a new Tickformatstop object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.funnel.marker. colorbar.Tickformatstop` dtickrange range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" enabled Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. name When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. templateitemname Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. value string - dtickformat for described zoom level, the same as "tickformat" Returns ------- Tickformatstop """ super().__init__("tickformatstops") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.funnel.marker.colorbar.Tickformatstop constructor must be a dict or an instance of :class:`plotly.graph_objs.funnel.marker.colorbar.Tickformatstop`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("dtickrange", arg, dtickrange) self._set_property("enabled", arg, enabled) self._set_property("name", arg, name) self._set_property("templateitemname", arg, templateitemname) self._set_property("value", arg, value) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Tickformatstop
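A hypothetical usage sketch: `tickformatstops` entries attach zoom-dependent tick formats to the colorbar. The data, color array, and format strings are illustrative.

import plotly.graph_objects as go

fig = go.Figure(go.Funnel(
    y=["Visits", "Signups", "Purchases"],
    x=[1000, 400, 120],
    marker=dict(
        color=[1000, 400, 120],  # a color array so the colorbar is drawn
        colorbar=dict(tickformatstops=[
            go.funnel.marker.colorbar.Tickformatstop(
                dtickrange=[None, 100], value=".0f"),  # applies when zoomed in
            go.funnel.marker.colorbar.Tickformatstop(
                dtickrange=[100, None], value=".2s"),  # applies when zoomed out
        ]),
    ),
))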
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/matchClass1.py
{ "start": 7737, "end": 7834 }
class ____: val1: int val2: str = field(init=False) val3: complex @dataclass
Dataclass1
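A minimal runnable sketch (Python 3.10+) of what this sample exercises, assuming the @dataclass decorator sits just above the shown span: `field(init=False)` keeps `val2` out of `__init__`, and because dataclass `__match_args__` lists only init fields, class patterns still match on `val1` and `val3`.

from dataclasses import dataclass, field

@dataclass
class Dataclass1:
    val1: int
    val2: str = field(init=False)
    val3: complex

d = Dataclass1(1, 2j)       # val2 is not an __init__ parameter
d.val2 = "filled in later"  # assigned after construction

match d:
    case Dataclass1(val1=1, val3=2j):
        print("matched")    # prints: matched
    case _:
        print("no match")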
python
getsentry__sentry
src/sentry/auth/superuser.py
{ "start": 5831, "end": 6030 }
class ____(SentryAPIException): status_code = status.HTTP_400_BAD_REQUEST code = "invalid-superuser-access-json" message = "The request contains invalid json"
SuperuserAccessFormInvalidJson
python
scipy__scipy
scipy/stats/tests/test_generation/reference_distributions.py
{ "start": 57, "end": 10226 }
class ____:
    """Minimalist distribution infrastructure for generating reference data.

    The purpose is to generate reference values for unit tests of SciPy
    distribution accuracy and robustness.

    Handles array input with standard broadcasting rules, and method
    implementations are easily compared against their mathematical definitions.
    No attempt is made to handle edge cases or be fast, and arbitrary precision
    arithmetic is trusted for accuracy rather than making the method
    implementations "smart".

    Notes
    -----
    In this infrastructure, distribution families are classes, and
    fully-specified distributions (i.e. with definite values of all family
    parameters) are instances of these classes. Typically, the public methods
    accept as input only the argument at which the function is to be
    evaluated. Unlike SciPy distributions, they never accept values of
    distribution family shape, location, or scale parameters.

    A few other parameters are noteworthy:

    - All methods accept `dtype` to control the output data type. The default
      is `np.float64`, but `object` or `mp.mpf` may be specified to output the
      full-precision `mpf`.
    - `ppf`/`isf` accept a `guess` because they use a scalar rootfinder to
      invert the `cdf`/`sf`. This is passed directly as the `x0` argument of
      `mpmath.findroot`; see its documentation for details.
    - `moment` accepts `order`, an integer that specifies the order of the
      (raw) moment, and `center`, the value about which the moment is taken.
      The default is to calculate the mean and use it to calculate central
      moments; passing ``0`` results in a noncentral moment. For efficiency,
      the mean can be passed explicitly if it is already known.

    Follow the example of SkewNormal to generate new reference distributions,
    overriding only `__init__` and `_pdf`*. Use the reference distributions to
    generate reference values for unit tests of SciPy distribution method
    precision and robustness (e.g. for extreme arguments). If a SciPy
    method's implementation is independent and yet its output matches
    reference values generated with this infrastructure, it is unlikely that
    the SciPy and reference values are both inaccurate.

    * If the SciPy output *doesn't* match and the cause appears to be
    inaccuracy of the reference values (e.g. due to numerical issues that
    mpmath's arbitrary precision arithmetic doesn't handle), then it may be
    appropriate to override a method of the reference distribution rather
    than relying on the generic implementation. Otherwise, hesitate to
    override methods: the generic implementations are mathematically correct
    and easy to verify, whereas an override introduces many possibilities of
    mistakes, requires more time to write, and requires more time to review.
    In general, do not create custom unit tests to ensure that SciPy
    distribution methods are *correct* (in the sense of being consistent with
    the rest of the distribution methods); generic tests take care of that.
    """

    def __init__(self, **kwargs):
        try:
            if mpmath.dps is not None:
                message = ("`mpmath.dps` has been assigned. This is not "
                           "intended usage; instead, assign the desired "
                           "precision to `mpmath.mp.dps` (e.g. `from mpmath "
                           "import mp; mp.dps = 50`).")
                raise RuntimeError(message)
        except AttributeError:
            mpmath.dps = None

        if mp.dps <= 15:
            message = ("`mpmath.mp.dps <= 15`. Set a higher precision (e.g."
"`50`) to use this distribution.") raise RuntimeError(message) self._params = {key:self._make_mpf_array(val) for key, val in kwargs.items()} def _make_mpf_array(self, x): shape = np.shape(x) x = np.asarray(x, dtype=np.float64).ravel() return np.asarray([mp.mpf(xi) for xi in x]).reshape(shape)[()] def _pdf(self, x): raise NotImplementedError("_pdf must be overridden.") def _cdf(self, x, **kwargs): if ((self._cdf.__func__ is ReferenceDistribution._cdf) and (self._sf.__func__ is not ReferenceDistribution._sf)): return mp.one - self._sf(x, **kwargs) a, b = self._support(**kwargs) res = mp.quad(lambda x: self._pdf(x, **kwargs), (a, x)) res = res if res < 0.5 else mp.one - self._sf(x, **kwargs) return res def _sf(self, x, **kwargs): if ((self._sf.__func__ is ReferenceDistribution._sf) and (self._cdf.__func__ is not ReferenceDistribution._cdf)): return mp.one - self._cdf(x, **kwargs) a, b = self._support(**kwargs) res = mp.quad(lambda x: self._pdf(x, **kwargs), (x, b)) res = res if res < 0.5 else mp.one - self._cdf(x, **kwargs) return res def _ppf(self, p, guess=0, **kwargs): if ((self._ppf.__func__ is ReferenceDistribution._ppf) and (self._isf.__func__ is not ReferenceDistribution._isf)): return self._isf(mp.one - p, guess, **kwargs) def f(x): return self._cdf(x, **kwargs) - p return mp.findroot(f, guess) def _isf(self, p, guess=0, **kwargs): if ((self._isf.__func__ is ReferenceDistribution._isf) and (self._ppf.__func__ is not ReferenceDistribution._ppf)): return self._ppf(mp.one - p, guess, **kwargs) def f(x): return self._sf(x, **kwargs) - p return mp.findroot(f, guess) def _logpdf(self, x, **kwargs): return mp.log(self._pdf(x, **kwargs)) def _logcdf(self, x, **kwargs): return mp.log(self._cdf(x, **kwargs)) def _logsf(self, x, **kwargs): return mp.log(self._sf(x, **kwargs)) def _support(self, **kwargs): return -mp.inf, mp.inf def _entropy(self, **kwargs): def integrand(x): logpdf = self._logpdf(x, **kwargs) pdf = mp.exp(logpdf) return -pdf*logpdf a, b = self._support(**kwargs) return mp.quad(integrand, (a, b)) def _mean(self, **kwargs): return self._moment(order=1, center=0, **kwargs) def _var(self, **kwargs): mu = self._mean(**kwargs) return self._moment(order=2, center=mu, **kwargs) def _skew(self, **kwargs): mu = self._mean(**kwargs) u2 = self._moment(order=2, center=mu, **kwargs) sigma = mp.sqrt(u2) u3 = self._moment(order=3, center=mu, **kwargs) return u3 / sigma**3 def _kurtosis(self, **kwargs): mu = self._mean(**kwargs) u2 = self._moment(order=2, center=mu, **kwargs) u4 = self._moment(order=4, center=mu, **kwargs) return u4 / u2**2 - 3 def _moment(self, order, center, **kwargs): def integrand(x): return self._pdf(x, **kwargs)*(x - center)**order if center is None: center = self._mean(**kwargs) a, b = self._support(**kwargs) return mp.quad(integrand, (a, b)) def pdf(self, x, dtype=np.float64): fun = np.vectorize(self._pdf) x = self._make_mpf_array(x) res = fun(x, **self._params) return np.asarray(res, dtype=dtype)[()] def cdf(self, x, dtype=np.float64): fun = np.vectorize(self._cdf) x = self._make_mpf_array(x) res = fun(x, **self._params) return np.asarray(res, dtype=dtype)[()] def sf(self, x, dtype=np.float64): fun = np.vectorize(self._sf) x = self._make_mpf_array(x) res = fun(x, **self._params) return np.asarray(res, dtype=dtype)[()] def ppf(self, x, guess=0, dtype=np.float64): fun = np.vectorize(self._ppf, excluded={1}) # don't vectorize guess x = self._make_mpf_array(x) res = fun(x, guess, **self._params) return np.asarray(res, dtype=dtype)[()] def isf(self, x, guess=0, 
dtype=np.float64): fun = np.vectorize(self._isf, excluded={1}) # don't vectorize guess x = self._make_mpf_array(x) res = fun(x, guess, **self._params) return np.asarray(res, dtype=dtype)[()] def logpdf(self, x, dtype=np.float64): fun = np.vectorize(self._logpdf) x = self._make_mpf_array(x) res = fun(x, **self._params) return np.asarray(res, dtype=dtype)[()] def logcdf(self, x, dtype=np.float64): fun = np.vectorize(self._logcdf) x = self._make_mpf_array(x) res = fun(x, **self._params) return np.asarray(res, dtype=dtype)[()] def logsf(self, x, dtype=np.float64): fun = np.vectorize(self._logsf) x = self._make_mpf_array(x) res = fun(x, **self._params) return np.asarray(res, dtype=dtype)[()] def support(self, dtype=np.float64): fun = np.vectorize(self._support) res = fun(**self._params) return np.asarray(res, dtype=dtype)[()] def entropy(self, dtype=np.float64): fun = np.vectorize(self._entropy) res = fun(**self._params) return np.asarray(res, dtype=dtype)[()] def mean(self, dtype=np.float64): fun = np.vectorize(self._mean) res = fun(**self._params) return np.asarray(res, dtype=dtype)[()] def var(self, dtype=np.float64): fun = np.vectorize(self._var) res = fun(**self._params) return np.asarray(res, dtype=dtype)[()] def skew(self, dtype=np.float64): fun = np.vectorize(self._skew) res = fun(**self._params) return np.asarray(res, dtype=dtype)[()] def kurtosis(self, dtype=np.float64): fun = np.vectorize(self._kurtosis) res = fun(**self._params) return np.asarray(res, dtype=dtype)[()] def moment(self, order, center=None, dtype=np.float64): fun = np.vectorize(self._moment) order = self._make_mpf_array(order) res = fun(order, **self._params) return np.asarray(res, dtype=dtype)[()]
ReferenceDistribution
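A minimal sketch of adding a new reference distribution in the style the docstring describes, with only `__init__` and `_pdf` overridden; `Normal` and its `sigma` parameter are illustrative, and the base class above is assumed importable from its module.

from mpmath import mp

mp.dps = 50  # the constructor rejects precision <= 15 digits

class Normal(ReferenceDistribution):
    def __init__(self, *, sigma):
        super().__init__(sigma=sigma)

    def _pdf(self, x, sigma):
        return mp.exp(-(x / sigma)**2 / 2) / (sigma * mp.sqrt(2 * mp.pi))

dist = Normal(sigma=1)
print(dist.cdf(1.0))  # ~0.8413447..., via arbitrary-precision quadrature
print(dist.var())     # ~1.0, computed from the generic moment integrals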
python
wandb__wandb
wandb/vendor/gql-0.2.0/wandb_gql/transport/http.py
{ "start": 0, "end": 172 }
class ____(object): def __init__(self, url, headers=None, cookies=None): self.url = url self.headers = headers self.cookies = cookies
HTTPTransport
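Usage is just construction; the endpoint URL and header values below are placeholders. In gql, concrete transports (e.g. a requests-based one) build on this base to do the actual POSTing.

transport = HTTPTransport(
    "https://example.com/graphql",               # hypothetical endpoint
    headers={"Authorization": "Bearer <token>"}, # hypothetical header
)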
python
spyder-ide__spyder
external-deps/qtconsole/qtconsole/kernel_mixins.py
{ "start": 430, "end": 628 }
class ____(MetaQObjectHasTraits('NewBase', (HasTraits, SuperQObject), {})):
    """ A KernelManager that provides signals and slots.
    """

    kernel_restarted = QtCore.Signal()
QtKernelManagerMixin
python
sqlalchemy__sqlalchemy
test/orm/test_composites.py
{ "start": 24375, "end": 29549 }
class ____(fixtures.MappedTest): @testing.fixture def point_fixture(self, decl_base): def go(active_history): @dataclasses.dataclass class Point: x: int y: int class Edge(decl_base): __tablename__ = "edge" id = mapped_column(Integer, primary_key=True) start = composite( Point, mapped_column("x1", Integer), mapped_column("y1", Integer), active_history=active_history, ) end = composite( Point, mapped_column("x2", Integer, nullable=True), mapped_column("y2", Integer, nullable=True), active_history=active_history, ) decl_base.metadata.create_all(testing.db) return Point, Edge return go @testing.variation("active_history", [True, False]) @testing.variation("hist_on_mapping", [True, False]) def test_event_listener_no_value_to_set( self, point_fixture, active_history, hist_on_mapping ): if hist_on_mapping: config_active_history = bool(active_history) else: config_active_history = False Point, Edge = point_fixture(config_active_history) if not hist_on_mapping and active_history: Edge.start.impl.active_history = True m1 = mock.Mock() event.listen(Edge.start, "set", m1) e1 = Edge() e1.start = Point(5, 6) eq_( m1.mock_calls, [ mock.call( e1, Point(5, 6), ( LoaderCallableStatus.NO_VALUE if not active_history else Point(None, None) ), Edge.start.impl, ) ], ) eq_( inspect(e1).attrs.start.history, ([Point(5, 6)], (), [Point(None, None)]), ) @testing.variation("active_history", [True, False]) @testing.variation("hist_on_mapping", [True, False]) def test_event_listener_set_to_new( self, point_fixture, active_history, hist_on_mapping ): if hist_on_mapping: config_active_history = bool(active_history) else: config_active_history = False Point, Edge = point_fixture(config_active_history) if not hist_on_mapping and active_history: Edge.start.impl.active_history = True e1 = Edge() e1.start = Point(5, 6) sess = fixture_session() sess.add(e1) sess.commit() assert "start" not in e1.__dict__ m1 = mock.Mock() event.listen(Edge.start, "set", m1) e1.start = Point(7, 8) eq_( m1.mock_calls, [ mock.call( e1, Point(7, 8), ( LoaderCallableStatus.NO_VALUE if not active_history else Point(5, 6) ), Edge.start.impl, ) ], ) if active_history: eq_( inspect(e1).attrs.start.history, ([Point(7, 8)], (), [Point(5, 6)]), ) else: eq_( inspect(e1).attrs.start.history, ([Point(7, 8)], (), [Point(None, None)]), ) @testing.variation("active_history", [True, False]) @testing.variation("hist_on_mapping", [True, False]) def test_event_listener_set_to_deleted( self, point_fixture, active_history, hist_on_mapping ): if hist_on_mapping: config_active_history = bool(active_history) else: config_active_history = False Point, Edge = point_fixture(config_active_history) if not hist_on_mapping and active_history: Edge.start.impl.active_history = True e1 = Edge() e1.start = Point(5, 6) sess = fixture_session() sess.add(e1) sess.commit() assert "start" not in e1.__dict__ m1 = mock.Mock() event.listen(Edge.start, "remove", m1) del e1.start eq_( m1.mock_calls, [ mock.call( e1, ( LoaderCallableStatus.NO_VALUE if not active_history else Point(5, 6) ), Edge.start.impl, ) ], ) if active_history: eq_( inspect(e1).attrs.start.history, ([Point(None, None)], (), [Point(5, 6)]), ) else: eq_( inspect(e1).attrs.start.history, ([Point(None, None)], (), [Point(None, None)]), )
EventsEtcTest
python
streamlit__streamlit
lib/streamlit/elements/widgets/time_widgets.py
{ "start": 11861, "end": 13558 }
class ____: value: Sequence[date] | None is_range: bool max: date min: date @classmethod def from_raw_values( cls, value: DateValue, min_value: NullableScalarDateValue, max_value: NullableScalarDateValue, ) -> _DateInputValues: parsed_value, is_range = _parse_date_value(value=value) parsed_min = _parse_min_date( min_value=min_value, parsed_dates=parsed_value, ) parsed_max = _parse_max_date( max_value=max_value, parsed_dates=parsed_value, ) if value == "today": v = cast("list[date]", parsed_value)[0] if v < parsed_min: parsed_value = [parsed_min] if v > parsed_max: parsed_value = [parsed_max] return cls( value=parsed_value, is_range=is_range, min=parsed_min, max=parsed_max, ) def __post_init__(self) -> None: if self.min > self.max: raise StreamlitAPIException( f"The `min_value`, set to {self.min}, shouldn't be larger " f"than the `max_value`, set to {self.max}." ) if self.value: start_value = self.value[0] end_value = self.value[-1] if (start_value < self.min) or (end_value > self.max): raise StreamlitAPIException( f"The default `value` of {self.value} " f"must lie between the `min_value` of {self.min} " f"and the `max_value` of {self.max}, inclusively." ) @dataclass
_DateInputValues
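A hypothetical check of the validation logic, assuming the @dataclass decorator implied by `__post_init__` sits just above the excerpt; the dates are illustrative. Constructing a value outside [min, max] raises the exception from `__post_init__`.

from datetime import date

vals = _DateInputValues(
    value=[date(2024, 1, 1)],  # before the allowed minimum
    is_range=False,
    min=date(2024, 6, 1),
    max=date(2024, 12, 31),
)  # raises StreamlitAPIException: the default value must lie in [min, max]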
python
bokeh__bokeh
src/bokeh/core/property/data_frame.py
{ "start": 1580, "end": 2223 }
class ____(Property["IntoDataFrame"]):
    """ Accept an eager dataframe supported by Narwhals.

    This property only exists to support type validation, e.g. for "accepts"
    clauses. It is not serializable itself, and is not useful to add to Bokeh
    models directly.

    """

    def validate(self, value: Any, detail: bool = True) -> None:
        import narwhals.stable.v1 as nw

        super().validate(value, detail)

        if nw.dependencies.is_into_dataframe(value):
            return

        msg = "" if not detail else f"expected object convertible to Narwhals DataFrame, got {value!r}"
        raise ValueError(msg)
EagerDataFrame
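A hypothetical exercise of the validator, assuming pandas is installed and that the base `Property.validate` is a no-op here: anything Narwhals can treat as an eager dataframe passes, everything else raises.

import pandas as pd

prop = EagerDataFrame()
prop.validate(pd.DataFrame({"x": [1, 2, 3]}))  # passes silently

try:
    prop.validate([1, 2, 3])                   # not convertible to a dataframe
except ValueError as err:
    print(err)  # "expected object convertible to Narwhals DataFrame, got [1, 2, 3]"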
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_blank07.py
{ "start": 315, "end": 1383 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_blank07.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line"}) chart.axis_ids = [45705856, 45843584] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) chart.show_na_as_empty_cell() worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
django-haystack__django-haystack
test_haystack/elasticsearch7_tests/test_backend.py
{ "start": 27486, "end": 29716 }
class ____(TestCase): fixtures = ["base_data.json"] def setUp(self): super().setUp() # Wipe it clean. clear_elasticsearch_index() # Stow. self.old_ui = connections["elasticsearch"].get_unified_index() self.ui = UnifiedIndex() self.smmi = Elasticsearch7MockSearchIndex() self.ui.build(indexes=[self.smmi]) connections["elasticsearch"]._index = self.ui self.sb = connections["elasticsearch"].get_backend() self.sq = connections["elasticsearch"].get_query() # Force indexing of the content. self.smmi.update(using="elasticsearch") def tearDown(self): connections["elasticsearch"]._index = self.old_ui super().tearDown() def test_log_query(self): reset_search_queries() self.assertEqual(len(connections["elasticsearch"].queries), 0) with self.settings(DEBUG=False): len(self.sq.get_results()) self.assertEqual(len(connections["elasticsearch"].queries), 0) with self.settings(DEBUG=True): # Redefine it to clear out the cached results. self.sq = connections["elasticsearch"].query(using="elasticsearch") self.sq.add_filter(SQ(name="bar")) len(self.sq.get_results()) self.assertEqual(len(connections["elasticsearch"].queries), 1) self.assertEqual( connections["elasticsearch"].queries[0]["query_string"], "name:(bar)" ) # And again, for good measure. self.sq = connections["elasticsearch"].query("elasticsearch") self.sq.add_filter(SQ(name="bar")) self.sq.add_filter(SQ(text="moof")) len(self.sq.get_results()) self.assertEqual(len(connections["elasticsearch"].queries), 2) self.assertEqual( connections["elasticsearch"].queries[0]["query_string"], "name:(bar)" ) self.assertEqual( connections["elasticsearch"].queries[1]["query_string"], "(name:(bar) AND text:(moof))", ) lssqstc_all_loaded = None @override_settings(DEBUG=True)
LiveElasticsearch7SearchQueryTestCase
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 70181, "end": 70892 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "project_card_id", "repository_id", "title", "body", "client_mutation_id", ) project_card_id = sgqlc.types.Field( sgqlc.types.non_null(ID), graphql_name="projectCardId" ) repository_id = sgqlc.types.Field( sgqlc.types.non_null(ID), graphql_name="repositoryId" ) title = sgqlc.types.Field(String, graphql_name="title") body = sgqlc.types.Field(String, graphql_name="body") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
ConvertProjectCardNoteToIssueInput
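sgqlc input types are typically constructed with keyword arguments matching the declared fields and are serialized by sgqlc when the mutation runs; the node IDs below are placeholders.

payload = ConvertProjectCardNoteToIssueInput(
    project_card_id="PC_kwDO_placeholder",  # hypothetical GraphQL node ID
    repository_id="R_kgDO_placeholder",     # hypothetical GraphQL node ID
    title="Convert this note to an issue",
)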