language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
pytest-dev__pytest-django
tests/test_db_setup.py
{ "start": 10704, "end": 12718 }
class ____: db_settings: ClassVar = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": "db_name", "TEST": {"NAME": "test_custom_db_name"}, } } def test_db_with_tox_suffix( self, django_pytester: DjangoPytester, monkeypatch: pytest.MonkeyPatch, ) -> None: "A test to check that Tox DB suffix works when running in parallel." monkeypatch.setenv("TOX_PARALLEL_ENV", "py37-django22") django_pytester.create_test_module( """ import pytest from django.db import connections @pytest.mark.django_db def test_inner(): (conn, ) = connections.all() assert conn.vendor == 'sqlite' db_name = conn.creation._get_test_db_name() assert db_name == 'test_custom_db_name_py37-django22' """ ) result = django_pytester.runpytest_subprocess("--tb=short", "-vv") assert result.ret == 0 result.stdout.fnmatch_lines(["*test_inner*PASSED*"]) def test_db_with_empty_tox_suffix( self, django_pytester: DjangoPytester, monkeypatch: pytest.MonkeyPatch, ) -> None: "A test to check that Tox DB suffix is not used when suffix would be empty." monkeypatch.setenv("TOX_PARALLEL_ENV", "") django_pytester.create_test_module( """ import pytest from django.db import connections @pytest.mark.django_db def test_inner(): (conn,) = connections.all() assert conn.vendor == 'sqlite' db_name = conn.creation._get_test_db_name() assert db_name == 'test_custom_db_name' """ ) result = django_pytester.runpytest_subprocess("--tb=short", "-vv") assert result.ret == 0 result.stdout.fnmatch_lines(["*test_inner*PASSED*"])
TestSqliteWithTox
python
doocs__leetcode
solution/3300-3399/3317.Find the Number of Possible Ways for an Event/Solution.py
{ "start": 0, "end": 472 }
class ____: def numberOfWays(self, n: int, x: int, y: int) -> int: mod = 10**9 + 7 f = [[0] * (x + 1) for _ in range(n + 1)] f[0][0] = 1 for i in range(1, n + 1): for j in range(1, x + 1): f[i][j] = (f[i - 1][j] * j + f[i - 1][j - 1] * (x - (j - 1))) % mod ans, p = 0, 1 for j in range(1, x + 1): p = p * y % mod ans = (ans + f[n][j] * p) % mod return ans
Solution
python
python__mypy
mypyc/analysis/dataflow.py
{ "start": 13847, "end": 19519 }
class ____(BaseAnalysisVisitor[Value]): def visit_branch(self, op: Branch) -> GenAndKill[Value]: return non_trivial_sources(op), set() def visit_return(self, op: Return) -> GenAndKill[Value]: if not isinstance(op.value, (Integer, Float)): return {op.value}, set() else: return set(), set() def visit_unreachable(self, op: Unreachable) -> GenAndKill[Value]: return set(), set() def visit_register_op(self, op: RegisterOp) -> GenAndKill[Value]: gen = non_trivial_sources(op) if not op.is_void: return gen, {op} else: return gen, set() def visit_assign(self, op: Assign) -> GenAndKill[Value]: return non_trivial_sources(op), {op.dest} def visit_assign_multi(self, op: AssignMulti) -> GenAndKill[Value]: return non_trivial_sources(op), {op.dest} def visit_set_mem(self, op: SetMem) -> GenAndKill[Value]: return non_trivial_sources(op), set() def visit_inc_ref(self, op: IncRef) -> GenAndKill[Value]: return set(), set() def visit_dec_ref(self, op: DecRef) -> GenAndKill[Value]: return set(), set() def analyze_live_regs(blocks: list[BasicBlock], cfg: CFG) -> AnalysisResult[Value]: """Calculate live registers at each CFG location. A register is live at a location if it can be read along some CFG path starting from the location. """ return run_analysis( blocks=blocks, cfg=cfg, gen_and_kill=LivenessVisitor(), initial=set(), backward=True, kind=MAYBE_ANALYSIS, ) # Analysis kinds MUST_ANALYSIS = 0 MAYBE_ANALYSIS = 1 def run_analysis( blocks: list[BasicBlock], cfg: CFG, gen_and_kill: OpVisitor[GenAndKill[T]], initial: set[T], kind: int, backward: bool, universe: set[T] | None = None, ) -> AnalysisResult[T]: """Run a general set-based data flow analysis. 
Args: blocks: All basic blocks cfg: Control-flow graph for the code gen_and_kill: Implementation of gen and kill functions for each op initial: Value of analysis for the entry points (for a forward analysis) or the exit points (for a backward analysis) kind: MUST_ANALYSIS or MAYBE_ANALYSIS backward: If False, the analysis is a forward analysis; it's backward otherwise universe: For a must analysis, the set of all possible values. This is the starting value for the work list algorithm, which will narrow this down until reaching a fixed point. For a maybe analysis the iteration always starts from an empty set and this argument is ignored. Return analysis results: (before, after) """ block_gen = {} block_kill = {} # Calculate kill and gen sets for entire basic blocks. for block in blocks: gen: set[T] = set() kill: set[T] = set() ops = block.ops if backward: ops = list(reversed(ops)) for op in ops: opgen, opkill = op.accept(gen_and_kill) gen = (gen - opkill) | opgen kill = (kill - opgen) | opkill block_gen[block] = gen block_kill[block] = kill # Set up initial state for worklist algorithm. worklist = list(blocks) if not backward: worklist.reverse() # Reverse for a small performance improvement workset = set(worklist) before: dict[BasicBlock, set[T]] = {} after: dict[BasicBlock, set[T]] = {} for block in blocks: if kind == MAYBE_ANALYSIS: before[block] = set() after[block] = set() else: assert universe is not None, "Universe must be defined for a must analysis" before[block] = set(universe) after[block] = set(universe) if backward: pred_map = cfg.succ succ_map = cfg.pred else: pred_map = cfg.pred succ_map = cfg.succ # Run work list algorithm to generate in and out sets for each basic block. 
while worklist: label = worklist.pop() workset.remove(label) if pred_map[label]: new_before: set[T] | None = None for pred in pred_map[label]: if new_before is None: new_before = set(after[pred]) elif kind == MAYBE_ANALYSIS: new_before |= after[pred] else: new_before &= after[pred] assert new_before is not None else: new_before = set(initial) before[label] = new_before new_after = (new_before - block_kill[label]) | block_gen[label] if new_after != after[label]: for succ in succ_map[label]: if succ not in workset: worklist.append(succ) workset.add(succ) after[label] = new_after # Run algorithm for each basic block to generate opcode-level sets. op_before: dict[tuple[BasicBlock, int], set[T]] = {} op_after: dict[tuple[BasicBlock, int], set[T]] = {} for block in blocks: label = block cur = before[label] ops_enum: Iterator[tuple[int, Op]] = enumerate(block.ops) if backward: ops_enum = reversed(list(ops_enum)) for idx, op in ops_enum: op_before[label, idx] = cur opgen, opkill = op.accept(gen_and_kill) cur = (cur - opkill) | opgen op_after[label, idx] = cur if backward: op_after, op_before = op_before, op_after return AnalysisResult(op_before, op_after)
LivenessVisitor
python
ansible__ansible
lib/ansible/errors/__init__.py
{ "start": 5923, "end": 6027 }
class ____(AnsibleError): """The requested config entry is not defined."""
AnsibleUndefinedConfigEntry
python
jazzband__tablib
src/tablib/formats/_df.py
{ "start": 118, "end": 1112 }
class ____: title = 'df' extensions = ('df',) @classmethod def detect(cls, stream): """Returns True if given stream is a DataFrame.""" if DataFrame is None: return False elif isinstance(stream, DataFrame): return True try: DataFrame(stream.read()) return True except ValueError: return False @classmethod def export_set(cls, dset): """Returns DataFrame representation of DataBook.""" if DataFrame is None: raise NotImplementedError( 'DataFrame Format requires `pandas` to be installed.' ' Try `pip install "tablib[pandas]"`.') dataframe = DataFrame(dset.dict, columns=dset.headers) return dataframe @classmethod def import_set(cls, dset, in_stream): """Returns dataset from DataFrame.""" dset.wipe() dset.dict = in_stream.to_dict(orient='records')
DataFrameFormat
python
getsentry__sentry
tests/sentry/sentry_metrics/consumers/test_last_seen_updater.py
{ "start": 2221, "end": 5060 }
class ____(TestCase): @staticmethod def processing_factory(): return LastSeenUpdaterStrategyFactory( ingest_profile="release-health", indexer_db="postgres", max_batch_time=1.0, max_batch_size=1, ) def setUp(self) -> None: self.org_id = 1234 self.stale_id = 2001 self.fresh_id = 2002 self.stale_last_seen = timezone.now() - timedelta(days=1) self.fresh_last_seen = timezone.now() - timedelta(hours=1) self.table = StringIndexer # needs to match ingest_config on line 102 self.table.objects.create( organization_id=self.org_id, string="e2e_0", id=self.stale_id, last_seen=self.stale_last_seen, ) self.table.objects.create( organization_id=self.org_id, string="e2e_1", id=self.fresh_id, last_seen=self.fresh_last_seen, ) def tearDown(self) -> None: self.table.objects.filter(id=self.fresh_id).delete() self.table.objects.filter(id=self.stale_id).delete() def test_basic_flow(self) -> None: # we can't use fixtures with unittest.TestCase commit = Mock() message = kafka_message(headerless_kafka_payload(mixed_payload())) factory = self.processing_factory() processing_strategy = factory.create_with_partitions( commit, {Partition(Topic("fake-topic"), 0): 0} ) processing_strategy.submit(message) processing_strategy.poll() processing_strategy.join(1) fresh_item = self.table.objects.get(id=self.fresh_id) assert fresh_item.last_seen == self.fresh_last_seen stale_item = self.table.objects.get(id=self.stale_id) # without doing a bunch of mocking around time objects, stale_item.last_seen # should be approximately equal to timezone.now() but they won't be perfectly equal assert (timezone.now() - stale_item.last_seen) < timedelta(seconds=30) factory.shutdown() def test_message_processes_after_bad_message(self) -> None: commit = Mock() ok_message = kafka_message(headerless_kafka_payload(mixed_payload())) bad_message = kafka_message(headerless_kafka_payload(bad_payload())) factory = self.processing_factory() processing_strategy = factory.create_with_partitions( commit, 
{Partition(Topic("fake-topic"), 0): 0} ) processing_strategy.submit(bad_message) processing_strategy.submit(ok_message) processing_strategy.poll() processing_strategy.join(1) stale_item = self.table.objects.get(id=self.stale_id) assert stale_item.last_seen > self.stale_last_seen factory.shutdown()
TestLastSeenUpdaterEndToEnd
python
tensorflow__tensorflow
tensorflow/python/framework/errors_impl.py
{ "start": 8574, "end": 9389 }
class ____(OpError): """Raised when an operation is cancelled. For example, a long-running operation e.g.`tf.queue.QueueBase.enqueue`, or a `tf.function` call may be cancelled by either running another operation e.g. `tf.queue.QueueBase.close` or a remote worker failure. This long-running operation will fail by raising `CancelledError`. Example: >>> q = tf.queue.FIFOQueue(10, tf.float32, ((),)) >>> q.enqueue((10.0,)) >>> q.close() >>> q.enqueue((10.0,)) Traceback (most recent call last): ... CancelledError: ... """ def __init__(self, node_def, op, message, *args): """Creates a `CancelledError`.""" super(CancelledError, self).__init__(node_def, op, message, CANCELLED, *args) @tf_export("errors.UnknownError")
CancelledError
python
huggingface__transformers
src/transformers/models/perception_lm/modular_perception_lm.py
{ "start": 4259, "end": 5768 }
class ____(LlavaCausalLMOutputWithPast): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. Image hidden_states of the model produced by the vision encoder and after projecting the last hidden state. video_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_videos, sequence_length, hidden_size)`. Video hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ video_hidden_states: Optional[torch.FloatTensor] = None @auto_docstring
PerceptionLMCausalLMOutputWithPast
python
kennethreitz__tablib
src/tablib/packages/dbfpy/fields.py
{ "start": 8761, "end": 9182 }
class ____(DbfFieldDef): """Definition of the integer field.""" typeCode = "I" length = 4 defaultValue = 0 def decodeValue(self, value): """Return an integer number decoded from ``value``.""" return struct.unpack("<i", value)[0] def encodeValue(self, value): """Return string containing encoded ``value``.""" return struct.pack("<i", int(value))
DbfIntegerFieldDef
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/events.py
{ "start": 4722, "end": 5245 }
class ____(NodeEvent): __slots__ = 'tag', 'implicit', 'value', 'style' def __init__( self, anchor, tag, implicit, value, start_mark=None, end_mark=None, style=None, comment=None, ): # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) self.tag = tag self.implicit = implicit self.value = value self.style = style
ScalarEvent
python
Netflix__metaflow
metaflow/exception.py
{ "start": 3211, "end": 3291 }
class ____(MetaflowException): headline = "Tagging error"
MetaflowTaggingError
python
getsentry__sentry
src/sentry/sentry_metrics/querying/data/execution.py
{ "start": 20412, "end": 30456 }
class ____: """ Represents an executor that is responsible for scheduling execution of the supplied ScheduledQuery(s). """ def __init__(self, organization: Organization, projects: Sequence[Project], referrer: str): self._organization = organization self._projects = projects self._referrer = referrer # List of queries scheduled for execution which will change based on the progress that the execution has. self._scheduled_queries: list[ScheduledQuery | None] = [] # List of query results that will be populated during query execution. self._query_results: list[PartialQueryResult | QueryResult | None] = [] # Tracks the number of queries that have been executed (for measuring purposes). self._number_of_executed_queries = 0 def _build_request(self, query: MetricsQuery) -> Request: """ Builds a Snuba Request given a MetricsQuery to execute. Returns: A Snuba Request object which contains the query to execute. """ return Request( # The dataset used here is arbitrary, since the `run_query` function will infer it internally. dataset=Dataset.Metrics.value, query=query, app_id="default", tenant_ids={"referrer": self._referrer, "organization_id": self._organization.id}, ) def _build_request_for_partial( self, query: MetricsQuery, partial_query_result: PartialQueryResult ) -> Request: """ Builds a Snuba Request given a PartialQueryResult by applying the filters of the last query in the partial result to the MetricsQuery that we want to execute. Returns: A Snuba Request object which contains the transformed query to execute. """ # We compute the groups that were returned by the query that was executed. We then inject those groups in each # `Timeseries` of the next query to execute. We do this in order to have at least the same groups returned by # the next query. # # Note that the mutation we do is not reflected in the queries that are returned as part of the # `QueryResult`(s) but since we do not need this data we can leave it out. 
_, last_query_result, _ = partial_query_result.previous_queries[0] next_metrics_query = _push_down_group_filters( query, # For now, we take the last result which will be the only one since we run at most two chained queries, # namely totals and series. _extract_groups_from_seq(last_query_result["data"]), ) return self._build_request(next_metrics_query) def _bulk_run_query(self, requests: list[Request]) -> list[Mapping[str, Any]]: """ Wraps the bulk_run_query method with some additional metrics and error handling. Returns: The results of the bulk_run_query method. """ self._number_of_executed_queries += len(requests) try: with metrics.timer(key="ddm.metrics_api.execution.bulk_execution_time"): return bulk_run_query(requests) except SnubaError as e: sentry_sdk.capture_exception(e) raise MetricsQueryExecutionError("An error occurred while executing the query") from e def _bulk_execute(self) -> bool: """ Executes all the scheduled queries in _scheduled_queries and merges the results into _query_results. This method must be called in a loop since it advances the execution one step at a time by parallelizing as much as possible all the queries that have to be executed and that have no sequential dependency defined via next. Returns: A boolean which is True when more queries can be executed or False otherwise. """ # We create all the requests that can be run in bulk, by checking the scheduled queries that we can run. bulk_requests = [] mappings = [] for query_index, scheduled_query in enumerate(self._scheduled_queries): if scheduled_query is None: continue previous_result = self._query_results[query_index] metrics_query = scheduled_query.metrics_query if previous_result is None: bulk_requests.append(self._build_request(metrics_query)) elif isinstance(previous_result, PartialQueryResult): bulk_requests.append( self._build_request_for_partial(metrics_query, previous_result) ) mappings.append(query_index) # If we have no more requests to run, we can stop the execution. 
if not bulk_requests: return False # We execute all the requests in bulk and for each result we decide what to do based on the next query and the # previous result in the `_query_results` array. bulk_results = self._bulk_run_query(bulk_requests) for query_index, query_result in zip(mappings, bulk_results): query_result = cast(dict[str, Any], query_result) scheduled_query = self._scheduled_queries[query_index] if scheduled_query is None: continue # If the query is a totals query and has dynamic limit, we want to check if we were able to load more groups # or not. has_more = False if scheduled_query.type == ScheduledQueryType.TOTALS and scheduled_query.dynamic_limit: data = query_result["data"] limit = scheduled_query.metrics_query.limit.limit # We take only the first n - 1 elements, since we have 1 element more of lookahead which is used to # determine if there are more groups. query_result["data"] = data[: limit - 1] has_more = len(data) >= limit previous_result = self._query_results[query_index] if scheduled_query.next is None: if previous_result is None: self._query_results[query_index] = QueryResult.from_scheduled_query( scheduled_query, query_result, has_more ) elif isinstance(previous_result, PartialQueryResult): first_result = previous_result.to_query_result() second_result = QueryResult.from_scheduled_query( scheduled_query, query_result, has_more ) merged_result = first_result.merge(second_result) merged_result.align_series_to_totals(self._organization) self._query_results[query_index] = merged_result else: current_query = (scheduled_query, query_result, has_more) if previous_result is None: self._query_results[query_index] = PartialQueryResult( previous_queries=[current_query], ) elif isinstance(previous_result, PartialQueryResult): previous_result.previous_queries.append(current_query) # We bump the next query after the results have been merged, so that the next call to the function will # execute the next queries in the chain. 
self._scheduled_queries[query_index] = scheduled_query.next return True def _execution_loop(self) -> None: """ Executes the next batch of queries until no query is left. """ continue_execution = True while continue_execution: continue_execution = self._bulk_execute() def execute(self) -> list[QueryResult]: """ Executes the scheduled queries in the execution loop. Returns: The results of the scheduled queries. """ if not self._scheduled_queries: return [] with metrics.timer(key="ddm.metrics_api.execution.total_execution_time"): self._execution_loop() metrics.distribution( key="ddm.metrics_api.execution.number_of_executed_queries", value=self._number_of_executed_queries, ) for query_result in self._query_results: if not isinstance(query_result, QueryResult): raise MetricsQueryExecutionError( "Not all queries were executed in the execution loop" ) return cast(list[QueryResult], self._query_results) def schedule(self, intermediate_query: IntermediateQuery, query_type: QueryType) -> None: """ Lazily schedules an IntermediateQuery for execution and runs initialization code for each ScheduledQuery. """ # By default, we always want to have a totals query. totals_query = ScheduledQuery( type=ScheduledQueryType.TOTALS, metrics_query=intermediate_query.metrics_query, order=intermediate_query.order, limit=intermediate_query.limit, unit_family=intermediate_query.unit_family, unit=intermediate_query.unit, scaling_factor=intermediate_query.scaling_factor, mappers=intermediate_query.mappers, ) # In case the user chooses to run also a series query, we will duplicate the query and chain it after totals. series_query = None if query_type == QueryType.TOTALS_AND_SERIES: series_query = replace(totals_query, type=ScheduledQueryType.SERIES) # We initialize the query by performing type-aware mutations that prepare the query to be executed correctly # (e.g., adding `totals` to a totals query...). 
final_query = replace(totals_query, next=series_query).initialize( self._organization, self._projects ) self._scheduled_queries.append(final_query) self._query_results.append(None)
QueryExecutor
python
doocs__leetcode
solution/0100-0199/0159.Longest Substring with At Most Two Distinct Characters/Solution.py
{ "start": 0, "end": 396 }
class ____: def lengthOfLongestSubstringTwoDistinct(self, s: str) -> int: cnt = Counter() ans = j = 0 for i, c in enumerate(s): cnt[c] += 1 while len(cnt) > 2: cnt[s[j]] -= 1 if cnt[s[j]] == 0: cnt.pop(s[j]) j += 1 ans = max(ans, i - j + 1) return ans
Solution
python
huggingface__transformers
src/transformers/models/dinov2/modeling_dinov2.py
{ "start": 16741, "end": 18181 }
class ____(PreTrainedModel): config: Dinov2Config base_model_prefix = "dinov2" main_input_name = "pixel_values" input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["Dinov2Layer"] _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _supports_attention_backend = True _can_record_outputs = { "attentions": Dinov2SelfAttention, } @torch.no_grad() def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, nn.LayerNorm): init.zeros_(module.bias) init.ones_(module.weight) elif isinstance(module, Dinov2Embeddings): init.trunc_normal_(module.position_embeddings, mean=0.0, std=self.config.initializer_range) init.trunc_normal_(module.cls_token, mean=0.0, std=self.config.initializer_range) if self.config.use_mask_token: init.zeros_(module.mask_token) elif isinstance(module, Dinov2LayerScale): init.constant_(module.lambda1, self.config.layerscale_value) @auto_docstring
Dinov2PreTrainedModel
python
huggingface__transformers
src/transformers/models/qwen2/modular_qwen2.py
{ "start": 9244, "end": 9525 }
class ____(LlamaForQuestionAnswering): pass __all__ = [ "Qwen2PreTrainedModel", "Qwen2Model", "Qwen2ForCausalLM", "Qwen2RMSNorm", "Qwen2ForSequenceClassification", "Qwen2ForTokenClassification", "Qwen2ForQuestionAnswering", ]
Qwen2ForQuestionAnswering
python
mlflow__mlflow
mlflow/models/evaluation/validation.py
{ "start": 5735, "end": 5992 }
class ____(MlflowException): def __init__(self, _message, **kwargs): message = "Could not instantiate MetricThreshold class: " + _message super().__init__(message, error_code=INVALID_PARAMETER_VALUE, **kwargs)
MetricThresholdClassException
python
scipy__scipy
scipy/io/_mmio.py
{ "start": 6555, "end": 32076 }
class ____: __slots__ = ('_rows', '_cols', '_entries', '_format', '_field', '_symmetry') @property def rows(self): return self._rows @property def cols(self): return self._cols @property def entries(self): return self._entries @property def format(self): return self._format @property def field(self): return self._field @property def symmetry(self): return self._symmetry @property def has_symmetry(self): return self._symmetry in (self.SYMMETRY_SYMMETRIC, self.SYMMETRY_SKEW_SYMMETRIC, self.SYMMETRY_HERMITIAN) # format values FORMAT_COORDINATE = 'coordinate' FORMAT_ARRAY = 'array' FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY) @classmethod def _validate_format(self, format): if format not in self.FORMAT_VALUES: msg = f'unknown format type {format}, must be one of {self.FORMAT_VALUES}' raise ValueError(msg) # field values FIELD_INTEGER = 'integer' FIELD_UNSIGNED = 'unsigned-integer' FIELD_REAL = 'real' FIELD_COMPLEX = 'complex' FIELD_PATTERN = 'pattern' FIELD_VALUES = (FIELD_INTEGER, FIELD_UNSIGNED, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN) @classmethod def _validate_field(self, field): if field not in self.FIELD_VALUES: msg = f'unknown field type {field}, must be one of {self.FIELD_VALUES}' raise ValueError(msg) # symmetry values SYMMETRY_GENERAL = 'general' SYMMETRY_SYMMETRIC = 'symmetric' SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric' SYMMETRY_HERMITIAN = 'hermitian' SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC, SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN) @classmethod def _validate_symmetry(self, symmetry): if symmetry not in self.SYMMETRY_VALUES: raise ValueError(f'unknown symmetry type {symmetry}, ' f'must be one of {self.SYMMETRY_VALUES}') DTYPES_BY_FIELD = {FIELD_INTEGER: 'intp', FIELD_UNSIGNED: 'uint64', FIELD_REAL: 'd', FIELD_COMPLEX: 'D', FIELD_PATTERN: 'd'} # ------------------------------------------------------------------------- @staticmethod def reader(): pass # ------------------------------------------------------------------------- 
@staticmethod def writer(): pass # ------------------------------------------------------------------------- @classmethod def info(self, source): """ Return size, storage parameters from Matrix Market file-like 'source'. Parameters ---------- source : str or file-like Matrix Market filename (extension .mtx) or open file-like object Returns ------- rows : int Number of matrix rows. cols : int Number of matrix columns. entries : int Number of non-zero entries of a sparse matrix or rows*cols for a dense matrix. format : str Either 'coordinate' or 'array'. field : str Either 'real', 'complex', 'pattern', or 'integer'. symmetry : str Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. """ stream, close_it = self._open(source) try: # read and validate header line line = stream.readline() mmid, matrix, format, field, symmetry = \ (asstr(part.strip()) for part in line.split()) if not mmid.startswith('%%MatrixMarket'): raise ValueError('source is not in Matrix Market format') if not matrix.lower() == 'matrix': raise ValueError("Problem reading file header: " + line) # http://math.nist.gov/MatrixMarket/formats.html if format.lower() == 'array': format = self.FORMAT_ARRAY elif format.lower() == 'coordinate': format = self.FORMAT_COORDINATE # skip comments # line.startswith('%') while line: if line.lstrip() and line.lstrip()[0] in ['%', 37]: line = stream.readline() else: break # skip empty lines while not line.strip(): line = stream.readline() split_line = line.split() if format == self.FORMAT_ARRAY: if not len(split_line) == 2: raise ValueError("Header line not of length 2: " + line.decode('ascii')) rows, cols = map(int, split_line) entries = rows * cols else: if not len(split_line) == 3: raise ValueError("Header line not of length 3: " + line.decode('ascii')) rows, cols, entries = map(int, split_line) return (rows, cols, entries, format, field.lower(), symmetry.lower()) finally: if close_it: stream.close() # 
------------------------------------------------------------------------- @staticmethod def _open(filespec, mode='rb'): """ Return an open file stream for reading based on source. If source is a file name, open it (after trying to find it with mtx and gzipped mtx extensions). Otherwise, just return source. Parameters ---------- filespec : str or file-like String giving file name or file-like object mode : str, optional Mode with which to open file, if `filespec` is a file name. Returns ------- fobj : file-like Open file-like object. close_it : bool True if the calling function should close this file when done, false otherwise. """ # If 'filespec' is path-like (str, pathlib.Path, os.DirEntry, other class # implementing a '__fspath__' method), try to convert it to str. If this # fails by throwing a 'TypeError', assume it's an open file handle and # return it as-is. try: filespec = os.fspath(filespec) except TypeError: return filespec, False # 'filespec' is definitely a str now # open for reading if mode[0] == 'r': # determine filename plus extension if not os.path.isfile(filespec): if os.path.isfile(filespec+'.mtx'): filespec = filespec + '.mtx' elif os.path.isfile(filespec+'.mtx.gz'): filespec = filespec + '.mtx.gz' elif os.path.isfile(filespec+'.mtx.bz2'): filespec = filespec + '.mtx.bz2' # open filename if filespec.endswith('.gz'): import gzip stream = gzip.open(filespec, mode) elif filespec.endswith('.bz2'): import bz2 stream = bz2.BZ2File(filespec, 'rb') else: stream = open(filespec, mode) # open for writing else: if filespec[-4:] != '.mtx': filespec = filespec + '.mtx' stream = open(filespec, mode) return stream, True # ------------------------------------------------------------------------- @staticmethod def _get_symmetry(a): m, n = a.shape if m != n: return MMFile.SYMMETRY_GENERAL issymm = True isskew = True isherm = a.dtype.char in 'FD' # sparse input if issparse(a): # check if number of nonzero entries of lower and upper triangle # matrix are equal a = 
a.tocoo() (row, col) = a.nonzero() if (row < col).sum() != (row > col).sum(): return MMFile.SYMMETRY_GENERAL # define iterator over symmetric pair entries a = a.todok() def symm_iterator(): for ((i, j), aij) in a.items(): if i > j: aji = a[j, i] yield (aij, aji, False) elif i == j: yield (aij, aij, True) # non-sparse input else: # define iterator over symmetric pair entries def symm_iterator(): for j in range(n): for i in range(j, n): aij, aji = a[i][j], a[j][i] yield (aij, aji, i == j) # check for symmetry # yields aij, aji, is_diagonal for (aij, aji, is_diagonal) in symm_iterator(): if isskew and is_diagonal and aij != 0: isskew = False else: if issymm and aij != aji: issymm = False with np.errstate(over="ignore"): # This can give a warning for uint dtypes, so silence that if isskew and aij != -aji: isskew = False if isherm and aij != conj(aji): isherm = False if not (issymm or isskew or isherm): break # return symmetry value if issymm: return MMFile.SYMMETRY_SYMMETRIC if isskew: return MMFile.SYMMETRY_SKEW_SYMMETRIC if isherm: return MMFile.SYMMETRY_HERMITIAN return MMFile.SYMMETRY_GENERAL # ------------------------------------------------------------------------- @staticmethod def _field_template(field, precision): return {MMFile.FIELD_REAL: '%%.%ie\n' % precision, MMFile.FIELD_INTEGER: '%i\n', MMFile.FIELD_UNSIGNED: '%u\n', MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % (precision, precision) }.get(field, None) # ------------------------------------------------------------------------- def __init__(self, **kwargs): self._init_attrs(**kwargs) # ------------------------------------------------------------------------- def read(self, source, *, spmatrix=True): """ Reads the contents of a Matrix Market file-like 'source' into a matrix. Parameters ---------- source : str or file-like Matrix Market filename (extensions .mtx, .mtz.gz) or open file object. spmatrix : bool, optional (default: True) If ``True``, return sparse matrix. Otherwise return sparse array. 
Returns ------- a : ndarray or coo_array or coo_matrix Dense or sparse array depending on the matrix format in the Matrix Market file. """ stream, close_it = self._open(source) try: self._parse_header(stream) data = self._parse_body(stream) finally: if close_it: stream.close() if spmatrix and isinstance(data, coo_array): data = coo_matrix(data) return data # ------------------------------------------------------------------------- def write(self, target, a, comment='', field=None, precision=None, symmetry=None): """ Writes sparse or dense array `a` to Matrix Market file-like `target`. Parameters ---------- target : str or file-like Matrix Market filename (extension .mtx) or open file-like object. a : array like Sparse or dense 2-D array. comment : str, optional Comments to be prepended to the Matrix Market file. field : None or str, optional Either 'real', 'complex', 'pattern', or 'integer'. precision : None or int, optional Number of digits to display for real or complex values. symmetry : None or str, optional Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'. If symmetry is None the symmetry type of 'a' is determined by its values. 
""" stream, close_it = self._open(target, 'wb') try: self._write(stream, a, comment, field, precision, symmetry) finally: if close_it: stream.close() else: stream.flush() # ------------------------------------------------------------------------- def _init_attrs(self, **kwargs): """ Initialize each attributes with the corresponding keyword arg value or a default of None """ attrs = self.__class__.__slots__ public_attrs = [attr[1:] for attr in attrs] invalid_keys = set(kwargs.keys()) - set(public_attrs) if invalid_keys: raise ValueError(f"found {tuple(invalid_keys)} invalid keyword " f"arguments, please only use {public_attrs}") for attr in attrs: setattr(self, attr, kwargs.get(attr[1:], None)) # ------------------------------------------------------------------------- def _parse_header(self, stream): rows, cols, entries, format, field, symmetry = \ self.__class__.info(stream) self._init_attrs(rows=rows, cols=cols, entries=entries, format=format, field=field, symmetry=symmetry) # ------------------------------------------------------------------------- def _parse_body(self, stream): rows, cols, entries, format, field, symm = (self.rows, self.cols, self.entries, self.format, self.field, self.symmetry) dtype = self.DTYPES_BY_FIELD.get(field, None) has_symmetry = self.has_symmetry is_integer = field == self.FIELD_INTEGER is_unsigned_integer = field == self.FIELD_UNSIGNED is_complex = field == self.FIELD_COMPLEX is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC is_herm = symm == self.SYMMETRY_HERMITIAN is_pattern = field == self.FIELD_PATTERN if format == self.FORMAT_ARRAY: a = zeros((rows, cols), dtype=dtype) line = 1 i, j = 0, 0 if is_skew: a[i, j] = 0 if i < rows - 1: i += 1 while line: line = stream.readline() # line.startswith('%') if not line or line[0] in ['%', 37] or not line.strip(): continue if is_integer: aij = int(line) elif is_unsigned_integer: aij = int(line) elif is_complex: aij = complex(*map(float, line.split())) else: aij = float(line) a[i, j] = aij if 
has_symmetry and i != j: if is_skew: a[j, i] = -aij elif is_herm: a[j, i] = conj(aij) else: a[j, i] = aij if i < rows-1: i = i + 1 else: j = j + 1 if not has_symmetry: i = 0 else: i = j if is_skew: a[i, j] = 0 if i < rows-1: i += 1 if is_skew: if not (i in [0, j] and j == cols - 1): raise ValueError("Parse error, did not read all lines.") else: if not (i in [0, j] and j == cols): raise ValueError("Parse error, did not read all lines.") elif format == self.FORMAT_COORDINATE: # Read sparse COOrdinate format if entries == 0: # empty matrix return coo_array((rows, cols), dtype=dtype) I = zeros(entries, dtype='intc') J = zeros(entries, dtype='intc') if is_pattern: V = ones(entries, dtype='int8') elif is_integer: V = zeros(entries, dtype='intp') elif is_unsigned_integer: V = zeros(entries, dtype='uint64') elif is_complex: V = zeros(entries, dtype='complex') else: V = zeros(entries, dtype='float') entry_number = 0 for line in stream: # line.startswith('%') if not line or line[0] in ['%', 37] or not line.strip(): continue if entry_number+1 > entries: raise ValueError("'entries' in header is smaller than " "number of entries") l = line.split() I[entry_number], J[entry_number] = map(int, l[:2]) if not is_pattern: if is_integer: V[entry_number] = int(l[2]) elif is_unsigned_integer: V[entry_number] = int(l[2]) elif is_complex: V[entry_number] = complex(*map(float, l[2:])) else: V[entry_number] = float(l[2]) entry_number += 1 if entry_number < entries: raise ValueError("'entries' in header is larger than " "number of entries") I -= 1 # adjust indices (base 1 -> base 0) J -= 1 if has_symmetry: mask = (I != J) # off diagonal mask od_I = I[mask] od_J = J[mask] od_V = V[mask] I = concatenate((I, od_J)) J = concatenate((J, od_I)) if is_skew: od_V *= -1 elif is_herm: od_V = od_V.conjugate() V = concatenate((V, od_V)) a = coo_array((V, (I, J)), shape=(rows, cols), dtype=dtype) else: raise NotImplementedError(format) return a # 
------------------------------------------------------------------------ def _write(self, stream, a, comment='', field=None, precision=None, symmetry=None): if isinstance(a, list) or isinstance(a, ndarray) or \ isinstance(a, tuple) or hasattr(a, '__array__'): rep = self.FORMAT_ARRAY a = asarray(a) if len(a.shape) != 2: raise ValueError('Expected 2 dimensional array') rows, cols = a.shape if field is not None: if field == self.FIELD_INTEGER: if not can_cast(a.dtype, 'intp'): raise OverflowError("mmwrite does not support integer " "dtypes larger than native 'intp'.") a = a.astype('intp') elif field == self.FIELD_REAL: if a.dtype.char not in 'fd': a = a.astype('d') elif field == self.FIELD_COMPLEX: if a.dtype.char not in 'FD': a = a.astype('D') else: if not issparse(a): raise ValueError(f'unknown matrix type: {type(a)}') rep = 'coordinate' rows, cols = a.shape typecode = a.dtype.char if precision is None: if typecode in 'fF': precision = 8 else: precision = 16 if field is None: kind = a.dtype.kind if kind == 'i': if not can_cast(a.dtype, 'intp'): raise OverflowError("mmwrite does not support integer " "dtypes larger than native 'intp'.") field = 'integer' elif kind == 'f': field = 'real' elif kind == 'c': field = 'complex' elif kind == 'u': field = 'unsigned-integer' else: raise TypeError('unexpected dtype kind ' + kind) if symmetry is None: symmetry = self._get_symmetry(a) # validate rep, field, and symmetry self.__class__._validate_format(rep) self.__class__._validate_field(field) self.__class__._validate_symmetry(symmetry) # write initial header line data = f'%%MatrixMarket matrix {rep} {field} {symmetry}\n' stream.write(data.encode('latin1')) # write comments for line in comment.split('\n'): data = f'%{line}\n' stream.write(data.encode('latin1')) template = self._field_template(field, precision) # write dense format if rep == self.FORMAT_ARRAY: # write shape spec data = '%i %i\n' % (rows, cols) stream.write(data.encode('latin1')) if field in (self.FIELD_INTEGER, 
self.FIELD_REAL, self.FIELD_UNSIGNED): if symmetry == self.SYMMETRY_GENERAL: for j in range(cols): for i in range(rows): data = template % a[i, j] stream.write(data.encode('latin1')) elif symmetry == self.SYMMETRY_SKEW_SYMMETRIC: for j in range(cols): for i in range(j + 1, rows): data = template % a[i, j] stream.write(data.encode('latin1')) else: for j in range(cols): for i in range(j, rows): data = template % a[i, j] stream.write(data.encode('latin1')) elif field == self.FIELD_COMPLEX: if symmetry == self.SYMMETRY_GENERAL: for j in range(cols): for i in range(rows): aij = a[i, j] data = template % (real(aij), imag(aij)) stream.write(data.encode('latin1')) else: for j in range(cols): for i in range(j, rows): aij = a[i, j] data = template % (real(aij), imag(aij)) stream.write(data.encode('latin1')) elif field == self.FIELD_PATTERN: raise ValueError('pattern type inconsisted with dense format') else: raise TypeError(f'Unknown field type {field}') # write sparse format else: coo = a.tocoo() # convert to COOrdinate format # if symmetry format used, remove values above main diagonal if symmetry != self.SYMMETRY_GENERAL: lower_triangle_mask = coo.row >= coo.col coo = coo_array((coo.data[lower_triangle_mask], (coo.row[lower_triangle_mask], coo.col[lower_triangle_mask])), shape=coo.shape) # write shape spec data = '%i %i %i\n' % (rows, cols, coo.nnz) stream.write(data.encode('latin1')) template = self._field_template(field, precision-1) if field == self.FIELD_PATTERN: for r, c in zip(coo.row+1, coo.col+1): data = "%i %i\n" % (r, c) stream.write(data.encode('latin1')) elif field in (self.FIELD_INTEGER, self.FIELD_REAL, self.FIELD_UNSIGNED): for r, c, d in zip(coo.row+1, coo.col+1, coo.data): data = ("%i %i " % (r, c)) + (template % d) stream.write(data.encode('latin1')) elif field == self.FIELD_COMPLEX: for r, c, d in zip(coo.row+1, coo.col+1, coo.data): data = ("%i %i " % (r, c)) + (template % (d.real, d.imag)) stream.write(data.encode('latin1')) else: raise 
TypeError(f'Unknown field type {field}') def _is_fromfile_compatible(stream): """ Check whether `stream` is compatible with numpy.fromfile. Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with Python 3. """ bad_cls = [] try: import gzip bad_cls.append(gzip.GzipFile) except ImportError: pass try: import bz2 bad_cls.append(bz2.BZ2File) except ImportError: pass bad_cls = tuple(bad_cls) return not isinstance(stream, bad_cls)
MMFile
python
ipython__ipython
IPython/lib/pretty.py
{ "start": 10434, "end": 14965 }
class ____(PrettyPrinter): """ Special pretty printer that has a `pretty` method that calls the pretty printer for a python object. This class stores processing data on `self` so you must *never* use this class in a threaded environment. Always lock it or reinstanciate it. Instances also have a verbose flag callbacks can access to control their output. For example the default instance repr prints all attributes and methods that are not prefixed by an underscore if the printer is in verbose mode. """ def __init__(self, output, verbose=False, max_width=79, newline='\n', singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None, max_seq_length=MAX_SEQ_LENGTH): PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length) self.verbose = verbose self.stack = [] if singleton_pprinters is None: singleton_pprinters = _singleton_pprinters.copy() self.singleton_pprinters = singleton_pprinters if type_pprinters is None: type_pprinters = _type_pprinters.copy() self.type_pprinters = type_pprinters if deferred_pprinters is None: deferred_pprinters = _deferred_type_pprinters.copy() self.deferred_pprinters = deferred_pprinters def pretty(self, obj): """Pretty print the given object.""" obj_id = id(obj) cycle = obj_id in self.stack self.stack.append(obj_id) self.begin_group() try: obj_class = _safe_getattr(obj, '__class__', None) or type(obj) # First try to find registered singleton printers for the type. try: printer = self.singleton_pprinters[obj_id] except (TypeError, KeyError): pass else: return printer(obj, self, cycle) # Next walk the mro and check for either: # 1) a registered printer # 2) a _repr_pretty_ method for cls in _get_mro(obj_class): if cls in self.type_pprinters: # printer registered in self.type_pprinters return self.type_pprinters[cls](obj, self, cycle) else: # deferred printer printer = self._in_deferred_types(cls) if printer is not None: return printer(obj, self, cycle) else: # Finally look for special method names. 
# Some objects automatically create any requested # attribute. Try to ignore most of them by checking for # callability. if '_repr_pretty_' in cls.__dict__: meth = cls._repr_pretty_ if callable(meth): return meth(obj, self, cycle) if ( cls is not object # check if cls defines __repr__ and "__repr__" in cls.__dict__ # check if __repr__ is callable. # Note: we need to test getattr(cls, '__repr__') # instead of cls.__dict__['__repr__'] # in order to work with descriptors like partialmethod, and callable(_safe_getattr(cls, "__repr__", None)) ): return _repr_pprint(obj, self, cycle) return _default_pprint(obj, self, cycle) finally: self.end_group() self.stack.pop() def _in_deferred_types(self, cls): """ Check if the given class is specified in the deferred type registry. Returns the printer from the registry if it exists, and None if the class is not in the registry. Successful matches will be moved to the regular type registry for future use. """ mod = _safe_getattr(cls, '__module__', None) name = _safe_getattr(cls, '__name__', None) key = (mod, name) printer = None if key in self.deferred_pprinters: # Move the printer over to the regular registry. printer = self.deferred_pprinters.pop(key) self.type_pprinters[cls] = printer return printer
RepresentationPrinter
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/sensors/eks.py
{ "start": 6221, "end": 8574 }
class ____(EksBaseSensor): """ Check the state of an AWS Fargate profile until it reaches the target state or another terminal state. .. seealso:: For more information on how to use this sensor, take a look at the guide: :ref:`howto/sensor:EksFargateProfileStateSensor` :param cluster_name: The name of the Cluster which the AWS Fargate profile is attached to. (templated) :param fargate_profile_name: The name of the Fargate profile to watch. (templated) :param target_state: Target state of the Fargate profile. (templated) :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html """ template_fields: Sequence[str] = aws_template_fields( "cluster_name", "fargate_profile_name", "target_state" ) ui_color = "#ff9900" ui_fgcolor = "#232F3E" def __init__( self, *, fargate_profile_name: str, region: str | None = None, target_state: FargateProfileStates = FargateProfileStates.ACTIVE, **kwargs, ): if region is not None: warnings.warn( message="Parameter `region` is deprecated. Use the parameter `region_name` instead", category=AirflowProviderDeprecationWarning, stacklevel=2, ) kwargs["region_name"] = region super().__init__(target_state=target_state, target_state_type=FargateProfileStates, **kwargs) self.fargate_profile_name = fargate_profile_name def get_state(self) -> FargateProfileStates: return self.hook.get_fargate_profile_state( clusterName=self.cluster_name, fargateProfileName=self.fargate_profile_name ) def get_terminal_states(self) -> frozenset: return FARGATE_TERMINAL_STATES
EksFargateProfileStateSensor
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 18877, "end": 19908 }
class ____: def setup_method(self): self.rng = np.random.default_rng(7836792223) def test_rvs(self): vals = stats.bernoulli.rvs(0.75, size=(2, 50), random_state=self.rng) assert_(np.all(vals >= 0) & np.all(vals <= 1)) assert_(np.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.bernoulli.rvs(0.75, random_state=self.rng) assert_(isinstance(val, int)) val = stats.bernoulli(0.75).rvs(3, random_state=self.rng) assert_(isinstance(val, np.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_entropy(self): # Simple tests of entropy. b = stats.bernoulli(0.25) expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75) h = b.entropy() assert_allclose(h, expected_h) b = stats.bernoulli(0.0) h = b.entropy() assert_equal(h, 0.0) b = stats.bernoulli(1.0) h = b.entropy() assert_equal(h, 0.0)
TestBernoulli
python
django__django
django/core/mail/backends/smtp.py
{ "start": 398, "end": 7366 }
class ____(BaseEmailBackend): """ A wrapper that manages the SMTP network connection. """ def __init__( self, host=None, port=None, username=None, password=None, use_tls=None, fail_silently=False, use_ssl=None, timeout=None, ssl_keyfile=None, ssl_certfile=None, **kwargs, ): super().__init__(fail_silently=fail_silently) self.host = host or settings.EMAIL_HOST self.port = port or settings.EMAIL_PORT self.username = settings.EMAIL_HOST_USER if username is None else username self.password = settings.EMAIL_HOST_PASSWORD if password is None else password self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout self.ssl_keyfile = ( settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile ) self.ssl_certfile = ( settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile ) if self.use_ssl and self.use_tls: raise ValueError( "EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set " "one of those settings to True." ) self.connection = None self._lock = threading.RLock() @property def connection_class(self): return smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP @cached_property def ssl_context(self): if self.ssl_certfile or self.ssl_keyfile: ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT) ssl_context.load_cert_chain(self.ssl_certfile, self.ssl_keyfile) return ssl_context else: return ssl.create_default_context() def open(self): """ Ensure an open connection to the email server. Return whether or not a new connection was required (True or False) or None if an exception passed silently. """ if self.connection: # Nothing to do if the connection is already open. return False # If local_hostname is not specified, socket.getfqdn() gets used. # For performance, we use the cached FQDN for local_hostname. 
connection_params = {"local_hostname": DNS_NAME.get_fqdn()} if self.timeout is not None: connection_params["timeout"] = self.timeout if self.use_ssl: connection_params["context"] = self.ssl_context try: self.connection = self.connection_class( self.host, self.port, **connection_params ) # TLS/SSL are mutually exclusive, so only attempt TLS over # non-secure connections. if not self.use_ssl and self.use_tls: self.connection.starttls(context=self.ssl_context) if self.username and self.password: self.connection.login(self.username, self.password) return True except OSError: if not self.fail_silently: raise def close(self): """Close the connection to the email server.""" if self.connection is None: return try: try: self.connection.quit() except (ssl.SSLError, smtplib.SMTPServerDisconnected): # This happens when calling quit() on a TLS connection # sometimes, or when the connection was already disconnected # by the server. self.connection.close() except smtplib.SMTPException: if self.fail_silently: return raise finally: self.connection = None def send_messages(self, email_messages): """ Send one or more EmailMessage objects and return the number of email messages sent. """ if not email_messages: return 0 with self._lock: new_conn_created = self.open() if not self.connection or new_conn_created is None: # We failed silently on open(). # Trying to send would be pointless. 
return 0 num_sent = 0 try: for message in email_messages: sent = self._send(message) if sent: num_sent += 1 finally: if new_conn_created: self.close() return num_sent def _send(self, email_message): """A helper method that does the actual sending.""" if not email_message.recipients(): return False from_email = self.prep_address(email_message.from_email) recipients = [self.prep_address(addr) for addr in email_message.recipients()] message = email_message.message(policy=email.policy.SMTP) try: self.connection.sendmail(from_email, recipients, message.as_bytes()) except smtplib.SMTPException: if not self.fail_silently: raise return False return True def prep_address(self, address, force_ascii=True): """ Return the addr-spec portion of an email address. Raises ValueError for invalid addresses, including CR/NL injection. If force_ascii is True, apply IDNA encoding to non-ASCII domains, and raise ValueError for non-ASCII local-parts (which can't be encoded). Otherwise, leave Unicode characters unencoded (e.g., for sending with SMTPUTF8). """ address = force_str(address) parsed = AddressHeader.value_parser(address) defects = set(str(defect) for defect in parsed.all_defects) # Django allows local mailboxes like "From: webmaster" (#15042). defects.discard("addr-spec local part with no domain") if not force_ascii: # Non-ASCII local-part is valid with SMTPUTF8. Remove once # https://github.com/python/cpython/issues/81074 is fixed. defects.discard("local-part contains non-ASCII characters)") if defects: raise ValueError(f"Invalid address {address!r}: {'; '.join(defects)}") mailboxes = parsed.all_mailboxes if len(mailboxes) != 1: raise ValueError(f"Invalid address {address!r}: must be a single address") mailbox = mailboxes[0] if force_ascii and mailbox.domain and not mailbox.domain.isascii(): # Re-compose an addr-spec with the IDNA encoded domain. domain = punycode(mailbox.domain) return str(Address(username=mailbox.local_part, domain=domain)) else: return mailbox.addr_spec
EmailBackend
python
has2k1__plotnine
plotnine/scales/scale_color.py
{ "start": 13237, "end": 13900 }
class ____(scale_datetime, scale_color_cmap): # pyright: ignore[reportIncompatibleVariableOverride] """ Datetime color scale See Also -------- plotnine.scale_color_cmap : The parent class. """ _: KW_ONLY guide: Literal["legend", "colorbar"] | None = "colorbar" def __post_init__( self, cmap_name: str, date_breaks: str | None, date_labels: str | None, date_minor_breaks: str | None, ): from mizani.palettes import cmap_pal super().__post_init__(date_breaks, date_labels, date_minor_breaks) self.palette = cmap_pal(cmap_name) @dataclass
scale_color_datetime
python
jazzband__django-model-utils
model_utils/models.py
{ "start": 5915, "end": 6215 }
class ____(models.Model): """ This abstract base class provides id field on any model that inherits from it which will be the primary key. """ id = UUIDField( primary_key=True, version=4, editable=False, ) class Meta: abstract = True
UUIDModel
python
Lightning-AI__lightning
tests/tests_pytorch/loggers/test_all.py
{ "start": 7388, "end": 7844 }
class ____(Callback): def setup(self, trainer, pl_module, stage=None): if trainer.global_rank > 0: return if isinstance(trainer.logger, MLFlowLogger): assert trainer.logger._mlflow_client elif isinstance(trainer.logger, NeptuneLogger): assert trainer.logger._run_instance elif hasattr(trainer.logger, "_experiment"): assert trainer.logger._experiment
LazyInitExperimentCheck
python
django__django
tests/queries/tests.py
{ "start": 90331, "end": 91738 }
class ____(unittest.TestCase): """ Tests for the Queryset.ordered attribute. """ def test_no_default_or_explicit_ordering(self): self.assertIs(Annotation.objects.all().ordered, False) def test_cleared_default_ordering(self): self.assertIs(Tag.objects.all().ordered, True) self.assertIs(Tag.objects.order_by().ordered, False) def test_explicit_ordering(self): self.assertIs(Annotation.objects.order_by("id").ordered, True) def test_empty_queryset(self): self.assertIs(Annotation.objects.none().ordered, True) def test_order_by_extra(self): self.assertIs(Annotation.objects.extra(order_by=["id"]).ordered, True) def test_annotated_ordering(self): qs = Annotation.objects.annotate(num_notes=Count("notes")) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by("num_notes").ordered, True) def test_annotated_default_ordering(self): qs = Tag.objects.annotate(num_notes=Count("pk")) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by("name").ordered, True) def test_annotated_values_default_ordering(self): qs = Tag.objects.values("name").annotate(num_notes=Count("pk")) self.assertIs(qs.ordered, False) self.assertIs(qs.order_by("name").ordered, True) @skipUnlessDBFeature("allow_sliced_subqueries_with_in")
QuerysetOrderedTests
python
PyCQA__pylint
pylint/checkers/bad_chained_comparison.py
{ "start": 611, "end": 2238 }
class ____(BaseChecker): """Checks for unintentional usage of chained comparison.""" name = "bad-chained-comparison" msgs = { "W3601": ( "Suspicious %s-part chained comparison using semantically incompatible operators (%s)", "bad-chained-comparison", "Used when there is a chained comparison where one expression is part " "of two comparisons that belong to different semantic groups " '("<" does not mean the same thing as "is", chaining them in ' '"0 < x is None" is probably a mistake).', ) } def _has_diff_semantic_groups(self, operators: list[str]) -> bool: # Check if comparison operators are in the same semantic group for semantic_group in (COMPARISON_OP, IDENTITY_OP, MEMBERSHIP_OP): if operators[0] in semantic_group: group = semantic_group return not all(o in group for o in operators) def visit_compare(self, node: nodes.Compare) -> None: operators = sorted({op[0] for op in node.ops}) if self._has_diff_semantic_groups(operators): num_parts = f"{len(node.ops)}" incompatibles = ( ", ".join(f"'{o}'" for o in operators[:-1]) + f" and '{operators[-1]}'" ) self.add_message( "bad-chained-comparison", node=node, args=(num_parts, incompatibles), confidence=HIGH, ) def register(linter: PyLinter) -> None: linter.register_checker(BadChainedComparisonChecker(linter))
BadChainedComparisonChecker
python
django__django
tests/admin_changelist/models.py
{ "start": 521, "end": 818 }
class ____(models.Model): parent = models.ForeignKey(Child, models.SET_NULL, editable=False, null=True) name = models.CharField(max_length=30, blank=True) def __str__(self): return self.name def __html__(self): return f'<h2 class="main">{self.name}</h2>'
GrandChild
python
realpython__materials
duck-typing-python/birds_v1.py
{ "start": 256, "end": 397 }
class ____: def swim(self): print("The albatross is swimming") def fly(self): print("The albatross is flying")
Albatross
python
getsentry__sentry
src/sentry/integrations/vsts/integration.py
{ "start": 15918, "end": 26962 }
class ____(IntegrationProvider): key = IntegrationProviderSlug.AZURE_DEVOPS.value name = "Azure DevOps" metadata = metadata api_version = "4.1" oauth_redirect_url = "/extensions/vsts/setup/" needs_default_identity = True integration_cls = VstsIntegration CURRENT_MIGRATION_VERSION = 1 NEW_SCOPES = ("offline_access", "499b84ac-1321-427f-aa17-267ca6975798/.default") features = frozenset( [ IntegrationFeatures.COMMITS, IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.ISSUE_SYNC, IntegrationFeatures.STACKTRACE_LINK, IntegrationFeatures.CODEOWNERS, IntegrationFeatures.TICKET_RULES, ] ) setup_dialog_config = {"width": 600, "height": 800} VSTS_ACCOUNT_LOOKUP_URL = "https://app.vssps.visualstudio.com/_apis/resourceareas/79134C72-4A58-4B42-976C-04E7115F32BF?hostId=%s&api-preview=5.0-preview.1" def post_install( self, integration: IntegrationModel, organization: RpcOrganization, *, extra: dict[str, Any], ) -> None: repos = repository_service.get_repositories( organization_id=organization.id, providers=["visualstudio", "integrations:vsts"], has_integration=False, ) for repo in repos: migrate_repo.apply_async( kwargs={ "repo_id": repo.id, "integration_id": integration.id, "organization_id": organization.id, } ) def get_scopes(self) -> Sequence[str]: # TODO(ecosystem): Delete this after Azure DevOps migration is complete assert self.pipeline.organization is not None if features.has( "organizations:migrate-azure-devops-integration", self.pipeline.organization ): logger.info( "vsts.get_scopes.new_scopes", extra={"organization_id": self.pipeline.organization.id}, ) # This is the new way we need to pass scopes to the OAuth flow # https://stackoverflow.com/questions/75729931/get-access-token-for-azure-devops-pat return VstsIntegrationProvider.NEW_SCOPES logger.info( "vsts.get_scopes.old_scopes", extra={"organization_id": self.pipeline.organization.id}, ) return ("vso.code", "vso.graph", "vso.serviceendpoint_manage", "vso.work_write") def get_pipeline_views(self) -> 
Sequence[PipelineView[IntegrationPipeline]]: identity_pipeline_config = { "redirect_url": absolute_uri(self.oauth_redirect_url), "oauth_scopes": self.get_scopes(), } return [ NestedPipelineView( bind_key="identity", provider_key=self.key, pipeline_cls=IdentityPipeline, config=identity_pipeline_config, ), AccountConfigView(), ] def build_integration(self, state: Mapping[str, Any]) -> IntegrationData: data = state["identity"]["data"] oauth_data = self.get_oauth_data(data) account = state["account"] user = get_user_info(data["access_token"]) scopes = sorted(self.get_scopes()) base_url = self.get_base_url(data["access_token"], account["accountId"]) assert self.pipeline.organization is not None logger.info( "vsts.build_integration.base_config", extra={"scopes": scopes, "organization_id": self.pipeline.organization.id}, ) integration: IntegrationData = { "name": account["accountName"], "external_id": account["accountId"], "metadata": {"domain_name": base_url, "scopes": scopes}, "user_identity": { "type": IntegrationProviderSlug.AZURE_DEVOPS.value, "external_id": user["id"], "scopes": scopes, "data": oauth_data, }, } # TODO(ecosystem): Clean this up this after Azure DevOps migration is complete try: integration_model = IntegrationModel.objects.get( provider=IntegrationProviderSlug.AZURE_DEVOPS.value, external_id=account["accountId"], status=ObjectStatus.ACTIVE, ) # Get Integration Metadata integration_migration_version = integration_model.metadata.get( "integration_migration_version", 0 ) if ( features.has( "organizations:migrate-azure-devops-integration", self.pipeline.organization ) and integration_migration_version < VstsIntegrationProvider.CURRENT_MIGRATION_VERSION ): subscription_id, subscription_secret = self.create_subscription( base_url=base_url, oauth_data=oauth_data ) integration["metadata"]["subscription"] = { "id": subscription_id, "secret": subscription_secret, } logger.info( "vsts.build_integration.migrated", extra={ "organization_id": 
self.pipeline.organization.id, "user_id": user["id"], "account": account, "migration_version": VstsIntegrationProvider.CURRENT_MIGRATION_VERSION, "subscription_id": subscription_id, "integration_id": integration_model.id, }, ) else: # preserve previously created subscription information integration["metadata"]["subscription"] = integration_model.metadata["subscription"] logger.info( "vsts.build_integration", extra={ "organization_id": self.pipeline.organization.id, "user_id": user["id"], "account": account, }, ) assert OrganizationIntegration.objects.filter( organization_id=self.pipeline.organization.id, integration_id=integration_model.id, status=ObjectStatus.ACTIVE, ).exists() metrics.incr( "integrations.migration.vsts_integration_migration", sample_rate=1.0, ) # Assertion error happens when org_integration does not exist # KeyError happens when subscription is not found except (IntegrationModel.DoesNotExist, AssertionError, KeyError): logger.warning( "vsts.build_integration.error", extra={ "organization_id": ( self.pipeline.organization.id if self.pipeline and self.pipeline.organization else None ), "user_id": user["id"], "account": account, }, ) subscription_id, subscription_secret = self.create_subscription( base_url=base_url, oauth_data=oauth_data ) integration["metadata"]["subscription"] = { "id": subscription_id, "secret": subscription_secret, } # Ensure integration_migration_version is set if the feature flag is active. # This guarantees that if the new scopes are in use (due to the flag), # the metadata correctly reflects the current migration version, even if # the integration was already considered "up-to-date" based on DB records. 
if features.has( "organizations:migrate-azure-devops-integration", self.pipeline.organization ): integration["metadata"][ "integration_migration_version" ] = VstsIntegrationProvider.CURRENT_MIGRATION_VERSION return integration def create_subscription(self, base_url: str, oauth_data: Mapping[str, Any]) -> tuple[int, str]: client = VstsSetupApiClient( base_url=base_url, oauth_redirect_url=self.oauth_redirect_url, access_token=oauth_data["access_token"], ) shared_secret = generate_token() try: subscription = client.create_subscription(shared_secret=shared_secret) except ApiError as e: auth_codes = (400, 401, 403) permission_error = "permission" in str(e) or "not authorized" in str(e) if e.code in auth_codes or permission_error: assert self.pipeline.organization is not None logger.info( "vsts.create_subscription_permission_error", extra={ "organization_id": self.pipeline.organization.id, "error_message": str(e), "error_code": e.code, }, ) raise IntegrationProviderError( "Sentry cannot communicate with this Azure DevOps organization.\n" "Please ensure third-party app access via OAuth is enabled \n" "in the organization's security policy \n" "The user installing the integration must have project administrator permissions. \n" "The user installing might also need admin permissions depending on the organization's security policy." 
) raise subscription_id = subscription["id"] return subscription_id, shared_secret def get_oauth_data(self, payload: Mapping[str, Any]) -> dict[str, Any]: data = {"access_token": payload["access_token"]} if "expires_in" in payload: data["expires"] = int(time()) + int(payload["expires_in"]) if "refresh_token" in payload: data["refresh_token"] = payload["refresh_token"] if "token_type" in payload: data["token_type"] = payload["token_type"] return data @classmethod def get_base_url(cls, access_token: str, account_id: str) -> str: url = VstsIntegrationProvider.VSTS_ACCOUNT_LOOKUP_URL % account_id with http.build_session() as session: response = session.get( url, headers={ "Content-Type": "application/json", "Authorization": f"Bearer {access_token}", }, ) return response.json()["locationUrl"] def setup(self) -> None: from sentry.plugins.base import bindings bindings.add( "integration-repository.provider", VstsRepositoryProvider, id="integrations:vsts" )
VstsIntegrationProvider
python
graphql-python__graphene
graphene/relay/node.py
{ "start": 3039, "end": 4359 }
class ____(AbstractNode): """An object with an ID""" @classmethod def Field(cls, *args, **kwargs): # noqa: N802 return NodeField(cls, *args, **kwargs) @classmethod def node_resolver(cls, only_type, root, info, id): return cls.get_node_from_global_id(info, id, only_type=only_type) @classmethod def get_node_from_global_id(cls, info, global_id, only_type=None): _type, _id = cls.resolve_global_id(info, global_id) graphene_type = info.schema.get_type(_type) if graphene_type is None: raise Exception(f'Relay Node "{_type}" not found in schema') graphene_type = graphene_type.graphene_type if only_type: assert ( graphene_type == only_type ), f"Must receive a {only_type._meta.name} id." # We make sure the ObjectType implements the "Node" interface if cls not in graphene_type._meta.interfaces: raise Exception( f'ObjectType "{_type}" does not implement the "{cls}" interface.' ) get_node = getattr(graphene_type, "get_node", None) if get_node: return get_node(info, _id) @classmethod def to_global_id(cls, type_, id): return cls._meta.global_id_type.to_global_id(type_, id)
Node
python
astropy__astropy
astropy/io/votable/converters.py
{ "start": 32403, "end": 32565 }
class ____(Complex): """ Handle doubleComplex datatype. Pair of double-precision IEEE floating-point numbers. """ format = "c16"
DoubleComplex
python
tensorflow__tensorflow
tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py
{ "start": 71564, "end": 73170 }
class ____(test.TestCase): _PRNG = np.random.RandomState(341261) _SEED = 123456 def _GenerateUniqueRandomInputTensor(self, shape): num_elements = 1 for size in shape: num_elements *= size x = np.arange(num_elements, dtype=np.float32) self._PRNG.shuffle(x) return x.reshape(shape) def testDirectNotUseOverlapping(self): for num_batches in [1]: for row_window_size in [2, 5]: for col_window_size in [2, 4]: num_rows = row_window_size num_cols = col_window_size for num_channels in [1]: input_shape = (num_batches, num_rows, num_cols, num_channels) with self.cached_session() as _: input_tensor = constant_op.constant( self._GenerateUniqueRandomInputTensor(input_shape) ) window_size = [1, row_window_size, col_window_size, 1] stride_size = [1, row_window_size, col_window_size, 1] padding = "VALID" output_tensor = nn_ops.max_pool( input_tensor, window_size, stride_size, padding ) output_data = self.evaluate(output_tensor) output_backprop = self._PRNG.randint(100, size=output_data.shape) input_backprop_tensor = gen_nn_ops.max_pool_grad( input_tensor, output_tensor, output_backprop, window_size, stride_size, padding, ) _ = self.evaluate(input_backprop_tensor)
FractionalMaxPoolGradTest
python
altair-viz__altair
tests/utils/test_core.py
{ "start": 1502, "end": 1630 }
class ____(FieldChannel, schemapi.SchemaBase): _schema = {json_schema_dict_str} _encoding_name = "strokeWidth"
StrokeWidth
python
neetcode-gh__leetcode
python/0417-pacific-atlantic-water-flow.py
{ "start": 0, "end": 1131 }
class ____: def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]: ROWS, COLS = len(heights), len(heights[0]) pac, atl = set(), set() def dfs(r, c, visit, prevHeight): if ( (r, c) in visit or r < 0 or c < 0 or r == ROWS or c == COLS or heights[r][c] < prevHeight ): return visit.add((r, c)) dfs(r + 1, c, visit, heights[r][c]) dfs(r - 1, c, visit, heights[r][c]) dfs(r, c + 1, visit, heights[r][c]) dfs(r, c - 1, visit, heights[r][c]) for c in range(COLS): dfs(0, c, pac, heights[0][c]) dfs(ROWS - 1, c, atl, heights[ROWS - 1][c]) for r in range(ROWS): dfs(r, 0, pac, heights[r][0]) dfs(r, COLS - 1, atl, heights[r][COLS - 1]) res = [] for r in range(ROWS): for c in range(COLS): if (r, c) in pac and (r, c) in atl: res.append([r, c]) return res
Solution
python
RobertCraigie__pyright-python
src/pyright/errors.py
{ "start": 54, "end": 216 }
class ____(Exception): message: str def __init__(self, message: str) -> None: super().__init__(message) self.message = message
PyrightError
python
realpython__materials
python-protocol/contents.py
{ "start": 358, "end": 648 }
class ____: def __init__(self): self.blog_posts = [] def create_content(self) -> str: return "Creating a post." def add_post(self, title: str, content: str) -> None: self.blog_posts.append(f"{title}: {content}") print(f"Post added: {title}")
Blog
python
falconry__falcon
falcon/response.py
{ "start": 53757, "end": 56547 }
class ____: """Defines a set of configurable response options. An instance of this class is exposed via :attr:`falcon.App.resp_options` and :attr:`falcon.asgi.App.resp_options` for configuring certain :class:`~.Response` behaviors. """ secure_cookies_by_default: bool """Set to ``False`` in development environments to make the ``secure`` attribute for all cookies. (default ``True``). This can make testing easier by not requiring HTTPS. Note, however, that this setting can be overridden via :meth:`~.Response.set_cookie()`'s ``secure`` kwarg. """ default_media_type: str """The default Internet media type (RFC 2046) to use when rendering a response, when the Content-Type header is not set explicitly. This value is normally set to the media type provided when a :class:`falcon.App` is initialized; however, if created independently, this will default to :attr:`falcon.DEFAULT_MEDIA_TYPE`. """ media_handlers: Handlers """A dict-like object for configuring the media-types to handle. Default handlers are provided for the ``application/json``, ``application/x-www-form-urlencoded`` and ``multipart/form-data`` media types. """ static_media_types: dict[str, str] """A mapping of dot-prefixed file extensions to Internet media types (RFC 2046). Defaults to ``mimetypes.types_map`` after calling ``mimetypes.init()``. """ xml_error_serialization: bool """Set to ``False`` to disable automatic inclusion of the XML handler in the :ref:`default error serializer <errors>` (default ``True``). Enabling this option does not make Falcon automatically render all error responses in XML, but it is used only in the case the client prefers (via the ``Accept`` request header) XML to JSON and other configured media handlers. Note: Falcon 5.0 will either change the default to ``False``, or remove the automatic XML error serialization altogether. Note: This option has no effect when a custom error serializer, set using :meth:`~falcon.App.set_error_serializer`, is in use. .. 
versionadded:: 4.0 """ __slots__ = ( 'secure_cookies_by_default', 'default_media_type', 'media_handlers', 'static_media_types', 'xml_error_serialization', ) def __init__(self) -> None: self.secure_cookies_by_default = True self.default_media_type = DEFAULT_MEDIA_TYPE self.media_handlers = Handlers() self.xml_error_serialization = True if not mimetypes.inited: mimetypes.init() self.static_media_types = mimetypes.types_map.copy() self.static_media_types.update(_DEFAULT_STATIC_MEDIA_TYPES)
ResponseOptions
python
ansible__ansible
lib/ansible/plugins/doc_fragments/default_callback.py
{ "start": 194, "end": 3308 }
class ____(object): DOCUMENTATION = r""" options: display_skipped_hosts: name: Show skipped hosts description: "Toggle to control displaying skipped task/host results in a task." type: bool default: yes env: - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS ini: - key: display_skipped_hosts section: defaults vars: - name: ansible_display_skipped_hosts version_added: "2.21" display_ok_hosts: name: Show 'ok' hosts description: "Toggle to control displaying 'ok' task/host results in a task." type: bool default: yes env: - name: ANSIBLE_DISPLAY_OK_HOSTS ini: - key: display_ok_hosts section: defaults version_added: '2.7' display_failed_stderr: name: Use STDERR for failed and unreachable tasks description: "Toggle to control whether failed and unreachable tasks are displayed to STDERR rather than STDOUT." type: bool default: no env: - name: ANSIBLE_DISPLAY_FAILED_STDERR ini: - key: display_failed_stderr section: defaults version_added: '2.7' show_custom_stats: name: Show custom stats description: 'This adds the custom stats set via the set_stats plugin to the play recap.' type: bool default: no env: - name: ANSIBLE_SHOW_CUSTOM_STATS ini: - key: show_custom_stats section: defaults show_per_host_start: name: Show per host task start description: 'This adds output that shows when a task starts to execute for each host.' type: bool default: no env: - name: ANSIBLE_SHOW_PER_HOST_START ini: - key: show_per_host_start section: defaults version_added: '2.9' check_mode_markers: name: Show markers when running in check mode description: - Toggle to control displaying markers when running in check mode. - "The markers are C(DRY RUN) at the beginning and ending of playbook execution (when calling C(ansible-playbook --check)) and C(CHECK MODE) as a suffix at every play and task that is run in check mode." 
type: bool default: no version_added: '2.9' env: - name: ANSIBLE_CHECK_MODE_MARKERS ini: - key: check_mode_markers section: defaults show_task_path_on_failure: name: Show file path on failed tasks description: - When a task fails, display the path to the file containing the failed task and the line number. This information is displayed automatically for every task when running with C(-vv) or greater verbosity. type: bool default: no env: - name: ANSIBLE_SHOW_TASK_PATH_ON_FAILURE ini: - key: show_task_path_on_failure section: defaults version_added: '2.11' """
ModuleDocFragment
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol24.py
{ "start": 1185, "end": 1221 }
class ____(type): attr1: int
GMeta
python
google__flatbuffers
tests/service_test_generated.py
{ "start": 136, "end": 830 }
class ____(object): __slots__ = ['_tab'] @classmethod def GetRootAs(cls, buf, offset: int = 0): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = HelloRequest() x.Init(buf, n + offset) return x @classmethod def GetRootAsHelloRequest(cls, buf, offset=0): """This method is deprecated. Please switch to GetRootAs.""" return cls.GetRootAs(buf, offset) # HelloRequest def Init(self, buf: bytes, pos: int): self._tab = flatbuffers.table.Table(buf, pos) def HelloRequestStart(builder: flatbuffers.Builder): builder.StartObject(0) def HelloRequestEnd(builder: flatbuffers.Builder) -> int: return builder.EndObject()
HelloRequest
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/roles.py
{ "start": 6724, "end": 6846 }
class ____(FromClauseRole): __slots__ = () _role_name = "subject table for an INSERT, UPDATE or DELETE"
DMLTableRole
python
pallets__werkzeug
tests/test_datastructures.py
{ "start": 10577, "end": 10895 }
class ____(_ImmutableDictTests): storage_class = _ImmutableOrderedMultiDict def test_ordered_multidict_is_hashable(self): a = self.storage_class([("a", 1), ("b", 1), ("a", 2)]) b = self.storage_class([("a", 1), ("a", 2), ("b", 1)]) assert hash(a) != hash(b)
TestImmutableOrderedMultiDict
python
ray-project__ray
python/ray/serve/tests/test_model_composition.py
{ "start": 4016, "end": 4304 }
class ____: def __init__(self, s: str): self._s = s def __call__(self, *args): return self._s def test_single_node_deploy_success(serve_instance): m1 = Adder.bind(1) handle = serve.run(m1) assert handle.remote(41).result() == 42 @serve.deployment
Echo
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/array_ops_test.py
{ "start": 60684, "end": 61680 }
class ____(test_util.TensorFlowTestCase): @test_util.run_gpu_only def testEagerIdentity(self): with context.eager_mode(): def _test(x, y, device): self.assertAllEqual(x.numpy(), y.numpy()) self.assertIn(device, y.device.lower()) with test_util.force_gpu(): a = constant_op.constant([[2], [3]], dtype=dtypes.float32) with test_util.force_gpu(): b = array_ops.identity(a) _test(a, b, "gpu") with test_util.force_cpu(): c = array_ops.identity(b) _test(b, c, "cpu") with test_util.force_cpu(): d = array_ops.identity(c) _test(c, d, "cpu") with test_util.force_gpu(): e = array_ops.identity(d) _test(d, e, "gpu") def testIdentityVariable(self): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(v.initializer) result = array_ops.identity(v) self.assertIsInstance(result, tensor_lib.Tensor) self.assertAllEqual(result, v)
IdentityTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol1.py
{ "start": 2082, "end": 2619 }
class ____(ProtoBase2[_B], Protocol[_A, _B]): ... p5_1: Proto5[float, int] # This should generate an error because the second type argument # corresponds to _B, which is bound to int. p5_2: Proto5[int, float] def func1(): # This should generate an error because Protocol isn't # allowed in a type annotation. v: Protocol | int # This should generate an error because Protocol isn't # allowed in a TypeVar bound. T2 = TypeVar("T2", bound=Protocol | int) # This should generate an error because int is not a TypeVar
Proto5
python
coleifer__peewee
tests/models.py
{ "start": 91689, "end": 91812 }
class ____(TestModel): timestamp = DateTimeField(constraints=[SQL('default (now())')]) @requires_postgresql
ServerDefault
python
python-pillow__Pillow
src/PIL/ImageFilter.py
{ "start": 8215, "end": 8406 }
class ____(BuiltinFilter): name = "Edge-enhance" # fmt: off filterargs = (3, 3), 2, 0, ( -1, -1, -1, -1, 10, -1, -1, -1, -1, ) # fmt: on
EDGE_ENHANCE
python
PyCQA__pylint
tests/functional/p/protocol_classes_abstract.py
{ "start": 908, "end": 1031 }
class ____(FooProtocol, metaclass=ABCMeta): """Doesn't subclass typing.Protocol but uses metaclass directly"""
AbcProtocol
python
cython__cython
Cython/Compiler/PyrexTypes.py
{ "start": 174132, "end": 175234 }
class ____(CType): def __init__(self, name, optional=False): self.name = name self.optional = optional def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): if entity_code: return self.name + " " + entity_code else: return self.name def specialize(self, values): if self in values: return values[self] else: return self def deduce_template_params(self, actual): return {self: actual} def same_as_resolved_type(self, other_type): if isinstance(other_type, TemplatePlaceholderType): return self.name == other_type.name else: return 0 def __hash__(self): return hash(self.name) def __eq__(self, other): if isinstance(other, TemplatePlaceholderType): return self.name == other.name else: return False def is_optional_template_param(type): return isinstance(type, TemplatePlaceholderType) and type.optional
TemplatePlaceholderType
python
readthedocs__readthedocs.org
readthedocs/organizations/migrations/0011_add_stripe_subscription_field.py
{ "start": 182, "end": 1316 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("djstripe", "0010_alter_customer_balance"), ("organizations", "0010_add_stripe_customer"), ] operations = [ migrations.AddField( model_name="historicalorganization", name="stripe_subscription", field=models.ForeignKey( blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name="+", to="djstripe.subscription", verbose_name="Stripe subscription", ), ), migrations.AddField( model_name="organization", name="stripe_subscription", field=models.OneToOneField( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="rtd_organization", to="djstripe.subscription", verbose_name="Stripe subscription", ), ), ]
Migration
python
jmcnamara__XlsxWriter
xlsxwriter/xmlwriter.py
{ "start": 586, "end": 7821 }
class ____: """ Simple XML writer class. """ def __init__(self) -> None: self.fh = None self.internal_fh = False def _set_filehandle(self, filehandle) -> None: # Set the writer filehandle directly. Mainly for testing. self.fh = filehandle self.internal_fh = False def _set_xml_writer(self, filename) -> None: # Set the XML writer filehandle for the object. if isinstance(filename, StringIO): self.internal_fh = False self.fh = filename else: self.internal_fh = True # pylint: disable-next=consider-using-with self.fh = open(filename, "w", encoding="utf-8") def _xml_close(self) -> None: # Close the XML filehandle if we created it. if self.internal_fh: self.fh.close() def _xml_declaration(self) -> None: # Write the XML declaration. self.fh.write('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n') def _xml_start_tag(self, tag, attributes=[]) -> None: # Write an XML start tag with optional attributes. for key, value in attributes: value = self._escape_attributes(value) tag += f' {key}="{value}"' self.fh.write(f"<{tag}>") def _xml_start_tag_unencoded(self, tag, attributes=[]) -> None: # Write an XML start tag with optional, unencoded, attributes. # This is a minor speed optimization for elements that don't # need encoding. for key, value in attributes: tag += f' {key}="{value}"' self.fh.write(f"<{tag}>") def _xml_end_tag(self, tag) -> None: # Write an XML end tag. self.fh.write(f"</{tag}>") def _xml_empty_tag(self, tag, attributes=[]) -> None: # Write an empty XML tag with optional attributes. for key, value in attributes: value = self._escape_attributes(value) tag += f' {key}="{value}"' self.fh.write(f"<{tag}/>") def _xml_empty_tag_unencoded(self, tag, attributes=[]) -> None: # Write an empty XML tag with optional, unencoded, attributes. # This is a minor speed optimization for elements that don't # need encoding. 
for key, value in attributes: tag += f' {key}="{value}"' self.fh.write(f"<{tag}/>") def _xml_data_element(self, tag, data, attributes=[]) -> None: # Write an XML element containing data with optional attributes. end_tag = tag for key, value in attributes: value = self._escape_attributes(value) tag += f' {key}="{value}"' data = self._escape_data(data) data = self._escape_control_characters(data) self.fh.write(f"<{tag}>{data}</{end_tag}>") def _xml_string_element(self, index, attributes=[]) -> None: # Optimized tag writer for <c> cell string elements in the inner loop. attr = "" for key, value in attributes: value = self._escape_attributes(value) attr += f' {key}="{value}"' self.fh.write(f'<c{attr} t="s"><v>{index}</v></c>') def _xml_si_element(self, string, attributes=[]) -> None: # Optimized tag writer for shared strings <si> elements. attr = "" for key, value in attributes: value = self._escape_attributes(value) attr += f' {key}="{value}"' string = self._escape_data(string) self.fh.write(f"<si><t{attr}>{string}</t></si>") def _xml_rich_si_element(self, string) -> None: # Optimized tag writer for shared strings <si> rich string elements. self.fh.write(f"<si>{string}</si>") def _xml_number_element(self, number, attributes=[]) -> None: # Optimized tag writer for <c> cell number elements in the inner loop. attr = "" for key, value in attributes: value = self._escape_attributes(value) attr += f' {key}="{value}"' self.fh.write(f"<c{attr}><v>{number:.16G}</v></c>") def _xml_formula_element(self, formula, result, attributes=[]) -> None: # Optimized tag writer for <c> cell formula elements in the inner loop. 
attr = "" for key, value in attributes: value = self._escape_attributes(value) attr += f' {key}="{value}"' formula = self._escape_data(formula) result = self._escape_data(result) self.fh.write(f"<c{attr}><f>{formula}</f><v>{result}</v></c>") def _xml_inline_string(self, string, preserve, attributes=[]) -> None: # Optimized tag writer for inlineStr cell elements in the inner loop. attr = "" t_attr = "" # Set the <t> attribute to preserve whitespace. if preserve: t_attr = ' xml:space="preserve"' for key, value in attributes: value = self._escape_attributes(value) attr += f' {key}="{value}"' string = self._escape_data(string) self.fh.write(f'<c{attr} t="inlineStr"><is><t{t_attr}>{string}</t></is></c>') def _xml_rich_inline_string(self, string, attributes=[]) -> None: # Optimized tag writer for rich inlineStr in the inner loop. attr = "" for key, value in attributes: value = self._escape_attributes(value) attr += f' {key}="{value}"' self.fh.write(f'<c{attr} t="inlineStr"><is>{string}</is></c>') def _escape_attributes(self, attribute): # Escape XML characters in attributes. try: if not xml_escapes.search(attribute): return attribute except TypeError: return attribute attribute = ( attribute.replace("&", "&amp;") .replace('"', "&quot;") .replace("<", "&lt;") .replace(">", "&gt;") .replace("\n", "&#xA;") ) return attribute def _escape_data(self, data): # Escape XML characters in data sections of tags. Note, this # is different from _escape_attributes() in that double quotes # are not escaped by Excel. try: if not xml_escapes.search(data): return data except TypeError: return data data = data.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") return data @staticmethod def _escape_control_characters(data): # Excel escapes control characters with _xHHHH_ and also escapes any # literal strings of that type by encoding the leading underscore. # So "\0" -> _x0000_ and "_x0000_" -> _x005F_x0000_. # The following substitutions deal with those cases. 
try: # Escape the escape. data = re_control_chars_1.sub(r"_x005F\1", data) except TypeError: return data # Convert control character to the _xHHHH_ escape. data = re_control_chars_2.sub( lambda match: f"_x{ord(match.group(1)):04X}_", data ) # Escapes non characters in strings. data = data.replace("\ufffe", "_xFFFE_").replace("\uffff", "_xFFFF_") return data
XMLwriter
python
altair-viz__altair
tools/generate_schema_wrapper.py
{ "start": 14251, "end": 14635 }
class ____(SchemaGenerator): """Base template w/ an extra slot `{method_code}` after `{init_code}`.""" schema_class_template = textwrap.dedent( ''' class {classname}({basename}): """{docstring}""" _schema = {schema!r} {init_code} {method_code} ''' ) SchGen = TypeVar("SchGen", bound=SchemaGenerator)
MethodSchemaGenerator
python
scipy__scipy
scipy/io/_harwell_boeing/_fortran_format_parser.py
{ "start": 723, "end": 2427 }
class ____: @classmethod def from_number(cls, n, min=None): """Given an integer, returns a "reasonable" IntFormat instance to represent any number between 0 and n if n > 0, -n and n if n < 0 Parameters ---------- n : int max number one wants to be able to represent min : int minimum number of characters to use for the format Returns ------- res : IntFormat IntFormat instance with reasonable (see Notes) computed width Notes ----- Reasonable should be understood as the minimal string length necessary without losing precision. For example, IntFormat.from_number(1) will return an IntFormat instance of width 2, so that any 0 and 1 may be represented as 1-character strings without loss of information. """ width = number_digits(n) + 1 if n < 0: width += 1 repeat = 80 // width return cls(width, min, repeat=repeat) def __init__(self, width, min=None, repeat=None): self.width = width self.repeat = repeat self.min = min def __repr__(self): r = "IntFormat(" if self.repeat: r += f"{self.repeat}" r += f"I{self.width}" if self.min: r += f".{self.min}" return r + ")" @property def fortran_format(self): r = "(" if self.repeat: r += f"{self.repeat}" r += f"I{self.width}" if self.min: r += f".{self.min}" return r + ")" @property def python_format(self): return "%" + str(self.width) + "d"
IntFormat
python
huggingface__transformers
src/transformers/models/vit_mae/configuration_vit_mae.py
{ "start": 800, "end": 6372 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`ViTMAEModel`]. It is used to instantiate an ViT MAE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ViT [facebook/vit-mae-base](https://huggingface.co/facebook/vit-mae-base) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. 
patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. decoder_num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the decoder. decoder_hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the decoder. decoder_num_hidden_layers (`int`, *optional*, defaults to 8): Number of hidden layers in the decoder. decoder_intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder. mask_ratio (`float`, *optional*, defaults to 0.75): The ratio of the number of masked tokens in the input sequence. norm_pix_loss (`bool`, *optional*, defaults to `False`): Whether or not to train with normalized pixels (see Table 3 in the paper). Using normalized pixels improved representation quality in the experiments of the authors. 
Example: ```python >>> from transformers import ViTMAEConfig, ViTMAEModel >>> # Initializing a ViT MAE vit-mae-base style configuration >>> configuration = ViTMAEConfig() >>> # Initializing a model (with random weights) from the vit-mae-base style configuration >>> model = ViTMAEModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vit_mae" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_hidden_size = decoder_hidden_size self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.mask_ratio = mask_ratio self.norm_pix_loss = norm_pix_loss __all__ = ["ViTMAEConfig"]
ViTMAEConfig
python
langchain-ai__langchain
libs/langchain/langchain_classic/agents/conversational_chat/output_parser.py
{ "start": 417, "end": 2320 }
class ____(AgentOutputParser): """Output parser for the conversational agent.""" format_instructions: str = FORMAT_INSTRUCTIONS """Default formatting instructions""" def get_format_instructions(self) -> str: """Returns formatting instructions for the given output parser.""" return self.format_instructions def parse(self, text: str) -> AgentAction | AgentFinish: """Attempts to parse the given text into an AgentAction or AgentFinish. Raises: OutputParserException if parsing fails. """ try: # Attempt to parse the text into a structured format (assumed to be JSON # stored as markdown) response = parse_json_markdown(text) # If the response contains an 'action' and 'action_input' if "action" in response and "action_input" in response: action, action_input = response["action"], response["action_input"] # If the action indicates a final answer, return an AgentFinish if action == "Final Answer": return AgentFinish({"output": action_input}, text) # Otherwise, return an AgentAction with the specified action and # input return AgentAction(action, action_input, text) # If the necessary keys aren't present in the response, raise an # exception msg = f"Missing 'action' or 'action_input' in LLM output: {text}" raise OutputParserException(msg) except Exception as e: # If any other exception is raised during parsing, also raise an # OutputParserException msg = f"Could not parse LLM output: {text}" raise OutputParserException(msg) from e @property def _type(self) -> str: return "conversational_chat"
ConvoOutputParser
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/pipeline.py
{ "start": 9531, "end": 13141 }
class ____(Step): """ Pipeline step to update connector's metadata, acceptance-test-config and readme to manifest-only. """ context: ConnectorContext title = "Update Connector Metadata" async def _run(self) -> StepResult: connector = self.context.connector ## 1. Update the acceptance test config to point to the right spec path try: acceptance_test_config_data = read_yaml(connector.acceptance_test_config_path) # Handle legacy acceptance-test-config: if "acceptance_tests" in acceptance_test_config_data: acceptance_test_config_data["acceptance_tests"]["spec"]["tests"][0]["spec_path"] = "manifest.yaml" else: acceptance_test_config_data["tests"]["spec"][0]["spec_path"] = "manifest.yaml" write_yaml(acceptance_test_config_data, connector.acceptance_test_config_path) except Exception as e: return StepResult(step=self, status=StepStatus.FAILURE, stdout=f"Failed to update acceptance-test-config.yml: {e}") ## 2. Update the connector's metadata self.logger.info("Updating metadata file") try: metadata = read_yaml(connector.metadata_file_path) # Remove any existing language tags and append the manifest-only tag tags = metadata.get("data", {}).get("tags", []) for tag in tags: if "language:" in tag: tags.remove(tag) tags.append("language:manifest-only") pypi_package = metadata.get("data", {}).get("remoteRegistries", {}).get("pypi") if pypi_package: pypi_package["enabled"] = False # Update the base image latest_base_image = get_latest_base_image("airbyte/source-declarative-manifest") connector_base_image = metadata.get("data", {}).get("connectorBuildOptions") connector_base_image["baseImage"] = latest_base_image # Write the changes to metadata.yaml write_yaml(metadata, connector.metadata_file_path) except Exception as e: return StepResult(step=self, status=StepStatus.FAILURE, stdout=f"Failed to update metadata.yaml: {e}") ## 3. 
Update the connector's README self.logger.info("Updating README file") readme = readme_for_connector(connector.technical_name) with open(connector.code_directory / "README.md", "w") as file: file.write(readme) return StepResult(step=self, status=StepStatus.SUCCESS, stdout="The connector has been successfully migrated to manifest-only.") ## MAIN FUNCTION ## async def run_connectors_manifest_only_pipeline(context: ConnectorContext, semaphore: "Semaphore", *args: Any) -> Report: steps_to_run: STEP_TREE = [] steps_to_run.append([StepToRun(id=CONNECTOR_TEST_STEP_ID.MANIFEST_ONLY_CHECK, step=CheckIsManifestMigrationCandidate(context))]) steps_to_run.append( [ StepToRun( id=CONNECTOR_TEST_STEP_ID.MANIFEST_ONLY_STRIP, step=StripConnector(context), depends_on=[CONNECTOR_TEST_STEP_ID.MANIFEST_ONLY_CHECK], ) ] ) steps_to_run.append( [ StepToRun( id=CONNECTOR_TEST_STEP_ID.MANIFEST_ONLY_UPDATE, step=UpdateManifestOnlyFiles(context), depends_on=[CONNECTOR_TEST_STEP_ID.MANIFEST_ONLY_STRIP], ) ] ) return await run_connector_steps(context, semaphore, steps_to_run)
UpdateManifestOnlyFiles
python
readthedocs__readthedocs.org
readthedocs/builds/admin.py
{ "start": 699, "end": 883 }
class ____(admin.TabularInline): model = BuildCommandResult fields = ("command", "exit_code", "output") classes = ["collapse"] @admin.register(Build)
BuildCommandResultInline
python
giampaolo__psutil
tests/test_linux.py
{ "start": 19222, "end": 22664 }
class ____(PsutilTestCase): @staticmethod def meminfo_has_swap_info(): """Return True if /proc/meminfo provides swap metrics.""" with open("/proc/meminfo") as f: data = f.read() return 'SwapTotal:' in data and 'SwapFree:' in data def test_total(self): free_value = free_swap().total psutil_value = psutil.swap_memory().total assert abs(free_value - psutil_value) < TOLERANCE_SYS_MEM @retry_on_failure() def test_used(self): free_value = free_swap().used psutil_value = psutil.swap_memory().used assert abs(free_value - psutil_value) < TOLERANCE_SYS_MEM @retry_on_failure() def test_free(self): free_value = free_swap().free psutil_value = psutil.swap_memory().free assert abs(free_value - psutil_value) < TOLERANCE_SYS_MEM def test_missing_sin_sout(self): with mock.patch('psutil._common.open', create=True) as m: with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") ret = psutil.swap_memory() assert m.called assert len(ws) == 1 w = ws[0] assert ( "'sin' and 'sout' swap memory stats couldn't be determined" in str(w.message) ) assert ret.sin == 0 assert ret.sout == 0 def test_no_vmstat_mocked(self): # see https://github.com/giampaolo/psutil/issues/722 with mock_open_exception("/proc/vmstat", FileNotFoundError) as m: with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") ret = psutil.swap_memory() assert m.called assert len(ws) == 1 w = ws[0] assert ( "'sin' and 'sout' swap memory stats couldn't " "be determined and were set to 0" in str(w.message) ) assert ret.sin == 0 assert ret.sout == 0 def test_meminfo_against_sysinfo(self): # Make sure the content of /proc/meminfo about swap memory # matches sysinfo() syscall, see: # https://github.com/giampaolo/psutil/issues/1015 if not self.meminfo_has_swap_info(): return pytest.skip("/proc/meminfo has no swap metrics") with mock.patch('psutil._pslinux.cext.linux_sysinfo') as m: swap = psutil.swap_memory() assert not m.called import psutil._psutil_linux as cext _, _, _, _, total, free, 
unit_multiplier = cext.linux_sysinfo() total *= unit_multiplier free *= unit_multiplier assert swap.total == total assert abs(swap.free - free) < TOLERANCE_SYS_MEM def test_emulate_meminfo_has_no_metrics(self): # Emulate a case where /proc/meminfo provides no swap metrics # in which case sysinfo() syscall is supposed to be used # as a fallback. with mock_open_content({"/proc/meminfo": b""}) as m: psutil.swap_memory() assert m.called # ===================================================================== # --- system CPU # ===================================================================== @pytest.mark.skipif(not LINUX, reason="LINUX only")
TestSystemSwapMemory
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-service-now/tests/test_snow_kb_reader.py
{ "start": 3619, "end": 24890 }
class ____: """Test class for ServiceNow Knowledge Base Reader.""" def test_initialization(self, mock_pysnc_imports): """Test that SnowKBReader initializes correctly.""" with patch( "llama_index.readers.service_now.base.ServiceNowClient", MockServiceNowClient, ): with patch( "llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow", MockPasswordGrantFlow, ): from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType custom_parsers = { FileType.HTML: MockCustomParser(), # Required FileType.PDF: MockCustomParser(), } reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_pass", client_id="test_client_id", client_secret="test_client_secret", ) assert reader.instance == "test.service-now.com" assert reader.username == "test_user" assert reader.password == "test_pass" assert reader.client_id == "test_client_id" assert reader.client_secret == "test_client_secret" assert reader.kb_table == "kb_knowledge" assert reader.pysnc_client is not None assert reader.custom_parsers == custom_parsers def test_initialization_missing_credentials(self): """Test that SnowKBReader raises error when missing required credentials.""" from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType custom_parsers = { FileType.HTML: MockCustomParser(), # Required FileType.PDF: MockCustomParser(), } with pytest.raises(ValueError, match="username parameter is required"): SnowKBReader(instance="test.service-now.com", custom_parsers=custom_parsers) def test_load_data_by_sys_id(self, snow_reader): """Test loading KB article by sys_id.""" with patch.object(snow_reader, "load_data") as mock_load_data: mock_doc = Document( text="Test content", metadata={ "title": "Test KB Article", "page_id": "KB0010001", "status": "Published", }, ) mock_load_data.return_value = [mock_doc] result = 
snow_reader.load_data(article_sys_id="test_sys_id") assert len(result) == 1 assert result[0].text == "Test content" assert result[0].metadata["title"] == "Test KB Article" mock_load_data.assert_called_once_with(article_sys_id="test_sys_id") def test_load_data_by_numbers(self, snow_reader): """Test loading KB articles by numbers.""" with patch.object(snow_reader, "load_data") as mock_load_data: mock_doc = Document( text="Test content", metadata={ "title": "Test KB Article", "page_id": "KB0010001", "status": "Published", }, ) mock_load_data.return_value = [mock_doc] result = snow_reader.load_data(numbers=["KB0010001", "KB0010002"]) assert len(result) == 1 mock_load_data.assert_called_once_with(numbers=["KB0010001", "KB0010002"]) def test_load_data_no_parameters(self, snow_reader): """Test that load_data raises error when no parameters provided.""" with pytest.raises(ValueError, match="Must provide article_sys_id or number"): with patch.object(snow_reader, "load_data") as mock_load_data: mock_load_data.side_effect = ValueError( "Must provide article_sys_id or number" ) snow_reader.load_data() def test_get_documents_with_attachments(self, snow_reader): """Test getting documents with attachment processing.""" with patch.object(snow_reader, "handle_attachments") as mock_handle_attachments: mock_handle_attachments.return_value = [ {"file_name": "test.pdf", "markdown_text": "PDF content"} ] with patch.object( snow_reader.custom_parser_manager, "process_text_with_custom_parser" ) as mock_process: mock_process.return_value = "Processed HTML content" result = snow_reader.load_data(article_sys_id="test_sys_id") assert len(result) == 1 assert "Processed HTML content" in result[0].text assert "# test.pdf" in result[0].text assert "PDF content" in result[0].text def test_handle_attachments(self, snow_reader): """Test attachment handling functionality.""" with patch.object(snow_reader.pysnc_client, "GlideRecord") as mock_gr_class: mock_gr = MagicMock() mock_gr.next.side_effect = 
[True, False] mock_gr_class.return_value = mock_gr with patch.object( snow_reader, "handle_attachment" ) as mock_handle_attachment: mock_handle_attachment.return_value = { "file_name": "test.pdf", "markdown_text": "PDF content", } result = snow_reader.handle_attachments("test_sys_id", "KB0010001") assert len(result) == 1 assert result[0]["file_name"] == "test.pdf" mock_gr.add_query.assert_any_call("table_sys_id", "test_sys_id") mock_gr.add_query.assert_any_call("table_name", "kb_knowledge") def test_get_file_type(self, snow_reader): """Test file type detection.""" from llama_index.readers.service_now.base import FileType assert snow_reader.get_File_type("test.pdf") == FileType.PDF assert snow_reader.get_File_type("test.jpg") == FileType.IMAGE assert snow_reader.get_File_type("test.docx") == FileType.DOCUMENT assert snow_reader.get_File_type("test.xlsx") == FileType.SPREADSHEET assert snow_reader.get_File_type("test.txt") == FileType.TEXT assert snow_reader.get_File_type("test.html") == FileType.HTML assert snow_reader.get_File_type("test.csv") == FileType.CSV assert snow_reader.get_File_type("test.md") == FileType.MARKDOWN assert snow_reader.get_File_type("test.unknown") == FileType.UNKNOWN def test_download_attachment_content(self, snow_reader): """Test attachment content download.""" result = snow_reader._download_attachment_content("test_sys_id") assert result == b"mock file content" snow_reader.pysnc_client.attachment_api.get_file.assert_called_once_with( "test_sys_id" ) def test_download_attachment_content_failure(self, snow_reader): """Test attachment download failure handling.""" snow_reader.pysnc_client.attachment_api.get_file.side_effect = Exception( "Download failed" ) result = snow_reader._download_attachment_content("test_sys_id") assert result is None def test_custom_kb_table(self, mock_pysnc_imports): """Test initialization with custom KB table.""" with patch( "llama_index.readers.service_now.base.ServiceNowClient", MockServiceNowClient, ): with 
patch( "llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow", MockPasswordGrantFlow, ): from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType custom_parsers = { FileType.HTML: MockCustomParser(), # Required FileType.PDF: MockCustomParser(), } reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_pass", client_id="test_client_id", client_secret="test_client_secret", kb_table="custom_kb_table", ) assert reader.kb_table == "custom_kb_table" def test_fail_on_error_false(self, mock_pysnc_imports): """Test that fail_on_error=False allows processing to continue on errors.""" with patch( "llama_index.readers.service_now.base.ServiceNowClient", MockServiceNowClient, ): with patch( "llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow", MockPasswordGrantFlow, ): from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType custom_parsers = { FileType.HTML: MockCustomParser(), # Required FileType.PDF: MockCustomParser(), } reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_pass", client_id="test_client_id", client_secret="test_client_secret", fail_on_error=False, ) assert reader.fail_on_error is False def test_event_system_integration(self, snow_reader): """Test that LlamaIndex event system integration is working.""" from llama_index.readers.service_now.event import ( SNOWKBPageFetchStartEvent, SNOWKBPageFetchCompletedEvent, ) # Test that events can be imported and are proper BaseEvent subclasses assert hasattr(SNOWKBPageFetchStartEvent, "model_fields") assert hasattr(SNOWKBPageFetchCompletedEvent, "model_fields") # Test event creation start_event = SNOWKBPageFetchStartEvent(page_id="KB0010001") assert start_event.page_id == "KB0010001" @patch("os.path.exists") @patch("os.remove") def 
test_custom_parser_manager_file_cleanup( self, mock_remove, mock_exists, snow_reader ): """Test that custom parser manager cleans up temporary files.""" mock_exists.return_value = True # Access the private method through the manager snow_reader.custom_parser_manager._CustomParserManager__remove_custom_file( "test_file.txt" ) mock_exists.assert_called_once_with("test_file.txt") mock_remove.assert_called_once_with("test_file.txt") def test_format_attachment_header(self, snow_reader): """Test attachment header formatting.""" attachment = {"file_name": "test_document.pdf"} result = snow_reader._format_attachment_header(attachment) assert result == "# test_document.pdf\n" def test_initialize_client_with_valid_credentials(self, mock_pysnc_imports): """Test client initialization with valid credentials.""" with patch( "llama_index.readers.service_now.base.ServiceNowClient", MockServiceNowClient, ): with patch( "llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow", MockPasswordGrantFlow, ): from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType custom_parsers = { FileType.HTML: MockCustomParser(), # Required FileType.PDF: MockCustomParser(), } reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_pass", client_id="test_client_id", client_secret="test_client_secret", ) # Test that client was initialized assert reader.pysnc_client is not None def test_custom_parsers_integration(self, mock_pysnc_imports): """Test integration with custom parsers.""" with patch( "llama_index.readers.service_now.base.ServiceNowClient", MockServiceNowClient, ): with patch( "llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow", MockPasswordGrantFlow, ): from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType # Mock custom parser (use the actual MockCustomParser class instead of 
MagicMock) custom_parsers = { FileType.HTML: MockCustomParser(), # Required FileType.PDF: MockCustomParser(), } reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_pass", client_id="test_client_id", client_secret="test_client_secret", ) assert reader.custom_parsers == custom_parsers assert FileType.PDF in reader.custom_parsers def test_process_callbacks(self, mock_pysnc_imports): """Test process callbacks functionality.""" with patch( "llama_index.readers.service_now.base.ServiceNowClient", MockServiceNowClient, ): with patch( "llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow", MockPasswordGrantFlow, ): from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType def process_attachment_callback( file_name: str, size: int ) -> tuple[bool, str]: return True, "Processing" def process_document_callback(kb_number: str) -> bool: return True custom_parsers = { FileType.HTML: MockCustomParser(), # Required FileType.PDF: MockCustomParser(), } reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_pass", client_id="test_client_id", client_secret="test_client_secret", process_attachment_callback=process_attachment_callback, process_document_callback=process_document_callback, ) assert reader.process_attachment_callback is not None assert reader.process_document_callback is not None # Test callback execution result = reader.process_attachment_callback("test.pdf", 1000) assert result == (True, "Processing") result = reader.process_document_callback("KB0010001") assert result is True def test_custom_parser_validation(self, mock_pysnc_imports): """Test custom parser validation.""" with patch( "llama_index.readers.service_now.base.ServiceNowClient", MockServiceNowClient, ): with patch( "llama_index.readers.service_now.base.ServiceNowPasswordGrantFlow", 
MockPasswordGrantFlow, ): from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType custom_parsers = { FileType.HTML: MockCustomParser(), # Required FileType.PDF: MockCustomParser(), } reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_pass", client_id="test_client_id", client_secret="test_client_secret", ) assert reader.custom_parsers[FileType.PDF] is not None # Test parsing with custom parser mock_parser = reader.custom_parsers[FileType.PDF] result = mock_parser.load_data("test.pdf") assert len(result) == 1 assert result[0].text == "Mocked parsed content" def test_smoke_test_instantiation(self, mock_pysnc_imports): """Smoke test to verify SnowKBReader can be instantiated correctly.""" from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType custom_parsers = { FileType.PDF: MockCustomParser(), FileType.HTML: MockCustomParser(), FileType.DOCUMENT: MockCustomParser(), } # This should create without errors (though it will fail on ServiceNow connection) try: reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_password", client_id="test_client_id", client_secret="test_client_secret", ) # Verify basic properties are set correctly assert reader.instance == "test.service-now.com" assert reader.username == "test_user" assert reader.password == "test_password" assert reader.client_id == "test_client_id" assert reader.client_secret == "test_client_secret" assert reader.custom_parsers == custom_parsers assert reader.kb_table == "kb_knowledge" assert reader.fail_on_error is True assert reader.pysnc_client is not None assert reader.custom_parser_manager is not None # Verify the custom parsers are working assert FileType.PDF in reader.custom_parsers assert FileType.HTML in reader.custom_parsers assert FileType.DOCUMENT in 
reader.custom_parsers except Exception as e: # We expect a ServiceNow connection error in test environment if "ServiceNow client" in str(e) or "Instance name not well-formed" in str( e ): # This is expected since we can't actually connect to ServiceNow in tests pass else: # Any other error is unexpected and should fail the test pytest.fail(f"Unexpected error during SnowKBReader instantiation: {e}") def test_smoke_test_with_minimal_config(self, mock_pysnc_imports): """Smoke test with minimal configuration.""" from llama_index.readers.service_now import SnowKBReader from llama_index.readers.service_now.base import FileType # Test with minimal required configuration custom_parsers = { FileType.HTML: MockCustomParser() # HTML parser is required for article body processing } try: reader = SnowKBReader( instance="test.service-now.com", custom_parsers=custom_parsers, username="test_user", password="test_password", ) # Verify minimal config is set correctly assert reader.instance == "test.service-now.com" assert reader.username == "test_user" assert reader.password == "test_password" assert reader.client_id is None assert reader.client_secret is None assert len(reader.custom_parsers) == 1 assert FileType.HTML in reader.custom_parsers except Exception as e: # We expect a ServiceNow connection error in test environment if "ServiceNow client" in str(e) or "Instance name not well-formed" in str( e ): # This is expected since we can't actually connect to ServiceNow in tests pass else: # Any other error is unexpected and should fail the test pytest.fail( f"Unexpected error during minimal SnowKBReader instantiation: {e}" )
TestSnowKBReader
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/methodOverride3.py
{ "start": 1233, "end": 1301 }
class ____(F1[_T_F]): def do_stuff(self) -> Iterable[_T_F]: ...
F2
python
spyder-ide__spyder
spyder/plugins/shortcuts/tests/test_shortcuts.py
{ "start": 1326, "end": 10889 }
class ____(): def __init__(self, text): self.txt = text def text(self): return self.txt # ---- Tests ShortcutsTable @pytest.mark.skipif( sys.platform.startswith('linux') and running_in_ci(), reason="It fails on Linux due to the lack of a proper X server.") def test_shortcuts(shortcut_table): """Run shortcuts table.""" shortcut_table.show() shortcut_table.check_shortcuts() assert shortcut_table def test_shortcut_in_conf_is_filtered_with_shortcut_data(qtbot): shortcut_table = ShortcutsTable() shortcut_table = load_shortcuts(shortcut_table) qtbot.addWidget(shortcut_table) row_count = shortcut_table.model().rowCount() assert row_count != 0 shortcut_table_empty = ShortcutsTable() shortcut_table_empty.set_shortcut_data([ ShortcutData(qobject=None, name='switch to plots', context='_'), ShortcutData(qobject=None, name='switch to editor', context='_') ]) shortcut_table_empty.load_shortcuts() qtbot.addWidget(shortcut_table_empty) row_count = shortcut_table_empty.model().rowCount() assert row_count == 2 def test_shortcuts_filtering(shortcut_table): """Run shortcuts table.""" # Store original row count row_count = shortcut_table.model().rowCount() # Filter for "debug" shortcut_table.finder = FilterTextMock('debug') shortcut_table.set_regex() # Sorting should be disabled assert not shortcut_table.isSortingEnabled() # Six hits (causes a bit of an issue to hardcode it like this if new # shortcuts are added...) 
assert shortcut_table.model().rowCount() == 14 # Remove filter text shortcut_table.finder = FilterTextMock('') shortcut_table.set_regex() # Should be sortable again assert shortcut_table.isSortingEnabled() # All entries restored assert shortcut_table.model().rowCount() == row_count # Same thing, but using reset instead shortcut_table.finder = FilterTextMock('debug') shortcut_table.set_regex() shortcut_table.set_regex(reset=True) assert shortcut_table.isSortingEnabled() assert shortcut_table.model().rowCount() == row_count def test_shortcut_filtering_context(shortcut_table): """Test multifiltering by context and name in the table.""" # Verify that the model is filtering by two columns assert len(shortcut_table.model().filters) == 2 # Filter by "console" shortcut_table.finder = FilterTextMock('console') shortcut_table.set_regex() # Verify the number of entries after the regex are 14 # If a new shortcut is added to console, this needs to be changed assert shortcut_table.model().rowCount() == 14 # Filter by "pylint" shortcut_table.finder = FilterTextMock('pylint') shortcut_table.set_regex() # Verify the number of entries after the regex is 1 # If a new shortcut is added to pylint, this needs to be changed assert shortcut_table.model().rowCount() == 2 # ---- Tests ShortcutEditor def test_clear_shortcut(create_shortcut_editor, qtbot): """ Test that pressing on the 'Clear' button to unbind the command from a shortcut is working as expected. """ shortcut_editor = create_shortcut_editor('editor', 'delete line') qtbot.mouseClick(shortcut_editor.button_clear, Qt.LeftButton) assert shortcut_editor.new_sequence == '' def test_press_new_sequence(create_shortcut_editor, qtbot): """ Test that pressing a key sequence with modifier keys is registered as expected by the Shortcut Editor. 
""" shortcut_editor = create_shortcut_editor('editor', 'delete line') modifiers = Qt.ControlModifier | Qt.ShiftModifier | Qt.AltModifier qtbot.keyClick(shortcut_editor, Qt.Key_D, modifier=modifiers) assert shortcut_editor.new_sequence == 'Ctrl+Alt+Shift+D' assert shortcut_editor.warning == NO_WARNING assert shortcut_editor.button_ok.isEnabled() def test_press_new_compound_sequence(create_shortcut_editor, qtbot): """ Test that pressing a compund of key sequences is registered as expected by the Shortcut Editor. """ shortcut_editor = create_shortcut_editor('editor', 'delete line') qtbot.keyClick(shortcut_editor, Qt.Key_D, modifier=Qt.ControlModifier) qtbot.keyClick(shortcut_editor, Qt.Key_A) qtbot.keyClick(shortcut_editor, Qt.Key_B, modifier=Qt.ControlModifier) qtbot.keyClick(shortcut_editor, Qt.Key_C) qtbot.keyClick(shortcut_editor, Qt.Key_D) assert shortcut_editor.new_sequence == 'Ctrl+D, A, Ctrl+B, C' # The 'D' key press event is discarted because a compound sequence # cannot be composed of more than 4 sub sequences. assert shortcut_editor.warning == NO_WARNING assert shortcut_editor.button_ok.isEnabled() def test_clear_back_new_sequence(create_shortcut_editor, qtbot): """ Test that removing the last key sequence entered and clearing all entered key sequence from the Shortcut Editor is working as expected. """ shortcut_editor = create_shortcut_editor('editor', 'delete line') qtbot.keyClick(shortcut_editor, Qt.Key_X, modifier=Qt.ControlModifier) qtbot.keyClick(shortcut_editor, Qt.Key_A) qtbot.keyClick(shortcut_editor, Qt.Key_B, modifier=Qt.ControlModifier) qtbot.keyClick(shortcut_editor, Qt.Key_C) qtbot.keyClick(shortcut_editor, Qt.Key_D) # Remove last key sequence entered. qtbot.mouseClick(shortcut_editor.button_back_sequence, Qt.LeftButton) assert shortcut_editor.new_sequence == 'Ctrl+X, A, Ctrl+B' assert shortcut_editor.warning == SEQUENCE_CONFLICT assert shortcut_editor.button_ok.isEnabled() # Remove second to last key sequence entered. 
qtbot.mouseClick(shortcut_editor.button_back_sequence, Qt.LeftButton) assert shortcut_editor.new_sequence == 'Ctrl+X, A' assert shortcut_editor.warning == SEQUENCE_CONFLICT assert shortcut_editor.button_ok.isEnabled() # Clear all entered key sequences. qtbot.mouseClick(shortcut_editor.btn_clear_sequence, Qt.LeftButton) assert shortcut_editor.new_sequence == '' assert shortcut_editor.warning == SEQUENCE_EMPTY assert not shortcut_editor.button_ok.isEnabled() def test_sequence_conflict(create_shortcut_editor, qtbot): """ Test that the Shortcut Editor is able to detect key sequence conflict with other shortcuts. """ shortcut_editor = create_shortcut_editor('editor', 'delete line') # Check that the conflict is detected for a single key sequence. qtbot.keyClick(shortcut_editor, Qt.Key_X, modifier=Qt.ControlModifier) assert shortcut_editor.new_sequence == 'Ctrl+X' assert shortcut_editor.warning == SEQUENCE_CONFLICT assert shortcut_editor.button_ok.isEnabled() # Check that the conflict is detected for a compound of key sequences. qtbot.keyClick(shortcut_editor, Qt.Key_X) assert shortcut_editor.new_sequence == 'Ctrl+X, X' assert shortcut_editor.warning == SEQUENCE_CONFLICT assert shortcut_editor.button_ok.isEnabled() def test_sequence_single_key(create_shortcut_editor, qtbot): """ Test that the Shortcut Editor raise a warning when the first key sequence entered is composed of a single key with no modifier and this single key is not in the list of supported single key sequence. """ shortcut_editor = create_shortcut_editor('editor', 'delete line') # Check this is working as expected for a single key sequence. qtbot.keyClick(shortcut_editor, Qt.Key_D) assert shortcut_editor.new_sequence == 'D' assert shortcut_editor.warning == INVALID_KEY assert not shortcut_editor.button_ok.isEnabled() # Check this is working as expected for a compound of key sequences. 
qtbot.keyClick(shortcut_editor, Qt.Key_D, modifier=Qt.ControlModifier) assert shortcut_editor.new_sequence == 'D, Ctrl+D' assert shortcut_editor.warning == INVALID_KEY assert not shortcut_editor.button_ok.isEnabled() # Check this is working as expected when a valid single key is pressed. qtbot.mouseClick(shortcut_editor.btn_clear_sequence, Qt.LeftButton) qtbot.keyClick(shortcut_editor, Qt.Key_Home) assert shortcut_editor.new_sequence == 'Home' assert shortcut_editor.warning == NO_WARNING assert shortcut_editor.button_ok.isEnabled() def test_set_sequence_to_default(create_shortcut_editor, qtbot): """ Test that clicking on the button 'Default' set the sequence in the Shortcut Editor to the default value as espected. """ shortcut_editor = create_shortcut_editor('editor', 'delete line') default_sequence = CONF.get( 'shortcuts', "{}/{}".format('editor', 'delete line')) qtbot.mouseClick(shortcut_editor.button_default, Qt.LeftButton) assert shortcut_editor.new_sequence == default_sequence assert shortcut_editor.warning == NO_WARNING assert shortcut_editor.button_ok.isEnabled() def test_invalid_char_in_sequence(create_shortcut_editor, qtbot): """ Test that the key sequence is rejected and a warning is shown if an invalid character is present in the new key sequence. """ shortcut_editor = create_shortcut_editor('editor', 'delete line') # Check this is working as expected for a single key sequence. qtbot.keyClick(shortcut_editor, Qt.Key_Odiaeresis, modifier=Qt.ControlModifier | Qt.AltModifier) assert shortcut_editor.warning == INVALID_KEY assert not shortcut_editor.button_ok.isEnabled() if __name__ == "__main__": pytest.main(['-x', os.path.basename(__file__), '-vv', '-rw'])
FilterTextMock
python
pytorch__pytorch
torch/utils/_pytree.py
{ "start": 25339, "end": 37889 }
class ____(tuple[_T_co, ...]): """A generic type stub for CPython's ``PyStructSequence`` type.""" __slots__: ClassVar[tuple[()]] = () n_fields: Final[int] # type: ignore[misc] n_sequence_fields: Final[int] # type: ignore[misc] n_unnamed_fields: Final[int] # type: ignore[misc] def __init_subclass__(cls) -> NoReturn: """Prohibit subclassing.""" raise TypeError("type 'structseq' is not an acceptable base type") def __new__( cls: type[Self], sequence: Iterable[_T_co], # pyrefly: ignore [bad-function-definition] dict: dict[str, Any] = ..., ) -> Self: raise NotImplementedError # Reference: https://github.com/metaopt/optree/blob/main/optree/typing.py def is_structseq(obj: object | type) -> bool: """Return whether the object is an instance of PyStructSequence or a class of PyStructSequence.""" cls = obj if isinstance(obj, type) else type(obj) return is_structseq_class(cls) # Set if the type allows subclassing (see CPython's Include/object.h) Py_TPFLAGS_BASETYPE: int = 1 << 10 # Reference: https://github.com/metaopt/optree/blob/main/optree/typing.py def is_structseq_class(cls: type) -> bool: """Return whether the class is a class of PyStructSequence.""" return ( isinstance(cls, type) # Check direct inheritance from `tuple` rather than `issubclass(cls, tuple)` and cls.__bases__ == (tuple,) # Check PyStructSequence members and isinstance(getattr(cls, "n_fields", None), int) and isinstance(getattr(cls, "n_sequence_fields", None), int) and isinstance(getattr(cls, "n_unnamed_fields", None), int) # Check the type does not allow subclassing and not bool(cls.__flags__ & Py_TPFLAGS_BASETYPE) # only works for CPython ) # Reference: https://github.com/metaopt/optree/blob/main/optree/typing.py def is_structseq_instance(obj: object) -> bool: """Return whether the object is an instance of PyStructSequence.""" return is_structseq_class(type(obj)) def _tuple_flatten(d: tuple[T, ...]) -> tuple[list[T], Context]: return list(d), None def _tuple_flatten_with_keys( d: tuple[T, ...], ) -> 
tuple[list[tuple[KeyEntry, T]], Context]: values, context = _tuple_flatten(d) # pyrefly: ignore [bad-return] return [(SequenceKey(i), v) for i, v in enumerate(values)], context def _tuple_unflatten(values: Iterable[T], context: Context) -> tuple[T, ...]: return tuple(values) def _list_flatten(d: list[T]) -> tuple[list[T], Context]: return d, None def _list_flatten_with_keys(d: list[T]) -> tuple[list[tuple[KeyEntry, T]], Context]: values, context = _list_flatten(d) # pyrefly: ignore [bad-return] return [(SequenceKey(i), v) for i, v in enumerate(values)], context def _list_unflatten(values: Iterable[T], context: Context) -> list[T]: return list(values) def _dict_flatten(d: dict[Any, T]) -> tuple[list[T], Context]: return list(d.values()), list(d.keys()) def _dict_flatten_with_keys( d: dict[Any, T], ) -> tuple[list[tuple[KeyEntry, T]], Context]: values, context = _dict_flatten(d) # pyrefly: ignore [bad-return] return [(MappingKey(k), v) for k, v in zip(context, values, strict=True)], context def _dict_unflatten(values: Iterable[T], context: Context) -> dict[Any, T]: return dict(zip(context, values, strict=True)) def _namedtuple_flatten(d: NamedTuple) -> tuple[list[Any], Context]: return list(d), type(d) def _namedtuple_flatten_with_keys( d: NamedTuple, ) -> tuple[list[tuple[KeyEntry, Any]], Context]: values, context = _namedtuple_flatten(d) # pyrefly: ignore [bad-return] return ( [ (GetAttrKey(field), v) for field, v in zip(context._fields, values, strict=True) ], context, ) def _namedtuple_unflatten(values: Iterable[T], context: Context) -> NamedTuple: return cast(NamedTuple, context(*values)) def _namedtuple_serialize(context: Context) -> DumpableContext: if context not in SUPPORTED_SERIALIZED_TYPES: raise NotImplementedError( f"Can't serialize TreeSpec of namedtuple class {context} because we " "didn't register a serializated_type_name. Please register using " "`_register_namedtuple`." 
) serialize_node_def = SUPPORTED_SERIALIZED_TYPES[context] serialized_type_name = serialize_node_def.serialized_type_name if serialized_type_name == NO_SERIALIZED_TYPE_NAME_FOUND: raise NotImplementedError( f"Can't serialize TreeSpec of namedtuple class {context} because we " "couldn't find a serializated_type_name. Please register using " "`_register_namedtuple`." ) return serialized_type_name def _namedtuple_deserialize(dumpable_context: DumpableContext) -> Context: if dumpable_context not in SERIALIZED_TYPE_TO_PYTHON_TYPE: raise NotImplementedError( f"Can't deserialize TreeSpec of namedtuple class {dumpable_context} " "because we couldn't find a serializated name." ) typ = SERIALIZED_TYPE_TO_PYTHON_TYPE[dumpable_context] return typ def _ordereddict_flatten(d: OrderedDict[Any, T]) -> tuple[list[T], Context]: return list(d.values()), list(d.keys()) def _ordereddict_flatten_with_keys( d: OrderedDict[Any, T], ) -> tuple[list[tuple[KeyEntry, T]], Context]: values, context = _ordereddict_flatten(d) # pyrefly: ignore [bad-return] return [(MappingKey(k), v) for k, v in zip(context, values, strict=True)], context def _ordereddict_unflatten( values: Iterable[T], context: Context, ) -> OrderedDict[Any, T]: return OrderedDict((key, value) for key, value in zip(context, values, strict=True)) _odict_flatten = _ordereddict_flatten _odict_unflatten = _ordereddict_unflatten def _defaultdict_flatten(d: defaultdict[Any, T]) -> tuple[list[T], Context]: values, dict_context = _dict_flatten(d) return values, [d.default_factory, dict_context] def _defaultdict_flatten_with_keys( d: defaultdict[Any, T], ) -> tuple[list[tuple[KeyEntry, T]], Context]: values, context = _defaultdict_flatten(d) _, dict_context = context # pyrefly: ignore [bad-return] return [ (MappingKey(k), v) for k, v in zip(dict_context, values, strict=True) ], context def _defaultdict_unflatten( values: Iterable[T], context: Context, ) -> defaultdict[Any, T]: default_factory, dict_context = context return 
defaultdict(default_factory, _dict_unflatten(values, dict_context)) def _defaultdict_serialize(context: Context) -> DumpableContext: default_factory, dict_context = context json_defaultdict = { "default_factory_module": default_factory.__module__, "default_factory_name": default_factory.__qualname__, "dict_context": dict_context, } return json_defaultdict def _defaultdict_deserialize(dumpable_context: DumpableContext) -> Context: if not isinstance(dumpable_context, dict): raise AssertionError("dumpable_context must be a dict") expected_keys = { "default_factory_module", "default_factory_name", "dict_context", } if set(dumpable_context) != expected_keys: raise AssertionError( f"dumpable_context keys must be {expected_keys}, got {set(dumpable_context)}" ) default_factory_module = dumpable_context["default_factory_module"] default_factory_name = dumpable_context["default_factory_name"] if not isinstance(default_factory_module, str): raise AssertionError("default_factory_module must be a string") if not isinstance(default_factory_name, str): raise AssertionError("default_factory_name must be a string") module = importlib.import_module(default_factory_module) default_factory = getattr(module, default_factory_name) dict_context = dumpable_context["dict_context"] return [default_factory, dict_context] def _deque_flatten(d: deque[T]) -> tuple[list[T], Context]: return list(d), d.maxlen def _deque_flatten_with_keys( d: deque[T], ) -> tuple[list[tuple[KeyEntry, T]], Context]: values, context = _deque_flatten(d) # pyrefly: ignore [bad-return] return [(SequenceKey(i), v) for i, v in enumerate(values)], context def _deque_unflatten(values: Iterable[T], context: Context) -> deque[T]: return deque(values, maxlen=context) _private_register_pytree_node( tuple, _tuple_flatten, _tuple_unflatten, serialized_type_name="builtins.tuple", flatten_with_keys_fn=_tuple_flatten_with_keys, ) _private_register_pytree_node( list, _list_flatten, _list_unflatten, 
serialized_type_name="builtins.list", flatten_with_keys_fn=_list_flatten_with_keys, ) _private_register_pytree_node( dict, _dict_flatten, _dict_unflatten, serialized_type_name="builtins.dict", flatten_with_keys_fn=_dict_flatten_with_keys, ) _private_register_pytree_node( namedtuple, # type: ignore[arg-type] _namedtuple_flatten, _namedtuple_unflatten, serialized_type_name="collections.namedtuple", to_dumpable_context=_namedtuple_serialize, from_dumpable_context=_namedtuple_deserialize, flatten_with_keys_fn=_namedtuple_flatten_with_keys, ) _private_register_pytree_node( OrderedDict, _ordereddict_flatten, _ordereddict_unflatten, serialized_type_name="collections.OrderedDict", flatten_with_keys_fn=_ordereddict_flatten_with_keys, ) _private_register_pytree_node( defaultdict, _defaultdict_flatten, _defaultdict_unflatten, serialized_type_name="collections.defaultdict", to_dumpable_context=_defaultdict_serialize, from_dumpable_context=_defaultdict_deserialize, flatten_with_keys_fn=_defaultdict_flatten_with_keys, ) _private_register_pytree_node( deque, _deque_flatten, _deque_unflatten, serialized_type_name="collections.deque", flatten_with_keys_fn=_deque_flatten_with_keys, ) STANDARD_DICT_TYPES: frozenset[type] = frozenset({dict, OrderedDict, defaultdict}) BUILTIN_TYPES: frozenset[type] = frozenset( { tuple, list, dict, namedtuple, # type: ignore[arg-type] OrderedDict, defaultdict, deque, }, ) @deprecated( "torch.utils._pytree._is_namedtuple_instance is private and will be removed in a future release. " "Please use torch.utils._pytree.is_namedtuple_instance instead.", category=FutureWarning, ) def _is_namedtuple_instance(tree: Any) -> bool: return is_namedtuple_instance(tree) def _get_node_type(tree: Any) -> Any: node_type = type(tree) # All namedtuple types are implicitly registered as pytree nodes. # XXX: Other parts of the codebase expect namedtuple types always return # `namedtuple` instead of the actual namedtuple type. Even if the type # is explicitly registered. 
if is_namedtuple_class(node_type): return namedtuple return node_type # A leaf is defined as anything that is not a Node. def tree_is_leaf( tree: PyTree, is_leaf: Callable[[PyTree], bool] | None = None, ) -> bool: """Check if a pytree is a leaf. >>> tree_is_leaf(1) True >>> tree_is_leaf(None) True >>> tree_is_leaf([1, 2, 3]) False >>> tree_is_leaf((1, 2, 3), is_leaf=lambda x: isinstance(x, tuple)) True >>> tree_is_leaf({"a": 1, "b": 2, "c": 3}) False >>> tree_is_leaf({"a": 1, "b": 2, "c": None}) False """ if is_leaf is not None and is_leaf(tree): return True return _get_node_type(tree) not in SUPPORTED_NODES @deprecated( "torch.utils._pytree._is_leaf is private and will be removed in a future release. " "Please use torch.utils._pytree.tree_is_leaf instead.", category=FutureWarning, ) def _is_leaf(tree: PyTree, is_leaf: Callable[[PyTree], bool] | None = None) -> bool: return tree_is_leaf(tree, is_leaf=is_leaf) # A TreeSpec represents the structure of a pytree. It holds: # "type": the type of root Node of the pytree # context: some context that is useful in unflattening the pytree # children(): specs for each child of the root Node # num_nodes: the total number of nodes # num_leaves: the number of leaves # num_children: the number of children of the root Node (i.e., len(children())) # is_leaf(): whether the root Node is a leaf @dataclasses.dataclass(init=False, frozen=True, eq=True, repr=False)
structseq
python
kamyu104__LeetCode-Solutions
Python/path-sum-ii.py
{ "start": 181, "end": 841 }
class ____(object): # @param root, a tree node # @param sum, an integer # @return a list of lists of integers def pathSum(self, root, sum): return self.pathSumRecu([], [], root, sum) def pathSumRecu(self, result, cur, root, sum): if root is None: return result if root.left is None and root.right is None and root.val == sum: result.append(cur + [root.val]) return result cur.append(root.val) self.pathSumRecu(result, cur, root.left, sum - root.val) self.pathSumRecu(result, cur,root.right, sum - root.val) cur.pop() return result
Solution
python
apache__airflow
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
{ "start": 14363, "end": 14703 }
class ____(BaseModel): """ Asset schema for responses with fields that are needed for Runtime. """ name: Annotated[str, Field(title="Name")] uri: Annotated[str, Field(title="Uri")] group: Annotated[str, Field(title="Group")] extra: Annotated[dict[str, JsonValue] | None, Field(title="Extra")] = None
AssetResponse
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyflakes/F821_29.py
{ "start": 424, "end": 663 }
class ____(DeclarativeBase): some_mapping: Mapped[list[Bar]] | None = None # Should not trigger F821 (resolveable forward reference) simplified: list[Bar] | None = None # Should not trigger F821 (resolveable forward reference)
Base
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 206199, "end": 208245 }
class ____(GeneratedAirbyteSource): class OAuth20: @public def __init__(self, access_token: str, credentials: Optional[str] = None): self.credentials = check.opt_str_param(credentials, "credentials") self.access_token = check.str_param(access_token, "access_token") class APIToken: @public def __init__(self, email: str, api_token: str, credentials: Optional[str] = None): self.credentials = check.opt_str_param(credentials, "credentials") self.email = check.str_param(email, "email") self.api_token = check.str_param(api_token, "api_token") @public def __init__( self, name: str, start_date: str, subdomain: str, credentials: Union["ZendeskSupportSource.OAuth20", "ZendeskSupportSource.APIToken"], ): """Airbyte Source for Zendesk Support. Documentation can be found at https://docs.airbyte.com/integrations/sources/zendesk-support Args: name (str): The name of the destination. start_date (str): The date from which you'd like to replicate data for Zendesk Support API, in the format YYYY-MM-DDT00:00:00Z. All data generated after this date will be replicated. subdomain (str): This is your Zendesk subdomain that can be found in your account URL. For example, in {my_subdomain}.zendesk.com, where my_subdomain is the value of your subdomain. credentials (Union[ZendeskSupportSource.OAuth20, ZendeskSupportSource.APIToken]): Zendesk service provides two authentication methods. Choose between: `OAuth2.0` or `API token`. """ self.start_date = check.str_param(start_date, "start_date") self.subdomain = check.str_param(subdomain, "subdomain") self.credentials = check.inst_param( credentials, "credentials", (ZendeskSupportSource.OAuth20, ZendeskSupportSource.APIToken), ) super().__init__("Zendesk Support", name)
ZendeskSupportSource
python
huggingface__transformers
tests/models/big_bird/test_modeling_big_bird.py
{ "start": 15715, "end": 24445 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( BigBirdModel, BigBirdForPreTraining, BigBirdForMaskedLM, BigBirdForCausalLM, BigBirdForMultipleChoice, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, ) if is_torch_available() else () ) # Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante all_generative_model_classes = () pipeline_model_mapping = ( { "feature-extraction": BigBirdModel, "fill-mask": BigBirdForMaskedLM, "question-answering": BigBirdForQuestionAnswering, "text-classification": BigBirdForSequenceClassification, "text-generation": BigBirdForCausalLM, "token-classification": BigBirdForTokenClassification, "zero-shot": BigBirdForSequenceClassification, } if is_torch_available() else {} ) # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = BigBirdModelTester(self) self.config_tester = ConfigTester(self, config_class=BigBirdConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_retain_grad_hidden_states_attentions(self): # bigbird cannot keep gradients in attentions when `attention_type=block_sparse` if self.model_tester.attention_type == "original_full": super().test_retain_grad_hidden_states_attentions() @slow def 
test_model_from_pretrained(self): model_name = "google/bigbird-roberta-base" model = BigBirdForPreTraining.from_pretrained(model_name) self.assertIsNotNone(model) def test_model_various_attn_type(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["original_full", "block_sparse"]: config_and_inputs[0].attention_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_fast_integration(self): # fmt: off input_ids = torch.tensor( [[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73],[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 12, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 28, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 18, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231 dtype=torch.long, device=torch_device, ) # fmt: on input_ids = input_ids % self.model_tester.vocab_size input_ids[1] = input_ids[1] - 1 attention_mask = torch.ones((input_ids.shape), device=torch_device) attention_mask[:, :-10] = 0 config, _, _, _, _, _, _ = self.model_tester.prepare_config_and_inputs() torch.manual_seed(0) model = BigBirdModel(config).eval().to(torch_device) with torch.no_grad(): hidden_states = model(input_ids, 
attention_mask=attention_mask).last_hidden_state self.assertTrue( torch.allclose( hidden_states[0, 0, :5], torch.tensor([1.4825, 0.0774, 0.8226, -0.2962, -0.9593], device=torch_device), atol=1e-3, ) ) def test_auto_padding(self): self.model_tester.seq_length = 241 config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_auto_padding(*config_and_inputs) def test_for_change_to_full_attn(self): self.model_tester.seq_length = 9 config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_change_to_full_attn(*config_and_inputs) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch @slow
BigBirdModelTest
python
walkccc__LeetCode
solutions/107. Binary Tree Level Order Traversal II/107.py
{ "start": 0, "end": 452 }
class ____: def levelOrderBottom(self, root: TreeNode | None) -> list[list[int]]: if not root: return [] ans = [] q = collections.deque([root]) while q: currLevel = [] for _ in range(len(q)): node = q.popleft() currLevel.append(node.val) if node.left: q.append(node.left) if node.right: q.append(node.right) ans.append(currLevel) return ans[::-1]
Solution
python
sympy__sympy
sympy/functions/elementary/hyperbolic.py
{ "start": 18327, "end": 24595 }
class ____(HyperbolicFunction): r""" ``tanh(x)`` is the hyperbolic tangent of ``x``. The hyperbolic tangent function is $\frac{\sinh(x)}{\cosh(x)}$. Examples ======== >>> from sympy import tanh >>> from sympy.abc import x >>> tanh(x) tanh(x) See Also ======== sympy.functions.elementary.hyperbolic.sinh sympy.functions.elementary.hyperbolic.cosh sympy.functions.elementary.hyperbolic.atanh """ def fdiff(self, argindex=1): if argindex == 1: return S.One - tanh(self.args[0])**2 else: raise ArgumentIndexError(self, argindex) def inverse(self, argindex=1): """ Returns the inverse of this function. """ return atanh @classmethod def eval(cls, arg): if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.One elif arg is S.NegativeInfinity: return S.NegativeOne elif arg.is_zero: return S.Zero elif arg.is_negative: return -cls(-arg) else: if arg is S.ComplexInfinity: return S.NaN i_coeff = _imaginary_unit_as_coefficient(arg) if i_coeff is not None: if i_coeff.could_extract_minus_sign(): return -I * tan(-i_coeff) return I * tan(i_coeff) else: if arg.could_extract_minus_sign(): return -cls(-arg) if arg.is_Add: x, m = _peeloff_ipi(arg) if m: tanhm = tanh(m*pi*I) if tanhm is S.ComplexInfinity: return coth(x) else: # tanhm == 0 return tanh(x) if arg.is_zero: return S.Zero if arg.func == asinh: x = arg.args[0] return x/sqrt(1 + x**2) if arg.func == acosh: x = arg.args[0] return sqrt(x - 1) * sqrt(x + 1) / x if arg.func == atanh: return arg.args[0] if arg.func == acoth: return 1/arg.args[0] @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) a = 2**(n + 1) B = bernoulli(n + 1) F = factorial(n + 1) return a*(a - 1) * B/F * x**n def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): if self.args[0].is_extended_real: if deep: hints['complex'] = False return (self.expand(deep, **hints), S.Zero) else: return (self, S.Zero) if deep: re, 
im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() denom = sinh(re)**2 + cos(im)**2 return (sinh(re)*cosh(re)/denom, sin(im)*cos(im)/denom) def _eval_expand_trig(self, **hints): arg = self.args[0] if arg.is_Add: n = len(arg.args) TX = [tanh(x, evaluate=False)._eval_expand_trig() for x in arg.args] p = [0, 0] # [den, num] for i in range(n + 1): p[i % 2] += symmetric_poly(i, TX) return p[1]/p[0] elif arg.is_Mul: coeff, terms = arg.as_coeff_Mul() if coeff.is_Integer and coeff > 1: T = tanh(terms) n = [nC(range(coeff), k)*T**k for k in range(1, coeff + 1, 2)] d = [nC(range(coeff), k)*T**k for k in range(0, coeff + 1, 2)] return Add(*n)/Add(*d) return tanh(arg) def _eval_rewrite_as_tractable(self, arg, limitvar=None, **kwargs): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp - neg_exp)/(pos_exp + neg_exp) def _eval_rewrite_as_exp(self, arg, **kwargs): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp - neg_exp)/(pos_exp + neg_exp) def _eval_rewrite_as_tan(self, arg, **kwargs): return -I * tan(I * arg, evaluate=False) def _eval_rewrite_as_cot(self, arg, **kwargs): return -I / cot(I * arg, evaluate=False) def _eval_rewrite_as_sinh(self, arg, **kwargs): return I*sinh(arg)/sinh(pi*I/2 - arg, evaluate=False) def _eval_rewrite_as_cosh(self, arg, **kwargs): return I*cosh(pi*I/2 - arg, evaluate=False)/cosh(arg) def _eval_rewrite_as_coth(self, arg, **kwargs): return 1/coth(arg) def _eval_as_leading_term(self, x, logx, cdir): from sympy.series.order import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def _eval_is_real(self): arg = self.args[0] if arg.is_real: return True re, im = arg.as_real_imag() # if denom = 0, tanh(arg) = zoo if re == 0 and im % pi == pi/2: return None # check if im is of the form n*pi/2 to make sin(2*im) = 0 # if not, im could be a number, return False in that case return (im % (pi/2)).is_zero def 
_eval_is_extended_real(self): if self.args[0].is_extended_real: return True def _eval_is_positive(self): if self.args[0].is_extended_real: return self.args[0].is_positive def _eval_is_negative(self): if self.args[0].is_extended_real: return self.args[0].is_negative def _eval_is_finite(self): arg = self.args[0] re, im = arg.as_real_imag() denom = cos(im)**2 + sinh(re)**2 if denom == 0: return False elif denom.is_number: return True if arg.is_extended_real: return True def _eval_is_zero(self): arg = self.args[0] if arg.is_zero: return True
tanh
python
apache__airflow
airflow-core/tests/unit/api_fastapi/core_api/routes/ui/test_dashboard.py
{ "start": 6745, "end": 10556 }
class ____: @pytest.mark.parametrize( ("params", "expected"), [ ( {"start_date": "2023-01-01T00:00", "end_date": "2023-08-02T00:00"}, { "dag_run_states": {"failed": 1, "queued": 1, "running": 1, "success": 1}, "dag_run_types": {"backfill": 0, "asset_triggered": 1, "manual": 0, "scheduled": 3}, "task_instance_states": { "deferred": 0, "failed": 2, "no_status": 4, "queued": 0, "removed": 0, "restarting": 0, "running": 0, "scheduled": 0, "skipped": 0, "success": 2, "up_for_reschedule": 0, "up_for_retry": 0, "upstream_failed": 0, }, }, ), ( {"start_date": "2023-02-02T00:00", "end_date": "2023-06-02T00:00"}, { "dag_run_states": {"failed": 1, "queued": 0, "running": 0, "success": 0}, "dag_run_types": {"backfill": 0, "asset_triggered": 1, "manual": 0, "scheduled": 0}, "task_instance_states": { "deferred": 0, "failed": 2, "no_status": 0, "queued": 0, "removed": 0, "restarting": 0, "running": 0, "scheduled": 0, "skipped": 0, "success": 0, "up_for_reschedule": 0, "up_for_retry": 0, "upstream_failed": 0, }, }, ), ( {"start_date": "2023-02-02T00:00"}, { "dag_run_states": {"failed": 1, "queued": 1, "running": 1, "success": 0}, "dag_run_types": {"backfill": 0, "asset_triggered": 1, "manual": 0, "scheduled": 2}, "task_instance_states": { "deferred": 0, "failed": 2, "no_status": 4, "queued": 0, "removed": 0, "restarting": 0, "running": 0, "scheduled": 0, "skipped": 0, "success": 0, "up_for_reschedule": 0, "up_for_retry": 0, "upstream_failed": 0, }, }, ), ], ) @pytest.mark.usefixtures("freeze_time_for_dagruns", "make_dag_runs") def test_should_response_200(self, test_client, params, expected): with assert_queries_count(4): response = test_client.get("/dashboard/historical_metrics_data", params=params) assert response.status_code == 200 assert response.json() == expected def test_should_response_401(self, unauthenticated_test_client): response = unauthenticated_test_client.get( "/dashboard/historical_metrics_data", params={"start_date": "2023-02-02T00:00"} ) assert 
response.status_code == 401 def test_should_response_403(self, unauthorized_test_client): response = unauthorized_test_client.get( "/dashboard/historical_metrics_data", params={"start_date": "2023-02-02T00:00"} ) assert response.status_code == 403
TestHistoricalMetricsDataEndpoint
python
doocs__leetcode
solution/2200-2299/2221.Find Triangular Sum of an Array/Solution.py
{ "start": 0, "end": 224 }
class ____: def triangularSum(self, nums: List[int]) -> int: for k in range(len(nums) - 1, 0, -1): for i in range(k): nums[i] = (nums[i] + nums[i + 1]) % 10 return nums[0]
Solution
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_commas/COM81.py
{ "start": 6907, "end": 7158 }
class ____[T,]: pass # t-string examples kwargs.pop("remove", t"this {trailing_comma}",) kwargs.pop("remove", t"this {f"{trailing_comma}"}",) t"""This is a test. { "Another sentence." if True else "Don't add a trailing comma here ->" }"""
C
python
pytorch__pytorch
test/dynamo/test_install_free_tensors.py
{ "start": 12786, "end": 20778 }
class ____(torch._dynamo.test_case.TestCase): @torch._dynamo.config.patch(inline_inbuilt_nn_modules=True) @torch._dynamo.config.patch(install_free_tensors=True) def check_export_matches_expectation( self, fn_to_export: Callable, expected_num_exported_inputs: int, example_inputs: Sequence[Any], ) -> None: """Exports the original fn, then: * Checks that the number of inputs in the exported is expected_num_exported_inputs * Checks that the exported fn and original fn are equal """ exported_fn = torch._dynamo.export(fn_to_export) out_graph = exported_fn(*example_inputs)[0] actual_num_inputs = get_num_input_nodes(out_graph) self.assertEqual(actual_num_inputs, expected_num_exported_inputs) self.assertEqual(out_graph(*example_inputs), fn_to_export(*example_inputs)) def test_simple_linear(self) -> None: net = SimpleLinearModule() input1 = torch.randn((1, 5)) self.check_export_matches_expectation(net, 1, (input1,)) def test_fn(x: torch.Tensor) -> torch.Tensor: return net(x) self.check_export_matches_expectation(test_fn, 1, (input1,)) # Check multiple inputs def test_fn_2(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return net(x) + net(y) input2 = torch.randn((1, 5)) self.check_export_matches_expectation(test_fn_2, 2, (input1, input2)) def test_simple_batchnorm(self) -> None: net = torch.nn.BatchNorm2d(3) tensor = torch.randn((1, 3, 3, 3)) self.check_export_matches_expectation(net, 1, (tensor,)) def test_fn(x: torch.Tensor) -> torch.Tensor: return net(x) self.check_export_matches_expectation(test_fn, 1, (tensor,)) def test_resnet_structure(self) -> None: net = ResBlock(3, 3) tensor = torch.randn(1, 3, 3, 3) self.check_export_matches_expectation(net, 1, (tensor,)) def test_fn(x: torch.Tensor) -> torch.Tensor: return net(x) self.check_export_matches_expectation(test_fn, 1, (tensor,)) def test_transformer(self) -> None: transformer = torch.nn.Transformer(d_model=32).eval() src = torch.rand(10, 32, 32) tgt = torch.rand(20, 32, 32) 
self.check_export_matches_expectation(transformer, 2, (src, tgt)) def test_fn(src: torch.Tensor, tgt: torch.Tensor) -> torch.Tensor: return transformer(src, tgt) self.check_export_matches_expectation(test_fn, 2, (src, tgt)) def test_optimizing_params_in_input(self) -> None: param = torch.nn.Parameter(torch.randn(1, 5)) net = SimpleLinearModule() def test_fn(x: torch.Tensor) -> torch.Tensor: return net(x) self.check_export_matches_expectation(net, 1, (param,)) self.check_export_matches_expectation(test_fn, 1, (param,)) x = torch.randn(1, 5) def test_fn2(x: torch.Tensor, param: torch.nn.Parameter) -> torch.Tensor: return net(x) + param # net gets installed, param does not here self.check_export_matches_expectation(test_fn2, 2, (x, param)) def test_fn3( x: torch.Tensor, list_params: list[torch.nn.Parameter] ) -> torch.Tensor: return net(x) + sum(list_params) # list_params should not be installed or inlined here self.check_export_matches_expectation(test_fn3, 2, (x, [param, param])) def test_optimizing_buffer_in_input(self) -> None: buf = torch.nn.Buffer(data=torch.ones((1, 5))) net = SimpleLinearModule() def test_fn(x: torch.Tensor) -> torch.Tensor: return net(x) self.check_export_matches_expectation(net, 1, (buf,)) self.check_export_matches_expectation(test_fn, 1, (buf,)) x = torch.randn(1, 5) def test_fn2(x: torch.Tensor, buf: torch.nn.Buffer) -> torch.Tensor: return net(x) + buf # net gets installed, buf does not here self.check_export_matches_expectation(test_fn2, 2, (x, buf)) def test_optimizing_buffer_and_param_in_input(self) -> None: param = torch.nn.Parameter(torch.randn(5, 1)) buf = torch.nn.Buffer(data=torch.ones((1, 1))) x = torch.randn(1, 5) def test_linear_explicit( x: torch.Tensor, a: torch.Tensor, b: torch.Tensor ) -> torch.Tensor: return a * x + b # Now, param and buf are input so should not be inlined self.check_export_matches_expectation(test_linear_explicit, 3, (x, param, buf)) def test_global_tensor_export(self) -> None: global x x = 
torch.randn((5, 5)) def fn(a: torch.Tensor) -> torch.Tensor: return a + x inp = torch.randn(5, 5) self.check_export_matches_expectation(fn, 1, (inp,)) def test_nonlocal_closure(self) -> None: x = torch.randn((5, 5)) def fn(a: torch.Tensor) -> torch.Tensor: return a + x inp = torch.randn((5, 5)) self.check_export_matches_expectation(fn, 1, (inp,)) @torch._dynamo.config.patch(inline_inbuilt_nn_modules=True) @torch._dynamo.config.patch(install_free_tensors=True) def test_modify_net_state(self) -> None: class Mod(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(5, 5) self.a = None def forward(self, x): if self.a is None: self.a = torch.ones_like(x) return self.linear(x) + self.a mod = Mod() inp = torch.randn(5, 5) # NOTE: since this fn modifies original class, # need to get reference value before tracing res = mod(inp) mod.a = None ep = torch._dynamo.export(mod) graph, _ = ep(inp) self.assertEqual(graph(inp), res) def test_list_of_tensor(self) -> None: def fn(x: list[torch.Tensor]): return x[0] + x[1] inp = [torch.tensor([1.3, 3.77, 0.1]), torch.tensor([8.7, 6.23, 9.9])] self.check_export_matches_expectation(fn, 2, (inp,)) def test_nested_list_of_tensor(self) -> None: def fn(x: list[Union[list[torch.Tensor], torch.Tensor]]): return x[0][0] + x[1] # type: ignore[index] inp = [[torch.tensor([1.3, 3.77, 0.1])], torch.tensor([8.7, 6.23, 9.9])] self.check_export_matches_expectation(fn, 2, (inp,)) def test_dict_of_tensor(self) -> None: inp_dict = {"temp": torch.tensor(12)} def fn(inp: dict[str, torch.Tensor]) -> torch.Tensor: return inp_dict["temp"] + 5 self.check_export_matches_expectation(fn, 1, (inp_dict,)) # TODO[lucaskabela]: register the flatten/unflatten function so we can evaluate this test @unittest.expectedFailure def test_user_defined_object(self) -> None: class UserDefinedTestClass: def __init__(self, x, y) -> None: self.x = x self.y = y x = torch.randn((3, 3)) y = torch.randn((3, 3)) def fn(obj: UserDefinedTestClass, inp: 
torch.Tensor) -> torch.Tensor: return obj.x + obj.y + inp z = torch.randn((3, 1)) self.check_export_matches_expectation(fn, 2, (UserDefinedTestClass(x, y), z)) def test_tensors_as_nn_attr(self) -> None: class Mod(torch.nn.Module): def __init__(self): super().__init__() self.a = torch.ones((5, 5)) self.b = torch.ones((5, 5)) def forward(self, x): return self.a + self.b + x mod = Mod() inp = torch.randn(5, 5) self.check_export_matches_expectation(mod, 1, (inp,)) if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
InstallParamsWhenExport
python
pytorch__pytorch
torch/distributed/pipelining/microbatch.py
{ "start": 1182, "end": 1430 }
class ____(_CustomReducer): pass sum_reducer = _LossReducer(torch.tensor(0.0), operator.add) # Default chunking dimension is 0. This is used for the case where the user did # not specify a chunking dimension. DEFAULT_CHUNK_DIM = 0
_LossReducer
python
run-llama__llama_index
llama-index-integrations/program/llama-index-program-guidance/llama_index/program/guidance/base.py
{ "start": 489, "end": 3070 }
class ____(BaseLLMFunctionProgram["GuidanceLLM"]): """ A guidance-based function that returns a pydantic model. Note: this interface is not yet stable. """ def __init__( self, output_cls: Type[BaseModel], prompt_template_str: str, guidance_llm: Optional["GuidanceLLM"] = None, verbose: bool = False, ): if not guidance_llm: llm = guidance_llm else: llm = OpenAI("gpt-3.5-turbo") full_str = prompt_template_str + "\n" self._full_str = full_str self._guidance_program = partial(self.program, llm=llm, silent=not verbose) self._output_cls = output_cls self._verbose = verbose def program( self, llm: "GuidanceLLM", silent: bool, tools_str: str, query_str: str, **kwargs: dict, ) -> "GuidanceLLM": """A wrapper to execute the program with new guidance version.""" given_query = self._full_str.replace("{{tools_str}}", tools_str).replace( "{{query_str}}", query_str ) with user(): llm = llm + given_query with assistant(): llm = llm + gen(stop=".") return llm # noqa: RET504 @classmethod def from_defaults( cls, output_cls: Type[BaseModel], prompt_template_str: Optional[str] = None, prompt: Optional[PromptTemplate] = None, llm: Optional["GuidanceLLM"] = None, **kwargs: Any, ) -> "BaseLLMFunctionProgram": """From defaults.""" if prompt is None and prompt_template_str is None: raise ValueError("Must provide either prompt or prompt_template_str.") if prompt is not None and prompt_template_str is not None: raise ValueError("Must provide either prompt or prompt_template_str.") if prompt is not None: prompt_template_str = prompt.template prompt_template_str = cast(str, prompt_template_str) return cls( output_cls, prompt_template_str, guidance_llm=llm, **kwargs, ) @property def output_cls(self) -> Type[BaseModel]: return self._output_cls def __call__( self, *args: Any, **kwargs: Any, ) -> BaseModel: executed_program = self._guidance_program(**kwargs) response = str(executed_program) return parse_pydantic_from_guidance_program( response=response, cls=self._output_cls )
GuidancePydanticProgram
python
huggingface__transformers
src/transformers/models/instructblip/modeling_instructblip.py
{ "start": 11380, "end": 12480 }
class ____(GradientCheckpointingLayer): def __init__(self, config: InstructBlipConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = InstructBlipAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = InstructBlipMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) @auto_docstring def forward( self, hidden_states: torch.Tensor, **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, _ = self.self_attn( hidden_states=hidden_states, **kwargs, ) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual return hidden_states @auto_docstring
InstructBlipEncoderLayer
python
pytorch__pytorch
torch/__init__.py
{ "start": 70529, "end": 70755 }
class ____(_LegacyStorage): @classproperty def dtype(self): _warn_typed_storage_removal(stacklevel=3) return self._dtype @classproperty def _dtype(self): return torch.double
DoubleStorage
python
getsentry__sentry
src/sentry/integrations/api/serializers/rest_framework/data_forwarder.py
{ "start": 1170, "end": 9796 }
class ____(Serializer): organization_id = serializers.IntegerField() is_enabled = serializers.BooleanField(default=True) enroll_new_projects = serializers.BooleanField(default=False) provider = serializers.ChoiceField( choices=[ (DataForwarderProviderSlug.SEGMENT, "Segment"), (DataForwarderProviderSlug.SQS, "Amazon SQS"), (DataForwarderProviderSlug.SPLUNK, "Splunk"), ] ) config = serializers.DictField(child=serializers.CharField(allow_blank=True), default=dict) project_ids = serializers.ListField( child=serializers.IntegerField(), allow_empty=True, required=False, default=list ) def validate_config(self, config) -> SQSConfig | SegmentConfig | SplunkConfig: # Filter out empty string values (cleared optional fields) config = {k: v for k, v in config.items() if v != ""} provider = self.initial_data.get("provider") if provider == DataForwarderProviderSlug.SQS: return self._validate_sqs_config(config) elif provider == DataForwarderProviderSlug.SEGMENT: return self._validate_segment_config(config) elif provider == DataForwarderProviderSlug.SPLUNK: return self._validate_splunk_config(config) raise ValidationError(f"Invalid provider: {provider}") def validate_project_ids(self, project_ids: list[int]) -> list[int]: if not project_ids: return project_ids valid_project_ids = set( Project.objects.filter( organization_id=self.context["organization"].id, id__in=project_ids ).values_list("id", flat=True) ) if len(valid_project_ids) != len(project_ids): invalid_ids = set(project_ids) - valid_project_ids raise ValidationError( f"Invalid project IDs for this organization: {', '.join(map(str, invalid_ids))}" ) return project_ids def _validate_all_fields_present( self, config: dict, required_fields: list[str] | frozenset[str], provider: DataForwarderProviderSlug, ) -> None: missing_fields = [field for field in required_fields if field not in config] if missing_fields: raise ValidationError( f"Missing required {provider.value} fields: {', '.join(missing_fields)}" ) def 
_validate_sqs_queue_url(self, config: dict, errors: list[str]) -> None: queue_url = config.get("queue_url") sqs_url_pattern = ( r"^https://sqs\.[a-z0-9\-]+\.amazonaws\.com/\d+/[a-zA-Z0-9\-_/]+(?:\.fifo)?$" ) if not queue_url or not re.match(sqs_url_pattern, queue_url): errors.append( "queue_url must be a valid SQS URL format: " "https://sqs.<region>.amazonaws.com/<account>/<queue-name>" ) def _validate_sqs_region(self, config: dict, errors: list[str]) -> None: region = config.get("region") valid_regions = get_regions() if not region or region not in valid_regions: errors.append("region must be a valid AWS region") def _validate_sqs_message_group_id(self, config: dict, errors: list[str]) -> None: message_group_id = config.get("message_group_id") queue_url = config.get("queue_url") if isinstance(queue_url, str) and queue_url.endswith(".fifo") and not message_group_id: errors.append("message_group_id is required for FIFO queues") def _validate_sqs_s3_bucket(self, config: dict, errors: list[str]) -> None: s3_bucket = config.get("s3_bucket") if s3_bucket is not None: s3_bucket_pattern = r"^[a-z0-9\-\.]+$" if not re.match(s3_bucket_pattern, s3_bucket): errors.append("s3_bucket must be a valid S3 bucket name") def _validate_segment_write_key(self, config: dict) -> None: segment_write_key_pattern = r"^[a-zA-Z0-9_\-]+$" write_key = config.get("write_key") if not write_key or not re.match(segment_write_key_pattern, write_key): raise ValidationError("write_key must be a valid Segment write key format") def _validate_splunk_instance_url(self, config: dict, errors: list[str]) -> None: splunk_url_pattern = r"^https?://[a-zA-Z0-9\-\.]+(?::\d+)?(?:/.*)?$" instance_url = config.get("instance_url") if not instance_url or not re.match(splunk_url_pattern, instance_url): errors.append("instance_url must be a valid URL starting with http:// or https://") def _validate_splunk_token_format(self, config: dict, errors: list[str]) -> None: token = config.get("token") if token: 
splunk_token_pattern = r"^[a-zA-Z0-9\-]+$" if not re.match(splunk_token_pattern, token): errors.append("token must be a valid Splunk HEC token format") def validate(self, attrs: Mapping[str, Any]) -> Mapping[str, Any]: organization_id = attrs.get("organization_id") provider = attrs.get("provider") if organization_id and provider: existing = DataForwarder.objects.filter( organization_id=organization_id, provider=provider ) if self.instance: existing = existing.exclude(id=self.instance.id) if existing.exists(): raise ValidationError( f"A DataForwarder with provider '{provider}' already exists for this organization." ) return attrs def _validate_sqs_config(self, config) -> SQSConfig: self._validate_all_fields_present(config, SQS_REQUIRED_KEYS, DataForwarderProviderSlug.SQS) errors: list[str] = [] self._validate_sqs_queue_url(config, errors) self._validate_sqs_region(config, errors) self._validate_sqs_message_group_id(config, errors) self._validate_sqs_s3_bucket(config, errors) if errors: raise ValidationError(errors) return config def _validate_segment_config(self, config) -> SegmentConfig: self._validate_all_fields_present( config, SEGMENT_REQUIRED_KEYS, DataForwarderProviderSlug.SEGMENT ) self._validate_segment_write_key(config) return config def _validate_splunk_config(self, config) -> SplunkConfig: self._validate_all_fields_present( config, SPLUNK_REQUIRED_KEYS, DataForwarderProviderSlug.SPLUNK ) errors: list[str] = [] self._validate_splunk_instance_url(config, errors) self._validate_splunk_token_format(config, errors) if errors: raise ValidationError(errors) return config def create(self, validated_data: Mapping[str, Any]) -> DataForwarder: project_ids: list[int] = validated_data["project_ids"] data = {k: v for k, v in validated_data.items() if k != "project_ids"} with transaction.atomic(using=router.db_for_write(DataForwarder)): data_forwarder = DataForwarder.objects.create(**data) # Enroll specified projects if project_ids: 
DataForwarderProject.objects.bulk_create( [ DataForwarderProject( data_forwarder=data_forwarder, project_id=project_id, is_enabled=True, ) for project_id in project_ids ] ) return data_forwarder def update(self, instance: DataForwarder, validated_data: Mapping[str, Any]) -> DataForwarder: project_ids: list[int] = validated_data["project_ids"] with transaction.atomic(using=router.db_for_write(DataForwarder)): for attr, value in validated_data.items(): if attr != "project_ids": setattr(instance, attr, value) instance.save() # Enroll or update specified projects for project_id in project_ids: DataForwarderProject.objects.update_or_create( data_forwarder=instance, project_id=project_id, defaults={"is_enabled": True}, ) # Unenroll projects not in the list DataForwarderProject.objects.filter(data_forwarder=instance).exclude( project_id__in=project_ids ).delete() return instance
DataForwarderSerializer
python
walkccc__LeetCode
solutions/2781. Length of the Longest Valid Substring/2781-2.py
{ "start": 569, "end": 1046 }
class ____: def longestValidSubstring(self, word: str, forbidden: list[str]) -> int: ans = 0 trie = Trie() for s in forbidden: trie.insert(s) # r is the rightmost index to make word[l..r] a valid substring. r = len(word) - 1 for l in range(len(word) - 1, -1, -1): for end in range(l, min(l + 10, r + 1)): if trie.search(word, l, end + 1): r = end - 1 break ans = max(ans, r - l + 1) return ans
Solution
python
jupyterlab__jupyterlab
jupyterlab/labapp.py
{ "start": 8983, "end": 9855 }
class ____(JupyterApp): version = version description = """ Print the configured paths for the JupyterLab application The application path can be configured using the JUPYTERLAB_DIR environment variable. The user settings path can be configured using the JUPYTERLAB_SETTINGS_DIR environment variable or it will fall back to `/lab/user-settings` in the default Jupyter configuration directory. The workspaces path can be configured using the JUPYTERLAB_WORKSPACES_DIR environment variable or it will fall back to '/lab/workspaces' in the default Jupyter configuration directory. """ def start(self): print(f"Application directory: {get_app_dir()}") print(f"User Settings directory: {get_user_settings_dir()}") print(f"Workspaces directory: {get_workspaces_dir()}")
LabPathApp
python
bokeh__bokeh
tests/unit/bokeh/core/property/test_constraints.py
{ "start": 1612, "end": 3696 }
class ____: def test___str__(self) -> None: prop = bcpc.TypeOfAttr(Instance(Parent), "p0", Instance(Child0)) assert str(prop) == "TypeOfAttr(Instance(Parent), 'p0', Instance(Child0))" def test_is_valid(self) -> None: prop0 = bcpc.TypeOfAttr(Instance(Parent), "p0", Instance(Child0)) prop1 = bcpc.TypeOfAttr(Instance(Parent), "p0", Instance(Child1)) prop2 = bcpc.TypeOfAttr(Instance(Parent), "p0", Instance(Child2)) prop3 = bcpc.TypeOfAttr(Instance(Parent), "p1", Int) prop4 = bcpc.TypeOfAttr(Instance(Parent), "p1", String) prop5 = bcpc.TypeOfAttr(Instance(Parent), "p2", String) assert prop0.is_valid(Parent(p0=Child0())) is True assert prop0.is_valid(Parent(p0=Child1())) is True assert prop0.is_valid(Parent(p0=Child2())) is False assert prop1.is_valid(Parent(p0=Child0())) is False assert prop1.is_valid(Parent(p0=Child1())) is True assert prop1.is_valid(Parent(p0=Child2())) is False assert prop2.is_valid(Parent(p0=Child0())) is False assert prop2.is_valid(Parent(p0=Child1())) is False assert prop2.is_valid(Parent(p0=Child2())) is True assert prop3.is_valid(Parent(p0=Child0(), p1=0)) is True assert prop3.is_valid(Parent(p0=Child1(), p1="")) is False assert prop4.is_valid(Parent(p0=Child0(), p1=0)) is False assert prop4.is_valid(Parent(p0=Child1(), p1="")) is True assert prop5.is_valid(Parent(p0=Child1(), p1="")) is False #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- Test___all__ = verify_all(bcpc, ALL)
Test_TypeOfAttr
python
apache__airflow
helm-tests/tests/helm_tests/webserver/test_webserver.py
{ "start": 50458, "end": 51080 }
class ____: """Tests webserver secret key secret.""" def test_should_add_annotations_to_webserver_secret_key_secret(self): docs = render_chart( values={ "airflowVersion": "2.10.5", "webserverSecretAnnotations": {"test_annotation": "test_annotation_value"}, }, show_only=["templates/secrets/webserver-secret-key-secret.yaml"], )[0] assert "annotations" in jmespath.search("metadata", docs) assert jmespath.search("metadata.annotations", docs)["test_annotation"] == "test_annotation_value"
TestWebserverSecretKeySecret
python
plotly__plotly.py
plotly/graph_objs/splom/_stream.py
{ "start": 233, "end": 3489 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "splom" _path_str = "splom.stream" _valid_props = {"maxpoints", "token"} @property def maxpoints(self): """ Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. The 'maxpoints' property is a number and may be specified as: - An int or float in the interval [0, 10000] Returns ------- int|float """ return self["maxpoints"] @maxpoints.setter def maxpoints(self, val): self["maxpoints"] = val @property def token(self): """ The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. The 'token' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["token"] @token.setter def token(self, val): self["token"] = val @property def _prop_descriptions(self): return """\ maxpoints Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. token The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. """ def __init__(self, arg=None, maxpoints=None, token=None, **kwargs): """ Construct a new Stream object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.splom.Stream` maxpoints Sets the maximum number of points to keep on the plots from an incoming stream. If `maxpoints` is set to 50, only the newest 50 points will be displayed on the plot. token The stream id number links a data trace on a plot with a stream. See https://chart-studio.plotly.com/settings for more details. 
Returns ------- Stream """ super().__init__("stream") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.splom.Stream constructor must be a dict or an instance of :class:`plotly.graph_objs.splom.Stream`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("maxpoints", arg, maxpoints) self._set_property("token", arg, token) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Stream
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/compiler.py
{ "start": 11213, "end": 11463 }
class ____(_BaseCompilerStackEntry, total=False): compile_state: CompileState need_result_map_for_nested: bool need_result_map_for_compound: bool select_0: ReturnsRows insert_from_select: Select[Unpack[TupleAny]]
_CompilerStackEntry
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/config_types.py
{ "start": 6793, "end": 7918 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneConfigType, GrapheneWrappingConfigType) name = "ArrayConfigType" def __init__( self, get_config_type: Callable[[str], ConfigTypeSnap], config_type_snap: ConfigTypeSnap, ): self._config_type_snap = check.inst_param( config_type_snap, "config_type_snap", ConfigTypeSnap ) self._get_config_type = get_config_type super().__init__(**_ctor_kwargs_for_snap(config_type_snap)) def resolve_recursive_config_types( self, graphene_info: ResolveInfo ) -> list[GrapheneConfigTypeUnion]: return [ to_config_type(self._get_config_type, config_type_key) for config_type_key in _recursive_config_type_keys( self._get_config_type, self._config_type_snap ) ] def resolve_of_type(self, graphene_info: ResolveInfo) -> GrapheneConfigTypeUnion: return to_config_type( self._get_config_type, self._config_type_snap.inner_type_key, )
GrapheneArrayConfigType
python
doocs__leetcode
solution/2800-2899/2815.Max Pair Sum in an Array/Solution.py
{ "start": 0, "end": 284 }
class ____: def maxSum(self, nums: List[int]) -> int: ans = -1 for i, x in enumerate(nums): for y in nums[i + 1 :]: v = x + y if ans < v and max(str(x)) == max(str(y)): ans = v return ans
Solution
python
huggingface__transformers
src/transformers/utils/dummy_torchaudio_objects.py
{ "start": 129, "end": 312 }
class ____(metaclass=DummyObject): _backends = ["torchaudio"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchaudio"])
GraniteSpeechFeatureExtractor
python
weaviate__weaviate-python-client
weaviate/collections/queries/near_object/generate/sync.py
{ "start": 316, "end": 465 }
class ____( Generic[Properties, References], _NearObjectGenerateExecutor[ConnectionSync, Properties, References], ): pass
_NearObjectGenerate
python
tensorflow__tensorflow
tensorflow/tools/ci_build/update_version.py
{ "start": 2026, "end": 11758 }
class ____(object): """Version class object that stores SemVer version information.""" def __init__(self, major, minor, patch, identifier_string, version_type): """Constructor. Args: major: major string eg. (1) minor: minor string eg. (3) patch: patch string eg. (1) identifier_string: extension string eg. (-rc0) version_type: version parameter ((SNAPSHOT|NIGHTLY)_VERSION) """ self.major = major self.minor = minor self.patch = patch self.identifier_string = identifier_string self.version_type = version_type self._update_string() def _update_string(self): self.string = "%s.%s.%s%s" % (self.major, self.minor, self.patch, self.identifier_string) def __str__(self): return self.string def set_identifier_string(self, identifier_string): self.identifier_string = identifier_string self._update_string() @property def pep_440_str(self): if self.version_type == SNAPSHOT_VERSION: return_string = "%s.%s.%s%s" % (self.major, self.minor, self.patch, self.identifier_string) return return_string.replace("-", "") else: return_string = "%s.%s.%s" % (self.major, self.minor, self.identifier_string) return return_string.replace("-", "") @staticmethod def parse_from_string(string, version_type): """Returns version object from Semver string. Args: string: version string version_type: version parameter Raises: RuntimeError: If the version string is not valid. """ # Check validity of new version string. if not re.search(r"[0-9]+\.[0-9]+\.[a-zA-Z0-9]+", string): raise RuntimeError("Invalid version string: %s" % string) major, minor, extension = string.split(".", 2) # Isolate patch and identifier string if identifier string exists. 
extension_split = extension.split("-", 1) patch = extension_split[0] if len(extension_split) == 2: identifier_string = "-" + extension_split[1] else: identifier_string = "" return Version(major, minor, patch, identifier_string, version_type) def _get_regex_match(line, regex, is_last_match=False): match = re.search(regex, line) if match: return (match.group(1), is_last_match) return (None, False) def get_current_semver_version(): """Returns a Version object of current version. Returns: version: Version object of current SemVer string based on information from .bazelrc and tf_version.bzl files. """ # Get current version information. bazel_rc_file = open(BAZEL_RC, "r") wheel_type = "" wheel_build_date = "" wheel_version_suffix = "" for line in bazel_rc_file: wheel_type = ( _get_regex_match(line, '^common --repo_env=ML_WHEEL_TYPE="(.+)"')[0] or wheel_type ) wheel_build_date = ( _get_regex_match( line, '^common --repo_env=ML_WHEEL_BUILD_DATE="([0-9]*)"' )[0] or wheel_build_date ) (wheel_version_suffix, is_matched) = _get_regex_match( line, '^common --repo_env=ML_WHEEL_VERSION_SUFFIX="(.*)"', is_last_match=True, ) if is_matched: break tf_version_bzl_file = open(TF_VERSION_BZL, "r") wheel_version = "" for line in tf_version_bzl_file: (wheel_version, is_matched) = _get_regex_match( line, '^TF_VERSION = "([0-9.]+)"', is_last_match=True ) if is_matched: break (old_major, old_minor, old_patch_num) = wheel_version.split(".") if wheel_type == "nightly": version_type = NIGHTLY_VERSION else: version_type = SNAPSHOT_VERSION old_extension = "" if wheel_type == "nightly": old_extension = "-dev{}".format(wheel_build_date) else: if wheel_build_date: old_extension += "-dev{}".format(wheel_build_date) if wheel_version_suffix: old_extension += wheel_version_suffix return Version(old_major, old_minor, old_patch_num, old_extension, version_type) def update_readme(old_version, new_version): """Update README.""" pep_440_str = new_version.pep_440_str 
replace_string_in_line(r"%s\.%s\.([[:alnum:]]+)-" % (old_version.major, old_version.minor), "%s-" % pep_440_str, README_MD) def _get_mmp(version): return "%s.%s.%s" % (version.major, version.minor, version.patch) def _get_wheel_type(version): if version.version_type == NIGHTLY_VERSION: return "nightly" else: return "snapshot" def _get_wheel_build_date(version): date_match = re.search(".*dev([0-9]{8}).*", version.identifier_string) if date_match: return date_match.group(1) return "" def _get_wheel_version_suffix(version): return version.identifier_string.replace( "-dev{}".format(_get_wheel_build_date(version)), "" ) def update_tf_version_bzl(old_version, new_version): """Update tf_version.bzl.""" old_mmp = _get_mmp(old_version) new_mmp = _get_mmp(new_version) replace_string_in_line( 'TF_VERSION = "%s"' % old_mmp, 'TF_VERSION = "%s"' % new_mmp, TF_VERSION_BZL, ) def update_bazelrc(old_version, new_version): """Update .bazelrc.""" old_wheel_type = _get_wheel_type(old_version) new_wheel_type = _get_wheel_type(new_version) replace_string_in_line( 'common --repo_env=ML_WHEEL_TYPE="%s"' % old_wheel_type, 'common --repo_env=ML_WHEEL_TYPE="%s"' % new_wheel_type, BAZEL_RC, ) old_wheel_build_date = _get_wheel_build_date(old_version) new_wheel_build_date = _get_wheel_build_date(new_version) replace_string_in_line( 'common --repo_env=ML_WHEEL_BUILD_DATE="%s"' % old_wheel_build_date, 'common --repo_env=ML_WHEEL_BUILD_DATE="%s"' % new_wheel_build_date, BAZEL_RC, ) old_wheel_suffix = _get_wheel_version_suffix(old_version) new_wheel_suffix = _get_wheel_version_suffix(new_version) replace_string_in_line( 'common --repo_env=ML_WHEEL_VERSION_SUFFIX="%s"' % old_wheel_suffix, 'common --repo_env=ML_WHEEL_VERSION_SUFFIX="%s"' % new_wheel_suffix, BAZEL_RC, ) def major_minor_change(old_version, new_version): """Check if a major or minor change occurred.""" major_mismatch = old_version.major != new_version.major minor_mismatch = old_version.minor != new_version.minor if major_mismatch or 
minor_mismatch: return True return False def check_for_lingering_string(lingering_string): """Check for given lingering strings.""" formatted_string = lingering_string.replace(".", r"\.") try: linger_str_output = subprocess.check_output( ["grep", "-rnoH", formatted_string, TF_SRC_DIR]) linger_strs = linger_str_output.decode("utf8").split("\n") except subprocess.CalledProcessError: linger_strs = [] if linger_strs: print("WARNING: Below are potentially instances of lingering old version " "string \"%s\" in source directory \"%s/\" that are not " "updated by this script. Please check them manually!" % (lingering_string, TF_SRC_DIR)) for linger_str in linger_strs: print(linger_str) else: print("No lingering old version strings \"%s\" found in source directory" " \"%s/\". Good." % (lingering_string, TF_SRC_DIR)) def check_for_old_version(old_version, new_version): """Check for old version references.""" for old_ver in [old_version.string, old_version.pep_440_str]: check_for_lingering_string(old_ver) if major_minor_change(old_version, new_version): old_r_major_minor = "r%s.%s" % (old_version.major, old_version.minor) check_for_lingering_string(old_r_major_minor) def main(): """This script updates all instances of version in the tensorflow directory. 
Requirements: version: The version tag OR nightly: Create a nightly tag with current date Raises: RuntimeError: If the script is not being run from tf source dir """ parser = argparse.ArgumentParser(description="Cherry picking automation.") # Arg information parser.add_argument("--version", help="<new_major_ver>.<new_minor_ver>.<new_patch_ver>", default="") parser.add_argument("--nightly", help="disable the service provisioning step", action="store_true") args = parser.parse_args() check_all_files() old_version = get_current_semver_version() if args.nightly: if args.version: new_version = Version.parse_from_string(args.version, NIGHTLY_VERSION) new_version.set_identifier_string("-dev" + time.strftime("%Y%m%d")) else: new_version = Version(old_version.major, str(old_version.minor), old_version.patch, "-dev" + time.strftime("%Y%m%d"), NIGHTLY_VERSION) else: new_version = Version.parse_from_string(args.version, SNAPSHOT_VERSION) update_tf_version_bzl(old_version, new_version) update_bazelrc(old_version, new_version) update_readme(old_version, new_version) # Print transition details. print("Major: %s -> %s" % (old_version.major, new_version.major)) print("Minor: %s -> %s" % (old_version.minor, new_version.minor)) print("Patch: %s -> %s\n" % (old_version.patch, new_version.patch)) check_for_old_version(old_version, new_version) if __name__ == "__main__": main()
Version
python
pytorch__pytorch
torch/distributed/checkpoint/_experimental/checkpoint_process.py
{ "start": 1213, "end": 1520 }
class ____: """ A dataclass for storing the command to be sent to the worker process. Note: This relies on pickling to send the command to the worker process. Handle backward compatibility accordingly. """ request_type: RequestType payload: dict[str, Any] @dataclass
WorkerRequest
python
pyinstaller__pyinstaller
PyInstaller/lib/modulegraph/modulegraph.py
{ "start": 3623, "end": 5194 }
class ____ (namedtuple("DependencyInfo", ["conditional", "function", "tryexcept", "fromlist"])): __slots__ = () def _merged(self, other): if (not self.conditional and not self.function and not self.tryexcept) \ or (not other.conditional and not other.function and not other.tryexcept): return DependencyInfo( conditional=False, function=False, tryexcept=False, fromlist=self.fromlist and other.fromlist) else: return DependencyInfo( conditional=self.conditional or other.conditional, function=self.function or other.function, tryexcept=self.tryexcept or other.tryexcept, fromlist=self.fromlist and other.fromlist) #FIXME: Shift the following Node class hierarchy into a new #"PyInstaller.lib.modulegraph.node" module. This module is much too long. #FIXME: Refactor "_deferred_imports" from a tuple into a proper lightweight #class leveraging "__slots__". If not for backward compatibility, we'd just #leverage a named tuple -- but this should do just as well. #FIXME: Move the "packagepath" attribute into the "Package" class. Only #packages define the "__path__" special attribute. The codebase currently #erroneously tests whether "module.packagepath is not None" to determine #whether a node is a package or not. However, "isinstance(module, Package)" is #a significantly more reliable test. Refactor the former into the latter.
DependencyInfo
python
numba__numba
numba/cuda/cudadrv/driver.py
{ "start": 89811, "end": 92747 }
class ____(Linker): """ Linker supporting Minor Version Compatibility, backed by the cubinlinker package. """ def __init__(self, max_registers=None, lineinfo=False, cc=None): try: from cubinlinker import CubinLinker except ImportError as err: raise ImportError(_MVC_ERROR_MESSAGE) from err if cc is None: raise RuntimeError("MVCLinker requires Compute Capability to be " "specified, but cc is None") super().__init__(max_registers, lineinfo, cc) arch = f"sm_{cc[0] * 10 + cc[1]}" ptx_compile_opts = ['--gpu-name', arch, '-c'] if max_registers: arg = f"--maxrregcount={max_registers}" ptx_compile_opts.append(arg) if lineinfo: ptx_compile_opts.append('--generate-line-info') self.ptx_compile_options = tuple(ptx_compile_opts) self._linker = CubinLinker(f"--arch={arch}") @property def info_log(self): return self._linker.info_log @property def error_log(self): return self._linker.error_log def add_ptx(self, ptx, name='<cudapy-ptx>'): try: from ptxcompiler import compile_ptx from cubinlinker import CubinLinkerError except ImportError as err: raise ImportError(_MVC_ERROR_MESSAGE) from err compile_result = compile_ptx(ptx.decode(), self.ptx_compile_options) try: self._linker.add_cubin(compile_result.compiled_program, name) except CubinLinkerError as e: raise LinkerError from e def add_file(self, path, kind): try: from cubinlinker import CubinLinkerError except ImportError as err: raise ImportError(_MVC_ERROR_MESSAGE) from err try: with open(path, 'rb') as f: data = f.read() except FileNotFoundError: raise LinkerError(f'{path} not found') name = pathlib.Path(path).name if kind == FILE_EXTENSION_MAP['cubin']: fn = self._linker.add_cubin elif kind == FILE_EXTENSION_MAP['fatbin']: fn = self._linker.add_fatbin elif kind == FILE_EXTENSION_MAP['a']: raise LinkerError(f"Don't know how to link {kind}") elif kind == FILE_EXTENSION_MAP['ptx']: return self.add_ptx(data, name) else: raise LinkerError(f"Don't know how to link {kind}") try: fn(data, name) except CubinLinkerError as e: raise 
LinkerError from e def complete(self): try: from cubinlinker import CubinLinkerError except ImportError as err: raise ImportError(_MVC_ERROR_MESSAGE) from err try: return self._linker.complete() except CubinLinkerError as e: raise LinkerError from e
MVCLinker
python
has2k1__plotnine
plotnine/facets/facet_grid.py
{ "start": 567, "end": 11755 }
class ____(facet): """ Wrap 1D Panels onto 2D surface Parameters ---------- rows : Variable expressions along the rows of the facets/panels. Each expression is evaluated within the context of the dataframe. cols : Variable expressions along the columns of the facets/panels. Each expression is evaluated within the context of the dataframe. margins : variable names to compute margins for. True will compute all possible margins. space : Control the size of the `x` or `y` sides of the panels. The size also depends to the `scales` parameter. If a string, it should be one of `['fixed', 'free', 'free_x', 'free_y']`{.py}. If a `dict`, it indicates the relative facet size ratios such as: ```python {"x": [1, 2], "y": [3, 1, 1]} ``` This means that in the horizontal direction, the second panel will be twice the length of the first. In the vertical direction the top facet will be the 3 times longer then the second and third facets. Note that the number of dimensions in the list must equal the number of facets that will be produced. shrink : Whether to shrink the scales to the output of the statistics instead of the raw data. labeller : How to label the facets. A string value if it should be one of `["label_value", "label_both", "label_context"]`{.py}. as_table : If `True`, the facets are laid out like a table with the highest values at the bottom-right. If `False` the facets are laid out like a plot with the highest value a the top-right drop : If `True`, all factor levels not used in the data will automatically be dropped. If `False`, all factor levels will be shown, regardless of whether or not they appear in the data. 
""" def __init__( self, rows: Optional[str | Sequence[str]] = None, cols: Optional[str | Sequence[str]] = None, *, margins: bool | Sequence[str] = False, scales: Literal["fixed", "free", "free_x", "free_y"] = "fixed", space: ( Literal["fixed", "free", "free_x", "free_y"] | FacetSpaceRatios ) = "fixed", shrink: bool = True, labeller: Literal[ "label_value", "label_both", "label_context" ] = "label_value", as_table: bool = True, drop: bool = True, ): facet.__init__( self, scales=scales, shrink=shrink, labeller=labeller, as_table=as_table, drop=drop, ) self.rows, self.cols = parse_grid_rows_cols(rows, cols) self.space = space self.margins = margins def _get_panels_gridspec(self): """ Create gridspec for the panels """ from plotnine._mpl.gridspec import p9GridSpec layout = self.layout space = self.space ratios = {} # Calculate the width (x) & height (y) ratios for space=free[xy] if isinstance(space, str): if space in {"free", "free_x"}: pidx: list[int] = ( layout.layout.sort_values("COL") .drop_duplicates("COL") .index.tolist() ) panel_views = [layout.panel_params[i] for i in pidx] ratios["width_ratios"] = [ np.ptp(pv.x.range) for pv in panel_views ] if space in {"free", "free_y"}: pidx = ( layout.layout.sort_values("ROW") .drop_duplicates("ROW") .index.tolist() ) panel_views = [layout.panel_params[i] for i in pidx] ratios["height_ratios"] = [ np.ptp(pv.y.range) for pv in panel_views ] if isinstance(self.space, dict): if len(self.space["x"]) != self.ncol: raise ValueError( "The number of x-ratios for the facet space sizes " "should match the number of columns." ) if len(self.space["y"]) != self.nrow: raise ValueError( "The number of y-ratios for the facet space sizes " "should match the number of rows." 
) ratios["width_ratios"] = self.space.get("x") ratios["height_ratios"] = self.space.get("y") return p9GridSpec( self.nrow, self.ncol, self.figure, nest_into=self.plot._gridspec[0], **ratios, ) def compute_layout(self, data: list[pd.DataFrame]) -> pd.DataFrame: if not self.rows and not self.cols: self.nrow, self.ncol = 1, 1 return layout_null() base_rows = combine_vars( data, self.environment, self.rows, drop=self.drop ) if not self.as_table: # Reverse the order of the rows base_rows = base_rows[::-1] base_cols = combine_vars( data, self.environment, self.cols, drop=self.drop ) base = cross_join(base_rows, base_cols) if self.margins: base = add_margins(base, (self.rows, self.cols), self.margins) base = base.drop_duplicates().reset_index(drop=True) n = len(base) panel = ninteraction(base, drop=True) panel = pd.Categorical(panel, categories=range(1, n + 1)) if self.rows: rows = ninteraction(base[self.rows], drop=True) else: rows = [1] * len(panel) if self.cols: cols = ninteraction(base[self.cols], drop=True) else: cols = [1] * len(panel) layout = pd.DataFrame( { "PANEL": panel, "ROW": rows, "COL": cols, } ) layout = pd.concat([layout, base], axis=1) layout = layout.sort_values("PANEL") layout.reset_index(drop=True, inplace=True) # Relax constraints, if necessary layout["SCALE_X"] = layout["COL"] if self.free["x"] else 1 layout["SCALE_Y"] = layout["ROW"] if self.free["y"] else 1 layout["AXIS_X"] = layout["ROW"] == layout["ROW"].max() layout["AXIS_Y"] = layout["COL"] == layout["COL"].min() self.nrow = layout["ROW"].max() self.ncol = layout["COL"].max() return layout def map(self, data: pd.DataFrame, layout: pd.DataFrame) -> pd.DataFrame: if not len(data): data["PANEL"] = pd.Categorical( [], categories=layout["PANEL"].cat.categories, ordered=True ) return data vars = (*self.rows, *self.cols) margin_vars: tuple[list[str], list[str]] = ( list(data.columns.intersection(self.rows)), list(data.columns.intersection(self.cols)), ) data = add_margins(data, margin_vars, 
self.margins) facet_vals = eval_facet_vars(data, vars, self.environment) data, facet_vals = add_missing_facets(data, layout, vars, facet_vals) # assign each point to a panel if len(facet_vals) and len(facet_vals.columns): keys = join_keys(facet_vals, layout, vars) data["PANEL"] = match(keys["x"], keys["y"], start=1) else: # Special case of no facetting data["PANEL"] = 1 # matching dtype and # the categories(panel numbers) for the data should be in the # same order as the panels. i.e the panels are the reference, # they "know" the right order data["PANEL"] = pd.Categorical( data["PANEL"], categories=layout["PANEL"].cat.categories, ordered=True, ) data.reset_index(drop=True, inplace=True) return data def make_strips(self, layout_info: layout_details, ax: Axes) -> Strips: lst = [] if layout_info.is_top and self.cols: s = strip(self.cols, layout_info, self, ax, "top") lst.append(s) if layout_info.is_right and self.rows: s = strip(self.rows, layout_info, self, ax, "right") lst.append(s) return Strips(lst) def parse_grid_rows_cols( rows: Optional[str | Sequence[str]] = None, cols: Optional[str | Sequence[str]] = None, ) -> tuple[list[str], list[str]]: """ Return the rows & cols that make up the grid """ if cols is None and isinstance(rows, str): # formula return parse_grid_facets_old(rows) if cols is None: cols = [] elif isinstance(cols, str): cols = [cols] if rows is None: rows = [] elif isinstance(rows, str): rows = [rows] return list(rows), list(cols) def parse_grid_facets_old( facets: str | tuple[str | Sequence[str], str | Sequence[str]], ) -> tuple[list[str], list[str]]: """ Return two lists of facetting variables, for the rows & columns This parse the old & silently deprecated style. """ valid_seqs = [ "(var1,)", "('var1', '.')", "('var1', 'var2')", "('.', 'var1')", "((var1, var2), (var3, var4))", ] error_msg_s = ( f"Valid sequences for specifying 'facets' look like {valid_seqs}" ) valid_forms = [ "var1", "var1 ~ .", "var1 ~ var2", ". 
~ var1", "var1 + var2 ~ var3 + var4", ". ~ func(var1) + func(var2)", ". ~ func(var1+var3) + func(var2)", ] + valid_seqs error_msg_f = f"Valid formula for 'facet_grid' look like {valid_forms}" if not isinstance(facets, str): if len(facets) == 1: rows = ensure_list_spec(facets[0]) cols = [] elif len(facets) == 2: rows = ensure_list_spec(facets[0]) cols = ensure_list_spec(facets[1]) else: raise PlotnineError(error_msg_s) return list(rows), list(cols) if "~" not in facets: rows = ensure_list_spec(facets) return list(rows), [] # Example of allowed formulae # "c ~ a + b' # '. ~ func(a) + func(b)' # 'func(c) ~ func(a+1) + func(b+2)' try: lhs, rhs = facets.split("~") except ValueError as e: raise PlotnineError(error_msg_f) from e else: lhs = lhs.strip() rhs = rhs.strip() rows = ensure_list_spec(lhs) cols = ensure_list_spec(rhs) return list(rows), list(cols) def ensure_list_spec(term: Sequence[str] | str) -> Sequence[str]: """ Convert a str specification to a list spec e.g. 'a' -> ['a'] 'a + b' -> ['a', 'b'] '.' -> [] '' -> [] """ if isinstance(term, str): splitter = " + " if " + " in term else "+" if term in [".", ""]: return [] return [var.strip() for var in term.split(splitter)] else: return term
facet_grid
python
pytorch__pytorch
torch/_inductor/fx_passes/group_batch_fusion.py
{ "start": 44917, "end": 47700 }
class ____(BatchPointwiseOpsFusionFactory): """ Batch simple match related ops such as nan_to_num in pre grad pass. """ def __init__(self, op, **kwargs): super().__init__(op, **kwargs) self.op = op def match(self, node: torch.fx.Node): input = get_arg_value(node, 0, "input") if CallFunctionVarArgs(self.op).match(node) and is_node_meta_valid(node): # check the input has the same shape and its users have the same target # check all clamp operators have the same min and max values, and # nan_to_num operators use the same default value. child = next(iter(node.users.keys())) group_key = ( str(input.meta["example_value"].shape) + str(node.kwargs) + str(child.target) ) else: group_key = None return group_key def fuse(self, graph: torch.fx.GraphModule, subset: list[torch.fx.Node]): batch_nodes = [] batch_inputs = [] batch_inputs_metadata = [] kwargs = subset[0].kwargs for node in subset: batch_nodes.append(node) input = get_arg_value(node, 0, "input") batch_inputs.append(input) batch_inputs_metadata.append(input.meta["example_value"]) with graph.inserting_before(subset[0]): # type: ignore[operator] stack_inputs = graph.call_function( # type: ignore[operator] torch.stack, args=(batch_inputs,), kwargs={"dim": 0} ) update_stack_example_value(stack_inputs, batch_inputs_metadata) batch_op = graph.call_function( # type: ignore[operator] self.op, args=(stack_inputs,), kwargs=kwargs, ) batch_op.meta["example_value"] = self.op( stack_inputs.meta["example_value"], **kwargs ) unbind_op = graph.call_function( # type: ignore[operator] torch.unbind, args=(batch_op,), kwargs={"dim": 0} ) unbind_op.meta["example_value"] = torch.unbind( batch_op.meta["example_value"], dim=0 ) for i, node in enumerate(batch_nodes): with graph.inserting_after(unbind_op): # type: ignore[operator] getitem = graph.call_function(operator.getitem, args=(unbind_op, i)) # type: ignore[operator] node.replace_all_uses_with(getitem) getitem.meta.update(node.meta) graph.erase_node(node) # type: ignore[operator] 
counters["inductor"]["batch_" + self.op.__name__.lower().split(".")[0]] += 1 @register_fusion("batch_tanh")
BatchMathOpsPreGradFusion
python
google__jax
tests/mosaic/gpu_test.py
{ "start": 95266, "end": 100959 }
class ____(TestCase): def test_wg_communication(self): i32 = ir.IntegerType.get_signless(32) def kernel(ctx, dst, scratch): tmp, barriers = scratch del ctx # Unused. wg_idx = arith.divui(mgpu.warp_idx(), c(4, i32)) is_first_wg = arith.cmpi(arith.CmpIPredicate.eq, wg_idx, c(0, i32)) is_second_wg = arith.cmpi(arith.CmpIPredicate.eq, wg_idx, c(1, i32)) arr = mgpu.FragmentedArray.splat( arith.addi(wg_idx, c(1, i32)), (128,), mgpu.WGStridedFragLayout((128,), 1), is_signed=False, ) with ir.InsertionPoint(scf.IfOp(is_first_wg).then_block): arr.store_untiled(tmp) barriers[0].arrive() # Signal that tmp is ready. barriers[1].wait() # Wait for the other warp to produce tmp. final_arr = arr + mgpu.FragmentedArray.load_strided( tmp, is_signed=False ) final_arr.store_untiled(memref_slice(dst, 0), optimized=False) scf.yield_([]) with ir.InsertionPoint(scf.IfOp(is_second_wg).then_block): barriers[0].wait() final_arr = arr + mgpu.FragmentedArray.load_strided( tmp, is_signed=False ) barriers[2].arrive() barriers[2].wait() # Synchronize this warpgroup before we overwrite tmp. arr.store_untiled(tmp) barriers[1].arrive() # Signal that tmp is ready. 
final_arr.store_untiled(memref_slice(dst, 1), optimized=False) scf.yield_([]) out_shape = jax.ShapeDtypeStruct((2, 128), jnp.int32) y = mgpu.as_gpu_kernel( kernel, (1, 1, 1), (2 * 128, 1, 1), (), out_shape, ( jax.ShapeDtypeStruct((128,), jnp.int32), mgpu.Barrier(arrival_count=128, num_barriers=3), ), )() np.testing.assert_array_equal(y, np.full_like(y, 3, dtype=np.int32)) @parameterized.named_parameters( ( f"_{''.join(map(str, collective_dims))}={collective_size}{'_' + ''.join(map(str, noncollective_dims)) if noncollective_dims else ''}{'_group' if group_dims else ''}", collective_dims, noncollective_dims, collective_size, group_dims, ) for collective_dims in itertools.chain.from_iterable( itertools.combinations(Dimension, n) for n in range(1, 4) ) for noncollective_dims in itertools.chain.from_iterable( itertools.combinations(Dimension, n) for n in range(3) ) for collective_size in (1, 2, 4) for group_dims in (False,) + ((True,) if len(collective_dims) > 1 else ()) if all(d not in noncollective_dims for d in collective_dims) ) def test_collective_arrive(self, collective_dims, noncollective_dims, collective_size, group_dims): i32 = ir.IntegerType.get_signless(32) index = ir.IndexType.get() cluster = [1, 1, 1] for d in collective_dims: cluster[d] = collective_size for d in noncollective_dims: cluster[d] = 2 if math.prod(cluster) > jtu.get_cuda_nonportable_max_cluster_size(): self.skipTest("Cluster too big") is_trivial = math.prod(cluster[d] for d in collective_dims) == 1 def kernel(ctx, dst, mask, collective_barrier): cluster_idx = ctx.cluster_idx() if not is_trivial: memref.store(collective_barrier.cluster_mask, mask, [cluster_idx]) else: assert collective_barrier.cluster_mask is None collective_barrier.arrive() collective_barrier.wait() tid = thread_idx() linear_idx = arith.index_cast(index, tid) stride = c(128, index) for d in gpu.Dimension: linear_idx = arith.addi(linear_idx, arith.muli(gpu.block_id(d), stride)) stride = arith.muli(stride, gpu.grid_dim(d)) 
memref.store(arith.index_cast(i32, linear_idx), dst, [linear_idx]) out_shape = jax.ShapeDtypeStruct((math.prod(cluster) * 128,), jnp.int32) mask_shape = jax.ShapeDtypeStruct((math.prod(cluster),), jnp.int32) barrier_dims = collective_dims if group_dims: barrier_dims = (collective_dims[:2], *collective_dims[2:]) scratch = mgpu.ClusterBarrier(barrier_dims) y, mask = mgpu.as_gpu_kernel( kernel, cluster, (128, 1, 1), (), (out_shape, mask_shape), scratch, cluster=cluster, )() np.testing.assert_array_equal( y, np.arange(math.prod(cluster) * 128, dtype=np.int32) ) if not is_trivial: # Verify that the mask is correct. Blocks are column-major, hence the transpose. block_bits = 1 << np.arange(math.prod(cluster), dtype=np.int32).reshape(cluster[::-1]).T expected_mask = 0 for bd in barrier_dims: if isinstance(bd, gpu.Dimension): bd = (bd,) least_significant_slice = tuple( slice(None) if d in bd else 0 for d in gpu.Dimension ) mask_bits = block_bits[least_significant_slice] expected_mask |= np.bitwise_or.reduce(mask_bits, axis=None) self.assertEqual(min(mask), expected_mask) def test_collective_arrival_count(self): i32 = ir.IntegerType.get_signless(32) cluster = [2, 1, 1] def kernel(ctx, dst, collective_barrier): collective_barrier.arrive() collective_barrier.arrive() collective_barrier.arrive() collective_barrier.arrive() collective_barrier.wait() memref.store(arith.constant(i32, 1), dst, []) out_shape = jax.ShapeDtypeStruct((), jnp.int32) scratch = mgpu.ClusterBarrier((gpu.Dimension.x,), arrival_count=4) y = mgpu.as_gpu_kernel( kernel, cluster, (128, 1, 1), (), out_shape, scratch, cluster=cluster, )() np.testing.assert_array_equal(y, np.ones((), dtype=np.int32))
BarrierTest
python
django-haystack__django-haystack
test_haystack/elasticsearch5_tests/test_backend.py
{ "start": 60925, "end": 61797 }
class ____(TestCase): def setUp(self): self.raw_es = elasticsearch.Elasticsearch( settings.HAYSTACK_CONNECTIONS["elasticsearch"]["URL"] ) def test_recreate_index(self): clear_elasticsearch_index() sb = connections["elasticsearch"].get_backend() sb.silently_fail = True sb.setup() original_mapping = self.raw_es.indices.get_mapping(index=sb.index_name) sb.clear() sb.setup() try: updated_mapping = self.raw_es.indices.get_mapping(sb.index_name) except elasticsearch.NotFoundError: self.fail("There is no mapping after recreating the index") self.assertEqual( original_mapping, updated_mapping, "Mapping after recreating the index differs from the original one", )
RecreateIndexTestCase
python
joke2k__faker
tests/providers/test_date_time.py
{ "start": 31870, "end": 32622 }
class ____(unittest.TestCase): """Tests date_time in the ru_RU locale""" def setUp(self): self.fake = Faker("ru_RU") Faker.seed(0) def test_day(self): for _ in range(50): day = self.fake.day_of_week() assert isinstance(day, str) assert day in RuProvider.DAY_NAMES.values() def test_month(self): for _ in range(50): month = self.fake.month_name() assert isinstance(month, str) assert month in RuProvider.MONTH_NAMES.values() def test_timezone(self): for _ in range(100): timezone = self.fake.timezone() assert isinstance(timezone, str) assert re.match(r"[А-Яа-я]", timezone)
TestRuRu
python
catalyst-team__catalyst
examples/detection/models/yolo_x.py
{ "start": 5144, "end": 5919 }
class ____(nn.Module):
    """Focus width and height information into channel space."""

    def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="silu"):
        super().__init__()
        # Four interleaved spatial sub-samples are stacked on the channel
        # axis, hence the 4x input channels for the fused convolution.
        self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act)

    def forward(self, x):
        # (b, c, w, h) -> (b, 4c, w/2, h/2): pick every other pixel in both
        # spatial dims, once for each of the four (row, col) phase offsets.
        tl = x[..., ::2, ::2]
        tr = x[..., ::2, 1::2]
        bl = x[..., 1::2, ::2]
        br = x[..., 1::2, 1::2]
        # Concatenation order matches the original implementation:
        # top-left, bottom-left, top-right, bottom-right.
        stacked = torch.cat((tl, bl, tr, br), dim=1)
        return self.conv(stacked)
Focus
python
huggingface__transformers
src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
{ "start": 43691, "end": 48538 }
class ____(UniSpeechSatPreTrainedModel):
    """UniSpeechSat model with quantizer and projection heads for pretraining.

    NOTE(review): the pretraining loss is not implemented yet (see the TODOs
    in ``forward``); the quantizer/projection layers are constructed here but
    ``forward`` currently returns the extracted features as the logits.
    """

    def __init__(self, config: UniSpeechSatConfig):
        super().__init__(config)
        self.unispeech_sat = UniSpeechSatModel(config)
        # Dropout applied to the extracted features before quantization.
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)

        self.quantizer = UniSpeechSatGumbelVectorQuantizer(config)
        # Project quantized codevectors and transformer hidden states into a
        # shared space of size `proj_codevector_dim`.
        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
        self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)

        self.dropout = nn.Dropout(config.final_dropout)

        self.speaker_proj = nn.Linear(config.hidden_size, config.codevector_dim)
        # One learnable embedding per speaker cluster, zero-initialized.
        self.label_embeddings_concat = nn.Parameter(torch.FloatTensor(config.num_clusters, config.codevector_dim))
        self.label_embeddings_concat.data.zero_()

        self.layer_norm_for_extract = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        if self.config.do_stable_layer_norm:
            # NOTE(review): sets `requires_grad` on the Module object itself,
            # not on its parameters — presumably intended to freeze the layer;
            # confirm this has the desired effect.
            self.layer_norm_for_extract.requires_grad = False

        # Initialize weights and apply final processing
        self.post_init()

    def set_gumbel_temperature(self, temperature: int):
        """
        Set the Gumbel softmax temperature to a given value. Only necessary for training
        """
        self.quantizer.temperature = temperature

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech_sat.feature_extractor._freeze_parameters()

    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 1,
    ):
        """
        Compute logits for contrastive loss based using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
        """
        # Positives first, negatives after, along the leading dimension.
        target_features = torch.cat([target_features, negative_features], dim=0)

        # Cosine similarity computed in float32 for numerical stability, then
        # cast back to the target dtype.
        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
        logits = logits.type_as(target_features)

        # apply temperature
        logits = logits / temperature
        return logits

    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, UniSpeechSatForPreTrainingOutput]:
        r"""
        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining
        >>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices

        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base")
        >>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base")
        >>> # TODO: Add full pretraining example
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.unispeech_sat(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        transformer_features = outputs[0]

        # quantize all (unmasked) extracted features and project to final vq dim
        extract_features = self.dropout_features(outputs[1])

        # TODO(PVP) - add pretraining logic and add to tests
        logits = extract_features
        # Placeholders until the pretraining objective is implemented.
        loss = quantized_features = codevector_perplexity = None

        if not return_dict:
            if loss is not None:
                return (loss, logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
            return (logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]

        return UniSpeechSatForPreTrainingOutput(
            loss=loss,
            logits=logits,
            projected_states=transformer_features,
            projected_quantized_states=quantized_features,
            codevector_perplexity=codevector_perplexity,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# NOTE(review): presumably the index where hidden states begin in tuple
# outputs (after loss/logits) — confirm against the usages below this chunk.
_HIDDEN_STATES_START_POSITION = 2


# Decorates a class defined after this chunk (not visible here).
@auto_docstring(
    custom_intro="""
    UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
    """
)
UniSpeechSatForPreTraining
python
numba__numba
numba/cuda/tests/cudadrv/test_context_stack.py
{ "start": 679, "end": 2149 }
class ____(CUDATestCase):
    """Tests for the CUDA context-management API (memory info, GPU switching)."""

    def tearDown(self):
        super().tearDown()
        # Close the CUDA context so each test starts from a clean slate.
        cuda.close()

    def test_context_memory(self):
        try:
            mem = cuda.current_context().get_memory_info()
        except NotImplementedError:
            # External memory-management plugins are not required to
            # implement get_memory_info(); skip rather than fail.
            self.skipTest('EMM Plugin does not implement get_memory_info()')
        # `mem` behaves both as a (free, total) tuple and as an object with
        # `.free` / `.total` attributes; check the two views agree.
        self.assertIsInstance(mem.free, numbers.Number)
        self.assertEqual(mem.free, mem[0])
        self.assertIsInstance(mem.total, numbers.Number)
        self.assertEqual(mem.total, mem[1])
        self.assertLessEqual(mem.free, mem.total)

    @unittest.skipIf(len(cuda.gpus) < 2, "need more than 1 gpus")
    @skip_on_cudasim('CUDA HW required')
    def test_forbidden_context_switch(self):
        # Cannot switch context inside a `cuda.require_context`
        @cuda.require_context
        def switch_gpu():
            with cuda.gpus[1]:
                pass

        with cuda.gpus[0]:
            with self.assertRaises(RuntimeError) as raises:
                switch_gpu()
            self.assertIn("Cannot switch CUDA-context.", str(raises.exception))

    @unittest.skipIf(len(cuda.gpus) < 2, "need more than 1 gpus")
    def test_accepted_context_switch(self):
        # Switching contexts is fine outside `cuda.require_context`.
        def switch_gpu():
            with cuda.gpus[1]:
                return cuda.current_context().device.id

        with cuda.gpus[0]:
            devid = switch_gpu()
        self.assertEqual(int(devid), 1)

    # Decorates a definition that continues past this chunk (not visible here).
    @skip_on_cudasim('CUDA HW required')
TestContextAPI