language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0151_addons_linkpreviews_selector.py
{ "start": 149, "end": 1186 }
class ____(migrations.Migration): safe = Safe.before_deploy() dependencies = [ ("projects", "0150_add_ssh_key_with_write_access"), ] operations = [ migrations.AddField( model_name="addonsconfig", name="linkpreviews_selector", field=models.CharField( blank=True, help_text="CSS selector to select links you want enabled for link previews. Leave it blank for auto-detect all links in your main page content.", max_length=128, null=True, ), ), migrations.AddField( model_name="historicaladdonsconfig", name="linkpreviews_selector", field=models.CharField( blank=True, help_text="CSS selector to select links you want enabled for link previews. Leave it blank for auto-detect all links in your main page content.", max_length=128, null=True, ), ), ]
Migration
python
ray-project__ray
python/ray/data/tests/util.py
{ "start": 239, "end": 2968 }
class ____: def __init__(self): self.count = 0 def increment(self): self.count += 1 def get(self): return self.count def reset(self): self.count = 0 @contextmanager def gen_bin_files(n): with tempfile.TemporaryDirectory() as temp_dir: paths = [] for i in range(n): path = os.path.join(temp_dir, f"{i}.bin") paths.append(path) with open(path, "wb") as fp: to_write = str(i) * 500 fp.write(to_write.encode()) yield (temp_dir, paths) def column_udf(col, udf): @functools.wraps(udf) def wraps(row): return {col: udf(row[col])} return wraps def column_udf_class(col, udf): class UDFClass: def __call__(self, row): return {col: udf(row[col])} return UDFClass # Ex: named_values("id", [1, 2, 3]) # Ex: named_values(["id", "id2"], [(1, 1), (2, 2), (3, 3)]) def named_values(col_names, tuples): output = [] if isinstance(col_names, list): for t in tuples: output.append(dict(zip(col_names, t))) else: for t in tuples: output.append({col_names: t}) return output def extract_values(col_name, tuples): return [t[col_name] for t in tuples] def run_op_tasks_sync(op: PhysicalOperator, only_existing=False): """Run tasks of a PhysicalOperator synchronously. By default, this function will run until the op no longer has any active tasks. If only_existing is True, this function will only run the currently existing tasks. 
""" tasks = op.get_active_tasks() while tasks: ref_to_task = {task.get_waitable(): task for task in tasks} ready, _ = ray.wait( [task.get_waitable() for task in tasks], num_returns=len(tasks), fetch_local=False, timeout=0.1, ) for ref in ready: task = ref_to_task[ref] if isinstance(task, DataOpTask): task.on_data_ready(None) else: assert isinstance(task, MetadataOpTask) task.on_task_finished() if only_existing: return tasks = op.get_active_tasks() def run_one_op_task(op): """Run one task of a PhysicalOperator.""" tasks = op.get_active_tasks() waitable_to_tasks = {task.get_waitable(): task for task in tasks} ready, _ = ray.wait( list(waitable_to_tasks.keys()), num_returns=1, fetch_local=False ) task = waitable_to_tasks[ready[0]] if isinstance(task, DataOpTask): task.on_data_ready(None) else: assert isinstance(task, MetadataOpTask) task.on_task_finished()
Counter
python
mahmoud__glom
glom/mutation.py
{ "start": 1213, "end": 2246 }
class ____(PathAssignError): """This :exc:`GlomError` subtype is raised when an assignment fails, stemming from an :func:`~glom.delete` call or other :class:`~glom.Delete` usage. One example would be deleting an out-of-range position in a list:: >>> delete(["short", "list"], Path(5)) Traceback (most recent call last): ... PathDeleteError: could not delete 5 on object at Path(), got error: IndexError(... Other assignment failures could be due to deleting a read-only ``@property`` or exception being raised inside a ``__delattr__()``. """ def get_message(self): return ('could not delete %r on object at %r, got error: %r' % (self.dest_name, self.path, self.exc)) def _apply_for_each(func, path, val): layers = path.path_t.__stars__() if layers: for i in range(layers - 1): val = sum(val, []) # flatten out the extra layers for inner in val: func(inner) else: func(val)
PathDeleteError
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/execution/executors/asyncio.py
{ "start": 1034, "end": 1794 }
class ____(object): def __init__(self, loop=None): if loop is None: loop = get_event_loop() self.loop = loop self.futures = [] def wait_until_finished(self): # if there are futures to wait for while self.futures: # wait for the futures to finish futures = self.futures self.futures = [] self.loop.run_until_complete(wait(futures)) def execute(self, fn, *args, **kwargs): result = fn(*args, **kwargs) if isinstance(result, Future) or iscoroutine(result): future = ensure_future(result, loop=self.loop) self.futures.append(future) return Promise.resolve(future) return result
AsyncioExecutor
python
OmkarPathak__pygorithm
tests/test_searching.py
{ "start": 1216, "end": 1771 }
class ____(unittest.TestCase): def test_bfs(self): self.graph = { 'A': {'B', 'C', 'E'}, 'B': {'A', 'D', 'F'}, 'C': {'A', 'G'}, 'D': {'B'}, 'F': {'B'}, 'E': {'A'}, 'G': {'C'} } result = breadth_first_search.search(self.graph, 'A') self.assertEqual(result, {'A', 'B', 'D', 'F', 'C', 'G', 'E'}) result = breadth_first_search.search(self.graph, 'G') self.assertEqual(result, {'G', 'C', 'A', 'B', 'D', 'F', 'E'})
TestBFSSearch
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/errors.py
{ "start": 10249, "end": 10639 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneError,) name = "ModeNotFoundError" mode = graphene.NonNull(graphene.String) def __init__(self, mode, selector): super().__init__() self.mode = check.str_param(mode, "mode") self.message = f"Mode {mode} not found in pipeline {selector.job_name}."
GrapheneModeNotFoundError
python
allegroai__clearml
clearml/backend_api/services/v2_23/frames.py
{ "start": 491657, "end": 493837 }
class ____(Response): """ Response of frames.prepare_download_for_dataview endpoint. :param prepare_id: Prepare ID (use when calling `download_for_dataview`) :type prepare_id: str """ _service = "frames" _action = "prepare_download_for_dataview" _version = "2.23" _schema = { "definitions": {}, "properties": { "prepare_id": { "description": "Prepare ID (use when calling `download_for_dataview`)", "type": ["string", "null"], } }, "type": "object", } def __init__(self, prepare_id=None, **kwargs): super(PrepareDownloadForDataviewResponse, self).__init__(**kwargs) self.prepare_id = prepare_id @schema_property("prepare_id") def prepare_id(self): return self._property_prepare_id @prepare_id.setter def prepare_id(self, value): if value is None: self._property_prepare_id = None return self.assert_isinstance(value, "prepare_id", six.string_types) self._property_prepare_id = value response_mapping = { ClearGetNextStateRequest: ClearGetNextStateResponse, GetNextRequest: GetNextResponse, GetWithProjectionRequest: GetWithProjectionResponse, GetSnippetsForDataviewRequest: GetSnippetsForDataviewResponse, GetSnippetsForDataview2Request: GetSnippetsForDataview2Response, GetSourceIdsForDataviewRequest: GetSourceIdsForDataviewResponse, GetSnippetsQueryForDataviewRequest: GetSnippetsQueryForDataviewResponse, GetCountRequest: GetCountResponse, GetNextForDataviewAndContextIdRequest: GetNextForDataviewAndContextIdResponse, GetNextForDataviewRequest: GetNextForDataviewResponse, GetCountForDataviewRequest: GetCountForDataviewResponse, GetNextForDataviewIdRequest: GetNextForDataviewIdResponse, GetCountForDataviewIdRequest: GetCountForDataviewIdResponse, GetByIdRequest: GetByIdResponse, GetByIdsRequest: GetByIdsResponse, PrepareDownloadForDataviewRequest: PrepareDownloadForDataviewResponse, DownloadForDataviewRequest: DownloadForDataviewResponse, }
PrepareDownloadForDataviewResponse
python
pola-rs__polars
py-polars/tests/unit/io/database/test_read.py
{ "start": 1243, "end": 1896 }
class ____: """Mock connection class for databases we can't test in CI.""" def __init__( self, driver: str, batch_size: int | None, exact_batch_size: bool, test_data: pa.Table, repeat_batch_calls: bool, ) -> None: self.__class__.__module__ = driver self._cursor = MockCursor( repeat_batch_calls=repeat_batch_calls, exact_batch_size=exact_batch_size, batched=(batch_size is not None), test_data=test_data, ) def close(self) -> None: pass def cursor(self) -> Any: return self._cursor
MockConnection
python
apache__airflow
providers/google/tests/unit/google/common/hooks/test_base_google.py
{ "start": 11909, "end": 14931 }
class ____: def setup_method(self): with mock.patch( "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__", new=mock_base_gcp_hook_default_project_id, ): self.instance = hook.GoogleBaseHook(gcp_conn_id="google-cloud-default") def test_provide_gcp_credential_file_decorator_key_path(self): key_path = "/test/key-path" self.instance.extras = {"key_path": key_path} with self.instance.provide_gcp_credential_file_as_context(): assert os.environ[CREDENTIALS] == key_path @mock.patch("tempfile.NamedTemporaryFile") def test_provide_gcp_credential_file_decorator_key_content(self, mock_file): string_file = StringIO() file_content = '{"foo": "bar"}' file_name = "/test/mock-file" self.instance.extras = {"keyfile_dict": file_content} mock_file_handler = mock_file.return_value.__enter__.return_value mock_file_handler.name = file_name mock_file_handler.write = string_file.write with self.instance.provide_gcp_credential_file_as_context(): assert os.environ[CREDENTIALS] == file_name assert file_content == string_file.getvalue() @mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE}) def test_provide_gcp_credential_keep_environment(self): key_path = "/test/key-path" self.instance.extras = {"key_path": key_path} with self.instance.provide_gcp_credential_file_as_context(): assert os.environ[CREDENTIALS] == key_path assert os.environ[CREDENTIALS] == ENV_VALUE @mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE}) def test_provide_gcp_credential_keep_environment_when_exception(self): key_path = "/test/key-path" self.instance.extras = {"key_path": key_path} with pytest.raises(RuntimeError, match="Some exception occurred"): with self.instance.provide_gcp_credential_file_as_context(): raise RuntimeError("Some exception occurred") assert os.environ[CREDENTIALS] == ENV_VALUE @mock.patch.dict(os.environ, clear=True) def test_provide_gcp_credential_keep_clear_environment(self): key_path = "/test/key-path" self.instance.extras = {"key_path": key_path} with 
self.instance.provide_gcp_credential_file_as_context(): assert os.environ[CREDENTIALS] == key_path assert CREDENTIALS not in os.environ @mock.patch.dict(os.environ, clear=True) def test_provide_gcp_credential_keep_clear_environment_when_exception(self): key_path = "/test/key-path" self.instance.extras = {"key_path": key_path} with pytest.raises(RuntimeError, match="Some exception occurred"): with self.instance.provide_gcp_credential_file_as_context(): raise RuntimeError("Some exception occurred") assert CREDENTIALS not in os.environ @pytest.mark.db_test
TestProvideGcpCredentialFileAsContext
python
simonw__datasette
datasette/permissions.py
{ "start": 3923, "end": 5101 }
class ____: name: str description: str | None abbr: str | None = None resource_class: type[Resource] | None = None also_requires: str | None = None # Optional action name that must also be allowed @property def takes_parent(self) -> bool: """ Whether this action requires a parent identifier when instantiating its resource. Returns False for global-only actions (no resource_class). Returns True for all actions with a resource_class (all resources require a parent identifier). """ return self.resource_class is not None @property def takes_child(self) -> bool: """ Whether this action requires a child identifier when instantiating its resource. Returns False for global actions (no resource_class). Returns False for parent-level resources (DatabaseResource - parent_class is None). Returns True for child-level resources (TableResource, QueryResource - have a parent_class). """ if self.resource_class is None: return False return self.resource_class.parent_class is not None _reason_id = 1 @dataclass
Action
python
kamyu104__LeetCode-Solutions
Python/distinct-points-reachable-after-substring-removal.py
{ "start": 42, "end": 516 }
class ____(object): def distinctPoints(self, s, k): """ :type s: str :type k: int :rtype: int """ DIRECTIONS = {'U':(0, 1), 'D':(0, -1), 'L':(-1, 0), 'R':(1, 0)} x = y = 0 lookup = {(x, y)} for i in xrange(k, len(s)): x += DIRECTIONS[s[i]][0]-DIRECTIONS[s[i-k]][0] y += DIRECTIONS[s[i]][1]-DIRECTIONS[s[i-k]][1] lookup.add((x, y)) return len(lookup)
Solution
python
faif__python-patterns
patterns/other/hsm/hsm.py
{ "start": 4463, "end": 5026 }
class ____(OutOfService): def __init__(self, HierachicalStateMachine): self._hsm = HierachicalStateMachine def on_diagnostics_failed(self): super().send_diagnostics_failure_report() super().next_state("failed") def on_diagnostics_passed(self): super().send_diagnostics_pass_report() super().clear_alarm() # loss of redundancy alarm super().next_state("standby") def on_operator_inservice(self): super().abort_diagnostics() super().on_operator_inservice() # message ignored
Suspect
python
Textualize__rich
tests/test_inspect.py
{ "start": 1758, "end": 2155 }
class ____: """Foo test Second line """ def __init__(self, foo: int) -> None: """constructor docs.""" self.foo = foo @property def broken(self): raise InspectError() def method(self, a, b) -> str: """Multi line docs. """ return "test" def __dir__(self): return ["__init__", "broken", "method"]
Foo
python
pydantic__pydantic
pydantic-core/tests/conftest.py
{ "start": 1448, "end": 3550 }
class ____: def __init__( self, schema: CoreSchema, config: CoreConfig | None = None, *, validator_type: Literal['json', 'python'] | None = None, ): self.validator = SchemaValidator(schema, config) self.validator_type = validator_type def validate_python(self, py_input, strict: bool | None = None, context: Any = None): return self.validator.validate_python(py_input, strict=strict, context=context) def validate_json(self, json_str: str, strict: bool | None = None, context: Any = None): return self.validator.validate_json(json_str, strict=strict, context=context) def validate_test( self, py_input, strict: bool | None = None, context: Any = None, extra: ExtraBehavior | None = None ): if self.validator_type == 'json': return self.validator.validate_json( json.dumps(py_input, default=json_default), strict=strict, extra=extra, context=context, ) else: assert self.validator_type == 'python', self.validator_type return self.validator.validate_python(py_input, strict=strict, context=context, extra=extra) def isinstance_test(self, py_input, strict: bool | None = None, context: Any = None): if self.validator_type == 'json': try: self.validator.validate_json(json.dumps(py_input), strict=strict, context=context) return True except ValidationError: return False else: assert self.validator_type == 'python', self.validator_type return self.validator.isinstance_python(py_input, strict=strict, context=context) PyAndJson = type[PyAndJsonValidator] @pytest.fixture(params=['python', 'json']) def py_and_json(request) -> PyAndJson: class ChosenPyAndJsonValidator(PyAndJsonValidator): __init__ = functools.partialmethod(PyAndJsonValidator.__init__, validator_type=request.param) return ChosenPyAndJsonValidator
PyAndJsonValidator
python
explosion__spaCy
spacy/pipeline/legacy/entity_linker.py
{ "start": 1032, "end": 18788 }
class ____(TrainablePipe): """Pipeline component for named entity linking. DOCS: https://spacy.io/api/entitylinker """ NIL = "NIL" # string used to refer to a non-existing link def __init__( self, vocab: Vocab, model: Model, name: str = "entity_linker", *, labels_discard: Iterable[str], n_sents: int, incl_prior: bool, incl_context: bool, entity_vector_length: int, get_candidates: Callable[[KnowledgeBase, Span], Iterable[Candidate]], overwrite: bool = BACKWARD_OVERWRITE, scorer: Optional[Callable] = entity_linker_score, ) -> None: """Initialize an entity linker. vocab (Vocab): The shared vocabulary. model (thinc.api.Model): The Thinc Model powering the pipeline component. name (str): The component instance name, used to add entries to the losses during training. labels_discard (Iterable[str]): NER labels that will automatically get a "NIL" prediction. n_sents (int): The number of neighbouring sentences to take into account. incl_prior (bool): Whether or not to include prior probabilities from the KB in the model. incl_context (bool): Whether or not to include the local context in the model. entity_vector_length (int): Size of encoding vectors in the KB. get_candidates (Callable[[KnowledgeBase, Span], Iterable[Candidate]]): Function that produces a list of candidates, given a certain knowledge base and a textual mention. scorer (Optional[Callable]): The scoring method. Defaults to Scorer.score_links. DOCS: https://spacy.io/api/entitylinker#init """ self.vocab = vocab self.model = model self.name = name self.labels_discard = list(labels_discard) self.n_sents = n_sents self.incl_prior = incl_prior self.incl_context = incl_context self.get_candidates = get_candidates self.cfg: Dict[str, Any] = {"overwrite": overwrite} self.distance = CosineDistance(normalize=False) # how many neighbour sentences to take into account # create an empty KB by default. If you want to load a predefined one, specify it in 'initialize'. 
self.kb = empty_kb(entity_vector_length)(self.vocab) self.scorer = scorer def set_kb(self, kb_loader: Callable[[Vocab], KnowledgeBase]): """Define the KB of this pipe by providing a function that will create it using this object's vocab.""" if not callable(kb_loader): raise ValueError(Errors.E885.format(arg_type=type(kb_loader))) self.kb = kb_loader(self.vocab) def validate_kb(self) -> None: # Raise an error if the knowledge base is not initialized. if self.kb is None: raise ValueError(Errors.E1018.format(name=self.name)) if len(self.kb) == 0: raise ValueError(Errors.E139.format(name=self.name)) def initialize( self, get_examples: Callable[[], Iterable[Example]], *, nlp: Optional[Language] = None, kb_loader: Optional[Callable[[Vocab], KnowledgeBase]] = None, ): """Initialize the pipe for training, using a representative set of data examples. get_examples (Callable[[], Iterable[Example]]): Function that returns a representative sample of gold-standard Example objects. nlp (Language): The current nlp object the component is part of. kb_loader (Callable[[Vocab], KnowledgeBase]): A function that creates an InMemoryLookupKB from a Vocab instance. Note that providing this argument, will overwrite all data accumulated in the current KB. Use this only when loading a KB as-such from file. 
DOCS: https://spacy.io/api/entitylinker#initialize """ validate_get_examples(get_examples, "EntityLinker_v1.initialize") if kb_loader is not None: self.set_kb(kb_loader) self.validate_kb() nO = self.kb.entity_vector_length doc_sample = [] vector_sample = [] for example in islice(get_examples(), 10): doc_sample.append(example.x) vector_sample.append(self.model.ops.alloc1f(nO)) assert len(doc_sample) > 0, Errors.E923.format(name=self.name) assert len(vector_sample) > 0, Errors.E923.format(name=self.name) self.model.initialize( X=doc_sample, Y=self.model.ops.asarray(vector_sample, dtype="float32") ) def update( self, examples: Iterable[Example], *, drop: float = 0.0, sgd: Optional[Optimizer] = None, losses: Optional[Dict[str, float]] = None, ) -> Dict[str, float]: """Learn from a batch of documents and gold-standard information, updating the pipe's model. Delegates to predict and get_loss. examples (Iterable[Example]): A batch of Example objects. drop (float): The dropout rate. sgd (thinc.api.Optimizer): The optimizer. losses (Dict[str, float]): Optional record of the loss during training. Updated using the component name as the key. RETURNS (Dict[str, float]): The updated losses dictionary. DOCS: https://spacy.io/api/entitylinker#update """ self.validate_kb() if losses is None: losses = {} losses.setdefault(self.name, 0.0) if not examples: return losses validate_examples(examples, "EntityLinker_v1.update") sentence_docs = [] for eg in examples: sentences = [s for s in eg.reference.sents] kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True) for ent in eg.reference.ents: # KB ID of the first token is the same as the whole span kb_id = kb_ids[ent.start] if kb_id: try: # find the sentence in the list of sentences. 
sent_index = sentences.index(ent.sent) except AttributeError: # Catch the exception when ent.sent is None and provide a user-friendly warning raise RuntimeError(Errors.E030) from None # get n previous sentences, if there are any start_sentence = max(0, sent_index - self.n_sents) # get n posterior sentences, or as many < n as there are end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) # get token positions start_token = sentences[start_sentence].start end_token = sentences[end_sentence].end # append that span as a doc to training sent_doc = eg.predicted[start_token:end_token].as_doc() sentence_docs.append(sent_doc) set_dropout_rate(self.model, drop) if not sentence_docs: warnings.warn(Warnings.W093.format(name="Entity Linker")) return losses sentence_encodings, bp_context = self.model.begin_update(sentence_docs) loss, d_scores = self.get_loss( sentence_encodings=sentence_encodings, examples=examples ) bp_context(d_scores) if sgd is not None: self.finish_update(sgd) losses[self.name] += loss return losses def get_loss(self, examples: Iterable[Example], sentence_encodings: Floats2d): validate_examples(examples, "EntityLinker_v1.get_loss") entity_encodings = [] for eg in examples: kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True) for ent in eg.reference.ents: kb_id = kb_ids[ent.start] if kb_id: entity_encoding = self.kb.get_vector(kb_id) entity_encodings.append(entity_encoding) entity_encodings = self.model.ops.asarray2f(entity_encodings) if sentence_encodings.shape != entity_encodings.shape: err = Errors.E147.format( method="get_loss", msg="gold entities do not match up" ) raise RuntimeError(err) gradients = self.distance.get_grad(sentence_encodings, entity_encodings) loss = self.distance.get_loss(sentence_encodings, entity_encodings) loss = loss / len(entity_encodings) return float(loss), gradients def predict(self, docs: Iterable[Doc]) -> List[str]: """Apply the pipeline's model to a batch of docs, without modifying them. 
Returns the KB IDs for each entity in each doc, including NIL if there is no prediction. docs (Iterable[Doc]): The documents to predict. RETURNS (List[str]): The models prediction for each document. DOCS: https://spacy.io/api/entitylinker#predict """ self.validate_kb() entity_count = 0 final_kb_ids: List[str] = [] if not docs: return final_kb_ids if isinstance(docs, Doc): docs = [docs] for i, doc in enumerate(docs): sentences = [s for s in doc.sents] if len(doc) > 0: # Looping through each entity (TODO: rewrite) for ent in doc.ents: sent = ent.sent sent_index = sentences.index(sent) assert sent_index >= 0 # get n_neighbour sentences, clipped to the length of the document start_sentence = max(0, sent_index - self.n_sents) end_sentence = min(len(sentences) - 1, sent_index + self.n_sents) start_token = sentences[start_sentence].start end_token = sentences[end_sentence].end sent_doc = doc[start_token:end_token].as_doc() # currently, the context is the same for each entity in a sentence (should be refined) xp = self.model.ops.xp if self.incl_context: sentence_encoding = self.model.predict([sent_doc])[0] sentence_encoding_t = sentence_encoding.T sentence_norm = xp.linalg.norm(sentence_encoding_t) entity_count += 1 if ent.label_ in self.labels_discard: # ignoring this entity - setting to NIL final_kb_ids.append(self.NIL) else: candidates = list(self.get_candidates(self.kb, ent)) if not candidates: # no prediction possible for this entity - setting to NIL final_kb_ids.append(self.NIL) elif len(candidates) == 1: # shortcut for efficiency reasons: take the 1 candidate final_kb_ids.append(candidates[0].entity_) else: random.shuffle(candidates) # set all prior probabilities to 0 if incl_prior=False prior_probs = xp.asarray([c.prior_prob for c in candidates]) if not self.incl_prior: prior_probs = xp.asarray([0.0 for _ in candidates]) scores = prior_probs # add in similarity from the context if self.incl_context: entity_encodings = xp.asarray( [c.entity_vector for c in 
candidates] ) entity_norm = xp.linalg.norm(entity_encodings, axis=1) if len(entity_encodings) != len(prior_probs): raise RuntimeError( Errors.E147.format( method="predict", msg="vectors not of equal length", ) ) # cosine similarity sims = xp.dot(entity_encodings, sentence_encoding_t) / ( sentence_norm * entity_norm ) if sims.shape != prior_probs.shape: raise ValueError(Errors.E161) scores = prior_probs + sims - (prior_probs * sims) best_index = scores.argmax().item() best_candidate = candidates[best_index] final_kb_ids.append(best_candidate.entity_) if not (len(final_kb_ids) == entity_count): err = Errors.E147.format( method="predict", msg="result variables not of equal length" ) raise RuntimeError(err) return final_kb_ids def set_annotations(self, docs: Iterable[Doc], kb_ids: List[str]) -> None: """Modify a batch of documents, using pre-computed scores. docs (Iterable[Doc]): The documents to modify. kb_ids (List[str]): The IDs to set, produced by EntityLinker.predict. DOCS: https://spacy.io/api/entitylinker#set_annotations """ count_ents = len([ent for doc in docs for ent in doc.ents]) if count_ents != len(kb_ids): raise ValueError(Errors.E148.format(ents=count_ents, ids=len(kb_ids))) i = 0 overwrite = self.cfg["overwrite"] for doc in docs: for ent in doc.ents: kb_id = kb_ids[i] i += 1 for token in ent: if token.ent_kb_id == 0 or overwrite: token.ent_kb_id_ = kb_id def to_bytes(self, *, exclude=tuple()): """Serialize the pipe to a bytestring. exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (bytes): The serialized object. 
DOCS: https://spacy.io/api/entitylinker#to_bytes """ self._validate_serialization_attrs() serialize = {} if hasattr(self, "cfg") and self.cfg is not None: serialize["cfg"] = lambda: srsly.json_dumps(self.cfg) serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude) serialize["kb"] = self.kb.to_bytes serialize["model"] = self.model.to_bytes return util.to_bytes(serialize, exclude) def from_bytes(self, bytes_data, *, exclude=tuple()): """Load the pipe from a bytestring. exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (TrainablePipe): The loaded object. DOCS: https://spacy.io/api/entitylinker#from_bytes """ self._validate_serialization_attrs() def load_model(b): try: self.model.from_bytes(b) except AttributeError: raise ValueError(Errors.E149) from None deserialize = {} if hasattr(self, "cfg") and self.cfg is not None: deserialize["cfg"] = lambda b: self.cfg.update(srsly.json_loads(b)) deserialize["vocab"] = lambda b: self.vocab.from_bytes(b, exclude=exclude) deserialize["kb"] = lambda b: self.kb.from_bytes(b) deserialize["model"] = load_model util.from_bytes(bytes_data, deserialize, exclude) return self def to_disk( self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() ) -> None: """Serialize the pipe to disk. path (str / Path): Path to a directory. exclude (Iterable[str]): String names of serialization fields to exclude. DOCS: https://spacy.io/api/entitylinker#to_disk """ serialize = {} serialize["vocab"] = lambda p: self.vocab.to_disk(p, exclude=exclude) serialize["cfg"] = lambda p: srsly.write_json(p, self.cfg) serialize["kb"] = lambda p: self.kb.to_disk(p) serialize["model"] = lambda p: self.model.to_disk(p) util.to_disk(path, serialize, exclude) def from_disk( self, path: Union[str, Path], *, exclude: Iterable[str] = SimpleFrozenList() ) -> "EntityLinker_v1": """Load the pipe from disk. Modifies the object in place and returns it. path (str / Path): Path to a directory. 
exclude (Iterable[str]): String names of serialization fields to exclude. RETURNS (EntityLinker): The modified EntityLinker object. DOCS: https://spacy.io/api/entitylinker#from_disk """ def load_model(p): try: with p.open("rb") as infile: self.model.from_bytes(infile.read()) except AttributeError: raise ValueError(Errors.E149) from None deserialize: Dict[str, Callable[[Any], Any]] = {} deserialize["cfg"] = lambda p: self.cfg.update(deserialize_config(p)) deserialize["vocab"] = lambda p: self.vocab.from_disk(p, exclude=exclude) deserialize["kb"] = lambda p: self.kb.from_disk(p) deserialize["model"] = load_model util.from_disk(path, deserialize, exclude) return self def rehearse(self, examples, *, sgd=None, losses=None, **config): raise NotImplementedError def add_label(self, label): raise NotImplementedError
EntityLinker_v1
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pytest_style/PT027_0.py
{ "start": 18, "end": 1295 }
class ____(unittest.TestCase): def test_errors(self): with self.assertRaises(ValueError): raise ValueError with self.assertRaises(expected_exception=ValueError): raise ValueError with self.failUnlessRaises(ValueError): raise ValueError with self.assertRaisesRegex(ValueError, "test"): raise ValueError("test") with self.assertRaisesRegex(ValueError, expected_regex="test"): raise ValueError("test") with self.assertRaisesRegex( expected_exception=ValueError, expected_regex="test" ): raise ValueError("test") with self.assertRaisesRegex( expected_regex="test", expected_exception=ValueError ): raise ValueError("test") with self.assertRaisesRegexp(ValueError, "test"): raise ValueError("test") def test_unfixable_errors(self): with self.assertRaises(ValueError, msg="msg"): raise ValueError with self.assertRaises( # comment ValueError ): raise ValueError with ( self # comment .assertRaises(ValueError) ): raise ValueError
Test
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/unit_tests.py
{ "start": 99, "end": 1976 }
class ____(dg.Config): num: int = 1 @dg.op def add_one(config: AddOneConfig) -> int: return config.num + 1 @dg.op def add_two(config: AddTwoConfig) -> int: return config.num + 2 @dg.op def subtract(left: int, right: int) -> int: return left - right @dg.graph def do_math(): subtract(add_one(), add_two()) do_math_job = do_math.to_job() @dg.op(out={"a_num": dg.Out(dagster_type=int)}) def emit_events_op(): a_num = 2 yield dg.ExpectationResult( success=a_num > 0, label="positive", description="A num must be positive" ) yield dg.AssetMaterialization( asset_key="persisted_string", description="Let us pretend we persisted the string somewhere", ) yield dg.Output(value=a_num, output_name="a_num") @dg.graph def emit_events(): emit_events_op() emit_events_job = emit_events.to_job() # start_test_job_marker def test_job(): result = do_math_job.execute_in_process() # return type is dg.ExecuteInProcessResult assert isinstance(result, dg.ExecuteInProcessResult) assert result.success # inspect individual dg.op result assert result.output_for_node("add_one") == 2 assert result.output_for_node("add_two") == 3 assert result.output_for_node("subtract") == -1 # end_test_job_marker # start_invocation_op_marker @dg.op def my_op_to_test(): return 5 # end_invocation_op_marker # start_test_op_marker def test_op_with_invocation(): assert my_op_to_test() == 5 # end_test_op_marker # start_invocation_op_inputs_marker @dg.op def my_op_with_inputs(x, y): return x + y # end_invocation_op_inputs_marker # start_test_op_with_inputs_marker def test_inputs_op_with_invocation(): assert my_op_with_inputs(5, 6) == 11 # end_test_op_with_inputs_marker # start_op_requires_foo_marker from dagster import ConfigurableResource
AddTwoConfig
python
kubernetes-client__python
kubernetes/client/models/v1beta1_device_claim.py
{ "start": 383, "end": 5899 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'config': 'list[V1beta1DeviceClaimConfiguration]', 'constraints': 'list[V1beta1DeviceConstraint]', 'requests': 'list[V1beta1DeviceRequest]' } attribute_map = { 'config': 'config', 'constraints': 'constraints', 'requests': 'requests' } def __init__(self, config=None, constraints=None, requests=None, local_vars_configuration=None): # noqa: E501 """V1beta1DeviceClaim - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._config = None self._constraints = None self._requests = None self.discriminator = None if config is not None: self.config = config if constraints is not None: self.constraints = constraints if requests is not None: self.requests = requests @property def config(self): """Gets the config of this V1beta1DeviceClaim. # noqa: E501 This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim. # noqa: E501 :return: The config of this V1beta1DeviceClaim. # noqa: E501 :rtype: list[V1beta1DeviceClaimConfiguration] """ return self._config @config.setter def config(self, config): """Sets the config of this V1beta1DeviceClaim. This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim. # noqa: E501 :param config: The config of this V1beta1DeviceClaim. 
# noqa: E501 :type: list[V1beta1DeviceClaimConfiguration] """ self._config = config @property def constraints(self): """Gets the constraints of this V1beta1DeviceClaim. # noqa: E501 These constraints must be satisfied by the set of devices that get allocated for the claim. # noqa: E501 :return: The constraints of this V1beta1DeviceClaim. # noqa: E501 :rtype: list[V1beta1DeviceConstraint] """ return self._constraints @constraints.setter def constraints(self, constraints): """Sets the constraints of this V1beta1DeviceClaim. These constraints must be satisfied by the set of devices that get allocated for the claim. # noqa: E501 :param constraints: The constraints of this V1beta1DeviceClaim. # noqa: E501 :type: list[V1beta1DeviceConstraint] """ self._constraints = constraints @property def requests(self): """Gets the requests of this V1beta1DeviceClaim. # noqa: E501 Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated. # noqa: E501 :return: The requests of this V1beta1DeviceClaim. # noqa: E501 :rtype: list[V1beta1DeviceRequest] """ return self._requests @requests.setter def requests(self, requests): """Sets the requests of this V1beta1DeviceClaim. Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated. # noqa: E501 :param requests: The requests of this V1beta1DeviceClaim. 
# noqa: E501 :type: list[V1beta1DeviceRequest] """ self._requests = requests def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1beta1DeviceClaim): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1beta1DeviceClaim): return True return self.to_dict() != other.to_dict()
V1beta1DeviceClaim
python
scrapy__scrapy
scrapy/spidermiddlewares/httperror.py
{ "start": 835, "end": 2816 }
class ____: crawler: Crawler def __init__(self, settings: BaseSettings): self.handle_httpstatus_all: bool = settings.getbool("HTTPERROR_ALLOW_ALL") self.handle_httpstatus_list: list[int] = settings.getlist( "HTTPERROR_ALLOWED_CODES" ) @classmethod def from_crawler(cls, crawler: Crawler) -> Self: o = cls(crawler.settings) o.crawler = crawler return o @_warn_spider_arg def process_spider_input( self, response: Response, spider: Spider | None = None ) -> None: if 200 <= response.status < 300: # common case return meta = response.meta if meta.get("handle_httpstatus_all", False): return if "handle_httpstatus_list" in meta: allowed_statuses = meta["handle_httpstatus_list"] elif self.handle_httpstatus_all: return else: allowed_statuses = getattr( self.crawler.spider, "handle_httpstatus_list", self.handle_httpstatus_list, ) if response.status in allowed_statuses: return raise HttpError(response, "Ignoring non-200 response") @_warn_spider_arg def process_spider_exception( self, response: Response, exception: Exception, spider: Spider | None = None ) -> Iterable[Any] | None: if isinstance(exception, HttpError): assert self.crawler.stats self.crawler.stats.inc_value("httperror/response_ignored_count") self.crawler.stats.inc_value( f"httperror/response_ignored_status_count/{response.status}" ) logger.info( "Ignoring response %(response)r: HTTP status code is not handled or not allowed", {"response": response}, extra={"spider": self.crawler.spider}, ) return [] return None
HttpErrorMiddleware
python
ray-project__ray
python/ray/dag/tests/experimental/test_dag_error_handling.py
{ "start": 9898, "end": 23796 }
class ____: @pytest.mark.parametrize("use_multi_output_node", [False, True]) def test_compile_twice_fails(self, ray_start_regular, use_multi_output_node: bool): a = Actor.remote(0) with InputNode() as i: if use_multi_output_node: dag = MultiOutputNode([a.echo.bind(i)]) else: dag = a.echo.bind(i) compiled_dag = dag.experimental_compile() # Trying to compile again should fail. expected_err = ( "It is not allowed to call `experimental_compile` on the same DAG " "object multiple times no matter whether `teardown` is called or not. " "Please reuse the existing compiled DAG or create a new one." ) with pytest.raises( ValueError, match=expected_err, ): compiled_dag = dag.experimental_compile() # Even if we teardown the DAG, trying to compile again should still fail. compiled_dag.teardown() with pytest.raises( ValueError, match=expected_err, ): compiled_dag = dag.experimental_compile() def test_compile_twice_with_different_nodes(self, ray_start_regular): a = Actor.remote(0) b = Actor.remote(0) with InputNode() as i: branch1 = a.echo.bind(i) branch2 = b.echo.bind(i) dag = MultiOutputNode([branch1, branch2]) compiled_dag = dag.experimental_compile() compiled_dag.teardown() with pytest.raises( ValueError, match="The DAG was compiled more than once. The following two " "nodes call `experimental_compile`: ", ): branch2.experimental_compile() def test_exceed_max_buffered_results(ray_start_regular): a = Actor.remote(0) with InputNode() as i: dag = a.inc.bind(i) compiled_dag = dag.experimental_compile(_max_buffered_results=1) refs = [] for i in range(2): ref = compiled_dag.execute(1) # Hold the refs to avoid get() being called on the ref # when it goes out of scope refs.append(ref) # ray.get() on the 2nd ref fails because the DAG cannot buffer 2 results. with pytest.raises( ray.exceptions.RayCgraphCapacityExceeded, match=( "The compiled graph can't have more than 1 buffered results, " r"and you currently have 1 buffered results. 
Call `ray.get\(\)` on " r"CompiledDAGRef's \(or await on CompiledDAGFuture's\) to retrieve " "results, or increase `_max_buffered_results` if buffering is " "desired, note that this will increase driver memory usage." ), ): ray.get(ref) def test_exceed_max_buffered_results_multi_output(ray_start_regular): a = Actor.remote(0) b = Actor.remote(0) with InputNode() as inp: dag = MultiOutputNode([a.inc.bind(inp), b.inc.bind(inp)]) compiled_dag = dag.experimental_compile(_max_buffered_results=1) refs = [] for _ in range(2): ref = compiled_dag.execute(1) # Hold the refs to avoid get() being called on the ref # when it goes out of scope refs.append(ref) # If there are results not fetched from an execution, that execution # still counts towards the number of buffered results. ray.get(refs[0][0]) # ray.get() on the 2nd ref fails because the DAG cannot buffer 2 results. with pytest.raises( ray.exceptions.RayCgraphCapacityExceeded, match=( "The compiled graph can't have more than 1 buffered results, " r"and you currently have 1 buffered results. Call `ray.get\(\)` on " r"CompiledDAGRef's \(or await on CompiledDAGFuture's\) to retrieve " "results, or increase `_max_buffered_results` if buffering is " "desired, note that this will increase driver memory usage." ), ): ray.get(ref[0]) def test_dag_fault_tolerance_chain(ray_start_regular): actors = [ Actor.remote(0, fail_after=10 if i == 0 else None, sys_exit=False) for i in range(4) ] with InputNode() as i: dag = i for a in actors: dag = a.echo.bind(dag) compiled_dag = dag.experimental_compile() for i in range(9): ref = compiled_dag.execute(i) results = ray.get(ref) with pytest.raises(RuntimeError): for i in range(9): ref = compiled_dag.execute(i) results = ray.get(ref) assert results == i compiled_dag.teardown() # All actors are still alive. ray.get([actor.sleep.remote(0) for actor in actors]) # Remaining actors can be reused. 
actors.pop(0) with InputNode() as i: dag = i for a in actors: dag = a.echo.bind(dag) compiled_dag = dag.experimental_compile() for i in range(10): ref = compiled_dag.execute(i) results = ray.get(ref) assert results == i def test_dag_fault_tolerance(ray_start_regular): actors = [ Actor.remote(0, fail_after=10 if i == 0 else None, sys_exit=False) for i in range(4) ] with InputNode() as i: out = [a.inc.bind(i) for a in actors] dag = MultiOutputNode(out) compiled_dag = dag.experimental_compile() for i in range(9): refs = compiled_dag.execute(1) assert ray.get(refs) == [i + 1] * len(actors) with pytest.raises(RuntimeError): for i in range(9, 20): refs = compiled_dag.execute(1) assert ray.get(refs) == [i + 1] * len(actors) compiled_dag.teardown() # All actors are still alive. ray.get([actor.sleep.remote(0) for actor in actors]) # Remaining actors can be reused. actors.pop(0) with InputNode() as i: out = [a.inc.bind(i) for a in actors] dag = MultiOutputNode(out) compiled_dag = dag.experimental_compile() for i in range(10): ray.get(compiled_dag.execute(1)) def test_dag_fault_tolerance_sys_exit(ray_start_regular): actors = [ Actor.remote(0, fail_after=10 if i == 0 else None, sys_exit=True) for i in range(4) ] with InputNode() as i: out = [a.inc.bind(i) for a in actors] dag = MultiOutputNode(out) compiled_dag = dag.experimental_compile() for i in range(9): refs = compiled_dag.execute(1) assert ray.get(refs) == [i + 1] * len(actors) with pytest.raises( ActorDiedError, match="The actor died unexpectedly before finishing this task." ): for i in range(9): refs = compiled_dag.execute(1) ray.get(refs) # Remaining actors are still alive. with pytest.raises(ray.exceptions.RayActorError): ray.get(actors[0].echo.remote("hello")) actors.pop(0) ray.get([actor.echo.remote("hello") for actor in actors]) # Remaining actors can be reused. 
with InputNode() as i: out = [a.inc.bind(i) for a in actors] dag = MultiOutputNode(out) compiled_dag = dag.experimental_compile() for i in range(10): refs = compiled_dag.execute(1) ray.get(refs) def test_dag_teardown_while_running(ray_start_regular): a = Actor.remote(0) with InputNode() as inp: dag = a.sleep.bind(inp) compiled_dag = dag.experimental_compile() ref = compiled_dag.execute(3) # 3-second slow task running async compiled_dag.teardown() try: ray.get(ref) # Sanity check the channel doesn't block. except Exception: pass # Check we can still use the actor after first DAG teardown. with InputNode() as inp: dag = a.sleep.bind(inp) compiled_dag = dag.experimental_compile() ref = compiled_dag.execute(0.1) result = ray.get(ref) assert result == 0.1 def test_asyncio_exceptions(ray_start_regular): a = Actor.remote(0) with InputNode() as i: dag = a.inc.bind(i) loop = get_or_create_event_loop() compiled_dag = dag.experimental_compile(enable_asyncio=True) async def main(): fut = await compiled_dag.execute_async(1) result = await fut assert result == 1 fut = await compiled_dag.execute_async("hello") with pytest.raises(TypeError) as exc_info: await fut # Traceback should match the original actor class definition. assert "self.i += x" in str(exc_info.value) # Can throw an error multiple times. fut = await compiled_dag.execute_async("hello") with pytest.raises(TypeError) as exc_info: await fut # Traceback should match the original actor class definition. assert "self.i += x" in str(exc_info.value) # Can use the DAG after exceptions are thrown. fut = await compiled_dag.execute_async(1) result = await fut assert result == 2 loop.run_until_complete(main()) def test_channel_read_after_close(ray_start_regular): # Tests that read to a channel after Compiled Graph teardown raises a # RayChannelError exception as the channel is closed (see issue #46284). 
@ray.remote class Actor: def foo(self, arg): return arg a = Actor.remote() with InputNode() as inp: dag = a.foo.bind(inp) dag = dag.experimental_compile() ref = dag.execute(1) dag.teardown() with pytest.raises(RayChannelError, match="Channel closed."): ray.get(ref) def test_channel_write_after_close(ray_start_regular): # Tests that write to a channel after Compiled Graph teardown raises a # RayChannelError exception as the channel is closed. @ray.remote class Actor: def foo(self, arg): return arg a = Actor.remote() with InputNode() as inp: dag = a.foo.bind(inp) dag = dag.experimental_compile() dag.teardown() with pytest.raises(RayChannelError, match="Channel closed."): dag.execute(1) def test_multi_arg_exception(shutdown_only): a = Actor.remote(0) with InputNode() as i: o1, o2 = a.return_two_but_raise_exception.bind(i) dag = MultiOutputNode([o1, o2]) compiled_dag = dag.experimental_compile() for _ in range(3): x, y = compiled_dag.execute(1) with pytest.raises(RuntimeError): ray.get(x) with pytest.raises(RuntimeError): ray.get(y) def test_multi_arg_exception_async(shutdown_only): a = Actor.remote(0) with InputNode() as i: o1, o2 = a.return_two_but_raise_exception.bind(i) dag = MultiOutputNode([o1, o2]) compiled_dag = dag.experimental_compile(enable_asyncio=True) async def main(): for _ in range(3): x, y = await compiled_dag.execute_async(1) with pytest.raises(RuntimeError): await x with pytest.raises(RuntimeError): await y loop = get_or_create_event_loop() loop.run_until_complete(main()) def test_signature_mismatch(shutdown_only): @ray.remote class Worker: def w(self, x): return 1 def f(self, x, *, y): pass def g(self, x, y, z=1): pass worker = Worker.remote() with pytest.raises( TypeError, match=( r"got an unexpected keyword argument 'y'\. The function `w` has a " r"signature `\(x\)`, but the given arguments to `bind` doesn't match\. 
" r".*args:.*kwargs:.*" ), ): with InputNode() as inp: _ = worker.w.bind(inp, y=inp) with pytest.raises( TypeError, match=( r"too many positional arguments\. The function `w` has a signature " r"`\(x\)`, but the given arguments to `bind` doesn't match\. " r"args:.*kwargs:.*" ), ): with InputNode() as inp: _ = worker.w.bind(inp, inp) with pytest.raises( TypeError, # Starting from Python 3.12, the error message includes "keyword-only." # Therefore, we need to match both "required keyword-only argument" and # "required argument." match=( r"missing a required (keyword-only )?argument: 'y'\. " r"The function `f` has a signature `\(x, \*, y\)`, " r"but the given arguments to `bind` doesn't match\. " r"args:.*kwargs:.*" ), ): with InputNode() as inp: _ = worker.f.bind(inp) with pytest.raises( TypeError, match=( r"missing a required argument: 'y'\. The function `g` has a signature " r"`\(x, y, z=1\)`, but the given arguments to `bind` doesn't match\. " r"args:.*kwargs:.*" ), ): with InputNode() as inp: _ = worker.g.bind(inp) def test_missing_input_node(): @ray.remote class Actor: def __init__(self): pass def f(self, input): return input def add(self, a, b): return a + b actor = Actor.remote() with ray.dag.InputNode() as dag_input: input0, input1, input2 = dag_input[0], dag_input[1], dag_input[2] _ = actor.f.bind(input1) dag = actor.add.bind(input0, input2) with pytest.raises( ValueError, match="Compiled Graph expects input to be accessed " "using all of attributes 0, 1, 2, " "but 1 is unused. " "Ensure all input attributes are used and contribute " "to the computation of the Compiled Graph output.", ): dag.experimental_compile() def test_sigint_get_dagref(ray_start_cluster): driver_script = """ import ray from ray.dag import InputNode import time ray.init() @ray.remote
TestDAGExceptionCompileMultipleTimes
python
huggingface__transformers
src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py
{ "start": 34748, "end": 36233 }
class ____(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.max_distance = config.bias_max_distance self.symmetric = config.bias_symmetric self.num_buckets = self.max_distance if not config.bias_symmetric: self.num_buckets *= 2 self.bias_values = nn.Embedding(self.num_buckets, config.num_attention_heads) def forward(self, x): # instantiate bias compatible with shape of x max_pos = x.size(1) context_position = torch.arange(max_pos, device=x.device, dtype=torch.long)[:, None] memory_position = torch.arange(max_pos, device=x.device, dtype=torch.long)[None, :] relative_position = memory_position - context_position # clipping to a maximum distance using ops that play well with ONNX export relative_position = relative_position.masked_fill(relative_position < -self.max_distance, -self.max_distance) relative_position = relative_position.masked_fill( relative_position > self.max_distance - 1, self.max_distance - 1 ) # mapping from relative position to index in the bias parameter bias_idx = relative_position bias_idx = bias_idx.abs() if self.symmetric else bias_idx + self.num_buckets // 2 att_bias = self.bias_values(bias_idx) att_bias = att_bias.permute(2, 0, 1).unsqueeze(0) return att_bias
Phi4MultimodalAudioRelativeAttentionBias
python
pytorch__pytorch
torch/utils/_sympy/reference.py
{ "start": 8240, "end": 14048 }
class ____: # NB: This is actually dead, because with Proxy tracing the factory # function isn't traced correctly. Here for completeness. @staticmethod def constant(c, dtype): d: int | float | bool if dtype is torch.int64: d = int(c) elif dtype is torch.double: d = float(c) elif dtype is torch.bool: d = bool(c) else: raise AssertionError(f"unrecognized dtype {dtype}") return torch.ops.aten.scalar_tensor.default(d, dtype=dtype) @staticmethod def or_(a, b): return torch.ops.aten.logical_or.default(a, b) @staticmethod def and_(a, b): return torch.ops.aten.logical_and.default(a, b) @staticmethod def bitwise_and(a, b): return torch.ops.aten.bitwise_and(a, b) @staticmethod def bitwise_or(a, b): return torch.ops.aten.bitwise_or(a, b) @staticmethod def bitwise_xor(a, b): return torch.ops.aten.bitwise_xor(a, b) @staticmethod def eq(a, b): return torch.ops.aten.eq.Tensor(a, b) @classmethod def ne(cls, a, b): return torch.ops.aten.ne.Tensor(a, b) @staticmethod def lt(a, b): return torch.ops.aten.lt.Tensor(a, b) @staticmethod def gt(a, b): return torch.ops.aten.gt.Tensor(a, b) @staticmethod def le(a, b): return torch.ops.aten.le.Tensor(a, b) @staticmethod def ge(a, b): return torch.ops.aten.ge.Tensor(a, b) @staticmethod def not_(a): return torch.ops.aten.logical_not.default(a) @staticmethod def reciprocal(x): return torch.ops.aten.reciprocal.default(x) @staticmethod def square(x): # TODO: maybe composite implicit autograd doesn't work here? 
return torch.ops.aten.square.default(x) @staticmethod def trunc_to_int(x, dtype): return _to_dtype(torch.ops.aten.trunc.default(x), dtype) @staticmethod def ceil_to_int(x, dtype): return _to_dtype(torch.ops.aten.ceil.default(x), dtype) @staticmethod def floor_to_int(x, dtype): return _to_dtype(torch.ops.aten.floor.default(x), dtype) @staticmethod def floor(x): return torch.ops.aten.floor.default(x) @staticmethod def ceil(x): return torch.ops.aten.ceil.default(x) @staticmethod def to_dtype(x, dtype): return _to_dtype(x, dtype) @staticmethod def mod(x, y) -> NoReturn: # TODO: https://github.com/pytorch/pytorch/pull/133654 raise NotImplementedError( "no C-style modulus operation available from frontend atm" ) @staticmethod def abs(x): return torch.ops.aten.abs.default(x) @staticmethod def neg(x): return torch.ops.aten.neg.default(x) @staticmethod def truediv(a, b): return torch.ops.aten.true_divide.Tensor(a, b) @staticmethod def int_truediv(a, b): raise NotImplementedError( "Python int truediv difficult to implement in PyTorch atm" ) # TODO: This is wrong, CPython has a custom implementation of true # division that results in higher precision when the floats are # sufficiently large. 
Short term fix: add a guard here return torch.ops.aten.true_divide.default( _to_dtype(a, torch.float64), _to_dtype(b, torch.float64) ) @staticmethod def floordiv(a, b): return torch.ops.aten.div.Tensor_mode(a, b, rounding_mode="floor") @staticmethod def truncdiv(a, b) -> NoReturn: raise NotImplementedError( "no C-style truncdiv operation available from frontend atm" ) @staticmethod def add(a, b): return torch.ops.aten.add.Tensor(a, b) @staticmethod def mul(a, b): return torch.ops.aten.mul.Tensor(a, b) @staticmethod def sub(a, b): return torch.ops.aten.sub.Tensor(a, b) @staticmethod def exp(x): return torch.ops.aten.exp.default(x) @staticmethod def log(x): return torch.ops.aten.log.default(x) @staticmethod def log2(x): return torch.ops.aten.log2.default(x) @staticmethod def sqrt(x): return torch.ops.aten.sqrt.default(x) @staticmethod def sin(x): return torch.ops.aten.sin.default(x) @staticmethod def cos(x): return torch.ops.aten.cos.default(x) @staticmethod def tanh(x): return torch.ops.aten.tanh.default(x) @staticmethod def sinh(x): return torch.ops.aten.sinh.default(x) @staticmethod def cosh(x): return torch.ops.aten.cosh.default(x) @staticmethod def tan(x): return torch.ops.aten.tan.default(x) @staticmethod def acos(x): return torch.ops.aten.acos.default(x) @staticmethod def atan(x): return torch.ops.aten.atan.default(x) @staticmethod def asin(x): return torch.ops.aten.asin.default(x) @staticmethod def pow(a, b): return torch.ops.aten.pow.Tensor_Tensor(a, b) @staticmethod def pow_by_natural(a, b): # NB: pow handles int x int fine return torch.ops.aten.pow.Tensor_Tensor(a, b) @staticmethod def minimum(a, b): return torch.ops.aten.minimum.default(a, b) @staticmethod def maximum(a, b): return torch.ops.aten.maximum.default(a, b) @staticmethod def round_to_int(a, dtype): return torch.ops.aten.round.default(a) @staticmethod def round_decimal(a, b) -> NoReturn: raise NotImplementedError( "round decimal doesn't support Tensor second argument atm" ) # return 
torch.ops.aten.round.decimals(a, b)
TensorReferenceAnalysis
python
kamyu104__LeetCode-Solutions
Python/01-matrix.py
{ "start": 47, "end": 1052 }
class ____(object): def updateMatrix(self, matrix): """ :type matrix: List[List[int]] :rtype: List[List[int]] """ for i in xrange(len(matrix)): for j in xrange(len(matrix[i])): if not matrix[i][j]: continue matrix[i][j] = float("inf") if i > 0: matrix[i][j] = min(matrix[i][j], matrix[i-1][j]+1) if j > 0: matrix[i][j] = min(matrix[i][j], matrix[i][j-1]+1) for i in reversed(xrange(len(matrix))): for j in reversed(xrange(len(matrix[i]))): if not matrix[i][j]: continue if i < len(matrix)-1: matrix[i][j] = min(matrix[i][j], matrix[i+1][j]+1) if j < len(matrix[i])-1: matrix[i][j] = min(matrix[i][j], matrix[i][j+1]+1) return matrix # Time: O(m * n) # Space: O(m * n) # dp solution
Solution
python
walkccc__LeetCode
solutions/2779. Maximum Beauty of an Array After Applying Operation/2779-2.py
{ "start": 0, "end": 274 }
class ____: def maximumBeauty(self, nums: list[int], k: int) -> int: nums.sort() # l and r track the maximum window instead of the valid window. l = 0 for r in range(len(nums)): if nums[r] - nums[l] > 2 * k: l += 1 return r - l + 1
Solution
python
ray-project__ray
release/nightly_tests/stress_tests/test_dead_actors.py
{ "start": 557, "end": 4002 }
class ____(object): def __init__(self, num_children, death_probability): self.death_probability = death_probability self.children = [Child.remote(death_probability) for _ in range(num_children)] def ping(self, num_pings): children_outputs = [] for _ in range(num_pings): children_outputs += [child.ping.remote() for child in self.children] try: ray.get(children_outputs) except Exception: # Replace the children if one of them died. self.__init__(len(self.children), self.death_probability) def kill(self): # Clean up children. ray.get([child.__ray_terminate__.remote() for child in self.children]) def parse_script_args(): parser = argparse.ArgumentParser() parser.add_argument("--num-nodes", type=int, default=100) parser.add_argument("--num-parents", type=int, default=10) parser.add_argument("--num-children", type=int, default=10) parser.add_argument("--death-probability", type=int, default=0.95) return parser.parse_known_args() if __name__ == "__main__": args, unknown = parse_script_args() result = {} # These numbers need to correspond with the autoscaler config file. # The number of remote nodes in the autoscaler should upper bound # these because sometimes nodes fail to update. num_remote_nodes = args.num_nodes num_parents = args.num_parents num_children = args.num_children death_probability = args.death_probability # Wait until the expected number of nodes have joined the cluster. num_nodes = len(ray.nodes()) assert ( num_nodes >= num_remote_nodes + 1 ), f"Expect {num_remote_nodes+1}, but only {num_nodes} joined." logger.info( "Nodes have all joined. There are %s resources.", ray.cluster_resources() ) parents = [ Parent.remote(num_children, death_probability) for _ in range(num_parents) ] start = time.time() loop_times = [] for i in range(100): loop_start = time.time() ray.get([parent.ping.remote(10) for parent in parents]) # Kill a parent actor with some probability. 
exit_chance = np.random.rand() if exit_chance > death_probability: parent_index = np.random.randint(len(parents)) parents[parent_index].kill.remote() parents[parent_index] = Parent.remote(num_children, death_probability) logger.info("Finished trial %s", i) loop_times.append(time.time() - loop_start) print("Finished in: {}s".format(time.time() - start)) print("Average iteration time: {}s".format(sum(loop_times) / len(loop_times))) print("Max iteration time: {}s".format(max(loop_times))) print("Min iteration time: {}s".format(min(loop_times))) result["total_time"] = time.time() - start result["avg_iteration_time"] = sum(loop_times) / len(loop_times) result["max_iteration_time"] = max(loop_times) result["min_iteration_time"] = min(loop_times) if os.environ.get("IS_SMOKE_TEST") != "1": result["perf_metrics"] = [ { "perf_metric_name": "avg_iteration_time", "perf_metric_value": result["avg_iteration_time"], "perf_metric_type": "LATENCY", } ] print("PASSED.") with open(os.environ["TEST_OUTPUT_JSON"], "w") as f: f.write(json.dumps(result))
Parent
python
pytorch__pytorch
test/dynamo/test_ctx_manager.py
{ "start": 66162, "end": 67523 }
class ____( torch._dynamo.test_case.TestCaseWithNestedGraphBreaks ): def setUp(self): super().setUp() self._prev = torch._dynamo.config.enable_trace_contextlib self._u_prev = torch._dynamo.config.enable_trace_unittest torch._dynamo.config.enable_trace_contextlib = True torch._dynamo.config.enable_trace_unittest = True def tearDown(self): super().tearDown() torch._dynamo.config.enable_trace_contextlib = self._prev torch._dynamo.config.enable_trace_unittest = self._u_prev def test_ctx_basic0(self): @contextlib.contextmanager def set_default_dtype(dtype): old_dtype = torch.get_default_dtype() try: torch.set_default_dtype(dtype) yield finally: torch.set_default_dtype(old_dtype) eager = EagerAndRecordGraphs() @torch.compile(backend=eager, fullgraph=True) def fn(): with set_default_dtype(torch.float64): x = torch.tensor([3.0, 3.0 + 5.0j]) return x y = fn() self.assertEqual(y.dtype, torch.complex128) graph = eager.graphs[0] actual = normalize_gm(graph.print_readable(False)) self.assertExpectedInline( actual, """\
ContextlibContextManagerTests
python
scipy__scipy
benchmarks/benchmarks/integrate.py
{ "start": 6494, "end": 7767 }
class ____(Benchmark): params = ( [ "gk15", "gk21", "genz-malik", ], [1e-9, 1e-10, 1e-11], ) param_names = ["rule", "rtol"] def setup(self, rule, rtol): self.a = np.array([0, 0, 0]) self.b = np.array([1, 2*np.pi, np.pi]) self.rule = rule self.rtol = rtol self.atol = 0 self.pool = ThreadPoolExecutor(2) def f(self, x): r = x[:, 0] phi = x[:, 2] return r**2 * np.sin(phi) def time_plain(self, rule, rtol): cubature( f=self.f, a=self.a, b=self.b, rule=self.rule, rtol=self.rtol, atol=self.atol, ) def time_threads(self, rule, rtol): cubature( f=self.f, a=self.a, b=self.b, rule=self.rule, rtol=self.rtol, atol=self.atol, workers=self.pool.map, ) def track_subdivisions(self, rule, rtol): res = cubature( f=self.f, a=self.a, b=self.b, rule=self.rule, rtol=self.rtol, atol=self.atol, ) return res.subdivisions
CubatureSphere
python
google__jax
jax/_src/pallas/mosaic_gpu/lowering.py
{ "start": 13442, "end": 19648 }
class ____: name: str axis_names: _AxisNames program_ids: Sequence[ir.Value] | None approx_math: bool single_wg_lane_predicate: ir.Value | None single_warp_lane_predicate: ir.Value | None smem_requested_bytes: int smem_used_bytes: int tmem_requested_cols: int tmem_used_cols: int tmem_base: ir.Value | None scoped_gmem_used_semaphores: dict[CollectiveAxesType, int] scoped_gmem_semaphore_base_ptr: dict[CollectiveAxesType, ir.Value] runtime_barriers: MutableMapping[AnyBarrier, MutableSequence[AnyBarrierRef]] name_stack: source_info_util.NameStack traceback_caches: mlir.TracebackCaches squashed_dims: tuple[int, ...] lowering_semantics: mgpu.LoweringSemantics primitive_semantics: gpu_core.PrimitiveSemantics mesh_info: pallas_utils.MeshInfo | None # See the documentation of unsafe_no_auto_barriers in CompilerParams. auto_barriers: bool warp_axis_name: str | None = None @property def single_lane_predicate(self) -> ir.Value: """Returns a predicate that is True for a single lane within the current thread semantics. """ assert self.lowering_semantics == mgpu.LoweringSemantics.Lane match self.primitive_semantics: case gpu_core.PrimitiveSemantics.Warpgroup: return self.single_wg_lane_predicate case gpu_core.PrimitiveSemantics.Warp: return self.single_warp_lane_predicate case _: raise ValueError(f"Unknown semantics: {self.primitive_semantics}") @contextlib.contextmanager def reserve_barrier( self, barrier: mgpu.Barrier ) -> Iterator[ mgpu.BarrierRef | mgpu.DialectBarrierRef | mgpu.CollectiveBarrierRef ]: """Reserves a barrier. Raises: RuntimeError: If the barrier is already reserved. 
""" available = self.runtime_barriers.get(barrier, []) if not available: raise RuntimeError(f"Barrier {barrier} is already reserved") barrier = available.pop() yield barrier available.append(barrier) @contextlib.contextmanager def reserve_semaphores(self, shape: tuple[int, ...], collective_axes: CollectiveAxesType ) -> Iterator[ir.Value]: allocated_sems = math.prod(shape) ref = mgpu.memref_slice( self.scoped_gmem_semaphore_base_ptr[collective_axes], mgpu.ds(self.scoped_gmem_used_semaphores[collective_axes], allocated_sems), ) ref = mgpu.memref_reshape(ref, shape) self.scoped_gmem_used_semaphores[collective_axes] += allocated_sems yield ref # TODO: In debug mode verify the values of all semaphores are again 0 self.scoped_gmem_used_semaphores[collective_axes] -= allocated_sems @contextlib.contextmanager def alloc_tmem( self, struct: jax.ShapeDtypeStruct, *, layout: tcgen05.TMEMLayout, ) -> Iterator[tcgen05.TMEMRef | ir.Value]: if self.lowering_semantics == mgpu.LoweringSemantics.Lane: off = arith_dialect.addi( self.tmem_base, _i32_constant(self.tmem_used_cols) ) tmem_ref = tcgen05.TMEMRef( address=off, shape=struct.shape, dtype=mgpu_utils.dtype_to_ir_type(struct.dtype), layout=layout, ) else: type = ir.MemRefType.get( struct.shape, mgpu_utils.dtype_to_ir_type(struct.dtype), memory_space=mgpu_utils.tmem(), ) tmem_ref = mgpu.dialect.slice_tmem( type, self.tmem_base, self.tmem_used_cols ) layout_attr = mgpu.to_layout_attr(layout) tmem_ref = mgpu.dialect.tmem_layout_cast(tmem_ref, layout_attr) cols_used = layout.cols_in_shape( struct.shape, dtypes.itemsize_bits(struct.dtype) ) cols_used = gpu_core.align_to(cols_used, gpu_core.TMEM_COL_ALIGNMENT) self.tmem_used_cols += cols_used yield tmem_ref self.tmem_used_cols -= cols_used # TODO(cperivol): Only return the shapes and figure out the sizes when freeing. 
@contextlib.contextmanager def scratch_view(self, struct: jax.ShapeDtypeStruct) -> Iterator[ir.Value]: """Creates a view into the runtime scratch buffer for the given struct. This is a low-level API. Use it only if you know what you are doing. The function allocates bytes at the top of a stack, which need to be deallocated in a FIFO fashion with :meth:`ModuleContext.stack_free_smem`. After deallocation, the view is invalid and cannot be used. Args: struct: The shape and dtype of the view to create. Returns: A memref view into the runtime scratch buffer. """ smem_base = None i8 = ir.IntegerType.get_signless(8) i32 = ir.IntegerType.get_signless(32) if self.lowering_semantics == mgpu.LoweringSemantics.Lane: smem_base = gpu_dialect.dynamic_shared_memory( ir.MemRefType.get( (mgpu_utils.DYNAMIC,), i8, memory_space=mgpu_utils.smem() ) ) off = initial_used_bytes = self.smem_used_bytes assert off % gpu_core.SMEM_ALIGNMENT == 0 scratch_ty = ir.MemRefType.get( struct.shape, mgpu_utils.dtype_to_ir_type(struct.dtype), memory_space=mgpu_utils.smem(), ) # The below code emission relies on the assumption that the first scratch # operand provided by Mosaic GPU always begins at the beginning of # dynamic SMEM. Mosaic GPU is expected to uphold that invariant. if self.lowering_semantics == mgpu.LoweringSemantics.Lane: view = memref_dialect.view(scratch_ty, smem_base, _as_index(off), []) else: view = mgpu.dialect.slice_smem(scratch_ty, mgpu_utils.c(off, i32)) off += gpu_core.align_to( math.prod(struct.shape) * dtypes.itemsize_bits(jnp.dtype(struct.dtype)) // 8, gpu_core.SMEM_ALIGNMENT, ) assert off <= self.smem_requested_bytes, "Ran out of scoped SMEM" assert off % gpu_core.SMEM_ALIGNMENT == 0 self.smem_used_bytes = off yield view self.smem_used_bytes = initial_used_bytes # This is morally ``ShapedArray | state.AbstractRef``, but pytype does not # allow calling methods on a union type, making ``update`` non-callable, so # we use a protocol instead of a union.
ModuleContext
python
pennersr__django-allauth
allauth/headless/socialaccount/response.py
{ "start": 3162, "end": 3374 }
class ____(APIResponse): def __init__(self, request, accounts): data = [_socialaccount_data(request, account) for account in accounts] super().__init__(request, data=data)
SocialAccountsResponse
python
huggingface__transformers
tests/kernels/test_kernels.py
{ "start": 12731, "end": 14485 }
class ____(TestCasePlus): def test_load_and_register_flash_attn_like_kernel(self): kernel_obj = types.SimpleNamespace(flash_attn_varlen_func=lambda *a, **k: None) with ( patch("transformers.integrations.hub_kernels.get_kernel", return_value=kernel_obj), patch("transformers.integrations.hub_kernels.lazy_import_flash_attention", return_value=None), ): attn_impl = "org/model" load_and_register_attn_kernel(attn_impl) self.assertIn(attn_impl, ALL_ATTENTION_FUNCTIONS.valid_keys()) # Cleanup registration to avoid leaking functions across tests try: ALL_ATTENTION_FUNCTIONS.pop(attn_impl, None) except Exception: pass try: ALL_MASK_ATTENTION_FUNCTIONS.pop(attn_impl, None) except Exception: pass def test_load_and_register_named_function_kernel(self): def my_attention(*args, **kwargs): return None kernel_obj = types.SimpleNamespace(my_func=my_attention) with patch("transformers.integrations.hub_kernels.get_kernel", return_value=kernel_obj): attn_impl = "org/model:my_func" load_and_register_attn_kernel(attn_impl) self.assertIn(attn_impl, ALL_ATTENTION_FUNCTIONS.valid_keys()) # Cleanup registration to avoid leaking functions across tests try: ALL_ATTENTION_FUNCTIONS.pop(attn_impl, None) except Exception: pass try: ALL_MASK_ATTENTION_FUNCTIONS.pop(attn_impl, None) except Exception: pass @require_kernels
TestAttentionKernelRegistration
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/ruff/RUF032.py
{ "start": 877, "end": 1475 }
class ____(): value: float | int | str def __init__(self, value: float | int | str) -> None: self.value = value val = Decimal(0.0) val = Decimal("0.0") val = Decimal(10.0) val = Decimal("10.0") val = Decimal(-10.0) val = Decimal("-10.0") a = 10.0 val = Decimal(a) # Retest with fully qualified import val = decimal.Decimal(0.0) # Should error val = decimal.Decimal("0.0") val = decimal.Decimal(10.0) # Should error val = decimal.Decimal("10.0") val = decimal.Decimal(-10.0) # Should error val = decimal.Decimal("-10.0") a = 10.0 val = decimal.Decimal(a)
Decimal
python
pandas-dev__pandas
pandas/tests/arithmetic/test_numeric.py
{ "start": 32152, "end": 40952 }
class ____: # __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__ # for non-timestamp/timedelta/period dtypes @pytest.mark.parametrize( "first, second, expected", [ ( Series([1, 2, 3], index=list("ABC"), name="x"), Series([2, 2, 2], index=list("ABD"), name="x"), Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x"), ), ( Series([1, 2, 3], index=list("ABC"), name="x"), Series([2, 2, 2, 2], index=list("ABCD"), name="x"), Series([3, 4, 5, np.nan], index=list("ABCD"), name="x"), ), ], ) def test_add_series(self, first, second, expected): # GH#1134 tm.assert_series_equal(first + second, expected) tm.assert_series_equal(second + first, expected) @pytest.mark.parametrize( "first, second, expected", [ ( pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")), pd.DataFrame({"x": [2, 2, 2]}, index=list("ABD")), pd.DataFrame({"x": [3.0, 4.0, np.nan, np.nan]}, index=list("ABCD")), ), ( pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")), pd.DataFrame({"x": [2, 2, 2, 2]}, index=list("ABCD")), pd.DataFrame({"x": [3, 4, 5, np.nan]}, index=list("ABCD")), ), ], ) def test_add_frames(self, first, second, expected): # GH#1134 tm.assert_frame_equal(first + second, expected) tm.assert_frame_equal(second + first, expected) # TODO: This came from series.test.test_operators, needs cleanup def test_series_frame_radd_bug(self, fixed_now_ts): # GH#353 vals = Series([str(i) for i in range(5)]) result = "foo_" + vals expected = vals.map(lambda x: "foo_" + x) tm.assert_series_equal(result, expected) frame = pd.DataFrame({"vals": vals}) result = "foo_" + frame expected = pd.DataFrame({"vals": vals.map(lambda x: "foo_" + x)}) tm.assert_frame_equal(result, expected) ts = Series( np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10), name="ts", ) # really raise this time fix_now = fixed_now_ts.to_pydatetime() msg = "|".join( [ "unsupported operand type", # wrong error message, see https://github.com/numpy/numpy/issues/18832 "Concatenation operation", ] ) with 
pytest.raises(TypeError, match=msg): fix_now + ts with pytest.raises(TypeError, match=msg): ts + fix_now # TODO: This came from series.test.test_operators, needs cleanup def test_datetime64_with_index(self): # arithmetic integer ops with an index ser = Series(np.random.default_rng(2).standard_normal(5)) expected = ser - ser.index.to_series() result = ser - ser.index tm.assert_series_equal(result, expected) # GH#4629 # arithmetic datetime64 ops with an index ser = Series( date_range("20130101", periods=5), index=date_range("20130101", periods=5), ) expected = ser - ser.index.to_series() result = ser - ser.index tm.assert_series_equal(result, expected) msg = "cannot subtract PeriodArray from DatetimeArray" with pytest.raises(TypeError, match=msg): # GH#18850 result = ser - ser.index.to_period() df = pd.DataFrame( np.random.default_rng(2).standard_normal((5, 2)), index=date_range("20130101", periods=5), ) df["date"] = pd.Timestamp("20130102") df["expected"] = df["date"] - df.index.to_series() df["result"] = df["date"] - df.index tm.assert_series_equal(df["result"], df["expected"], check_names=False) # TODO: taken from tests.frame.test_operators, needs cleanup def test_frame_operators(self, float_frame): frame = float_frame garbage = np.random.default_rng(2).random(4) colSeries = Series(garbage, index=np.array(frame.columns)) idSum = frame + frame seriesSum = frame + colSeries for col, series in idSum.items(): for idx, val in series.items(): origVal = frame[col][idx] * 2 if not np.isnan(val): assert val == origVal else: assert np.isnan(origVal) for col, series in seriesSum.items(): for idx, val in series.items(): origVal = frame[col][idx] + colSeries[col] if not np.isnan(val): assert val == origVal else: assert np.isnan(origVal) def test_frame_operators_col_align(self, float_frame): frame2 = pd.DataFrame(float_frame, columns=["D", "C", "B", "A"]) added = frame2 + frame2 expected = frame2 * 2 tm.assert_frame_equal(added, expected) def 
test_frame_operators_none_to_nan(self): df = pd.DataFrame({"a": ["a", None, "b"]}) tm.assert_frame_equal(df + df, pd.DataFrame({"a": ["aa", np.nan, "bb"]})) @pytest.mark.parametrize("dtype", ("float", "int64")) def test_frame_operators_empty_like(self, dtype): # Test for issue #10181 frames = [ pd.DataFrame(dtype=dtype), pd.DataFrame(columns=["A"], dtype=dtype), pd.DataFrame(index=[0], dtype=dtype), ] for df in frames: assert (df + df).equals(df) tm.assert_frame_equal(df + df, df) @pytest.mark.parametrize( "func", [lambda x: x * 2, lambda x: x[::2], lambda x: 5], ids=["multiply", "slice", "constant"], ) def test_series_operators_arithmetic(self, all_arithmetic_functions, func): op = all_arithmetic_functions series = Series( np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10), name="ts", ) other = func(series) compare_op(series, other, op) @pytest.mark.parametrize( "func", [lambda x: x + 1, lambda x: 5], ids=["add", "constant"] ) def test_series_operators_compare(self, comparison_op, func): op = comparison_op series = Series( np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10), name="ts", ) other = func(series) compare_op(series, other, op) @pytest.mark.parametrize( "func", [lambda x: x * 2, lambda x: x[::2], lambda x: 5], ids=["multiply", "slice", "constant"], ) def test_divmod(self, func): series = Series( np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10), name="ts", ) other = func(series) results = divmod(series, other) if isinstance(other, abc.Iterable) and len(series) != len(other): # if the lengths don't match, this is the test where we use # `tser[::2]`. Pad every other value in `other_np` with nan. 
other_np = [] for n in other: other_np.append(n) other_np.append(np.nan) else: other_np = other other_np = np.asarray(other_np) with np.errstate(all="ignore"): expecteds = divmod(series.values, np.asarray(other_np)) for result, expected in zip(results, expecteds, strict=True): # check the values, name, and index separately tm.assert_almost_equal(np.asarray(result), expected) assert result.name == series.name tm.assert_index_equal(result.index, series.index._with_freq(None)) def test_series_divmod_zero(self): # Check that divmod uses pandas convention for division by zero, # which does not match numpy. # pandas convention has # 1/0 == np.inf # -1/0 == -np.inf # 1/-0.0 == -np.inf # -1/-0.0 == np.inf tser = Series( np.arange(1, 11, dtype=np.float64), index=date_range("2020-01-01", periods=10), name="ts", ) other = tser * 0 result = divmod(tser, other) exp1 = Series([np.inf] * len(tser), index=tser.index, name="ts") exp2 = Series([np.nan] * len(tser), index=tser.index, name="ts") tm.assert_series_equal(result[0], exp1) tm.assert_series_equal(result[1], exp2)
TestAdditionSubtraction
python
django__django
tests/select_related/models.py
{ "start": 1360, "end": 1561 }
class ____(models.Model): name = models.CharField(max_length=50) genus = models.ForeignKey(Genus, models.CASCADE) # and we'll invent a new thing so we have a model with two foreign keys
Species
python
mlflow__mlflow
mlflow/gateway/config.py
{ "start": 6330, "end": 6618 }
class ____(ConfigModel): model_server_url: str # Workaround to suppress warning that Pydantic raises when a field name starts with "model_". # https://github.com/mlflow/mlflow/issues/10335 model_config = pydantic.ConfigDict(protected_namespaces=())
MlflowModelServingConfig
python
ray-project__ray
release/train_tests/benchmark/dataloader_factory.py
{ "start": 205, "end": 876 }
class ____(ABC): """Base class for creating and managing dataloaders.""" def __init__(self, benchmark_config: BenchmarkConfig): self.benchmark_config = benchmark_config def get_dataloader_config(self) -> DataLoaderConfig: return self.benchmark_config.dataloader_config @abstractmethod def get_train_dataloader(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]: pass @abstractmethod def get_val_dataloader(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]: pass def get_metrics(self) -> Dict[str, Any]: """Return metrics about dataloader performance.""" return {}
BaseDataLoaderFactory
python
great-expectations__great_expectations
great_expectations/expectations/core/expect_column_values_to_not_be_null.py
{ "start": 2382, "end": 18254 }
class ____(ColumnMapExpectation): __doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION} To be counted as an exception, values must be explicitly null or missing, such as a NULL in PostgreSQL or an np.NaN in pandas. Empty strings don't count as null unless they have been coerced to a null type. ExpectColumnValuesToNotBeNull is a \ Column Map Expectation. Column Map Expectations are one of the most common types of Expectation. They are evaluated for a single column and ask a yes/no question for every row in that column. Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid. Args: column (str): \ {COLUMN_DESCRIPTION} Other Parameters: mostly (None or a float between 0 and 1): \ {MOSTLY_DESCRIPTION} \ For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly). Default 1. result_format (str or None): \ Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \ For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format). catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions). meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta). severity (str or None): \ {FAILURE_SEVERITY_DESCRIPTION} \ For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity). 
Returns: An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result) Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta. See Also: [ExpectColumnValuesToBeNull](https://greatexpectations.io/expectations/expect_column_values_to_be_null) Supported Data Sources: [{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/) Data Quality Issues: {DATA_QUALITY_ISSUES[0]} Example Data: test test2 0 NaN "A" 1 True NaN 2 False NaN Code Examples: Passing Case: Input: ExpectColumnValuesToNotBeNull( column="test", mostly=0.66 ) Output: {{ "exception_info": {{ "raised_exception": false, "exception_traceback": null, "exception_message": null 
}}, "result": {{ "element_count": 3, "unexpected_count": 1, "unexpected_percent": 33.33333333333333, "partial_unexpected_list": [ null ] }}, "meta": {{}}, "success": true }} Failing Case: Input: ExpectColumnValuesToNotBeNull( column="test2" ) Output: {{ "exception_info": {{ "raised_exception": false, "exception_traceback": null, "exception_message": null }}, "result": {{ "element_count": 3, "unexpected_count": 2, "unexpected_percent": 66.66666666666666, "partial_unexpected_list": [ null, null ] }}, "meta": {{}}, "success": false }} """ # noqa: E501 # FIXME CoP library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = { "maturity": "production", "tags": ["core expectation", "column map expectation"], "contributors": ["@great_expectations"], "requirements": [], "has_full_test_suite": True, "manually_reviewed_code": True, } _library_metadata = library_metadata map_metric: ClassVar[str] = "column_values.nonnull" args_keys: ClassVar[Tuple[str, ...]] = ("column",) class Config: title = "Expect column values to not be null" @staticmethod def schema_extra( schema: Dict[str, Any], model: Type[ExpectColumnValuesToNotBeNull] ) -> None: ColumnMapExpectation.Config.schema_extra(schema, model) schema["properties"]["metadata"]["properties"].update( { "data_quality_issues": { "title": "Data Quality Issues", "type": "array", "const": DATA_QUALITY_ISSUES, }, "library_metadata": { "title": "Library Metadata", "type": "object", "const": model._library_metadata, }, "short_description": { "title": "Short Description", "type": "string", "const": EXPECTATION_SHORT_DESCRIPTION, }, "supported_data_sources": { "title": "Supported Data Sources", "type": "array", "const": SUPPORTED_DATA_SOURCES, }, } ) @classmethod @override def _prescriptive_template( cls, renderer_configuration: RendererConfiguration, ) -> RendererConfiguration: add_param_args: AddParamArgs = ( ("column", RendererValueType.STRING), ("mostly", RendererValueType.NUMBER), ) for name, param_type in add_param_args: 
renderer_configuration.add_param(name=name, param_type=param_type) params = renderer_configuration.params if params.mostly and params.mostly.value < 1.0: renderer_configuration = cls._add_mostly_pct_param( renderer_configuration=renderer_configuration ) template_str = "values must not be null, at least $mostly_pct % of the time." else: template_str = "values must never be null." if renderer_configuration.include_column_name: template_str = f"$column {template_str}" renderer_configuration.template_str = template_str return renderer_configuration @classmethod @override @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE) @render_suite_parameter_string def _prescriptive_renderer( cls, configuration: Optional[ExpectationConfiguration] = None, result: Optional[ExpectationValidationResult] = None, runtime_configuration: Optional[dict] = None, **kwargs, ) -> list[RenderedStringTemplateContent]: runtime_configuration = runtime_configuration or {} include_column_name = runtime_configuration.get("include_column_name") is not False styling = runtime_configuration.get("styling") params = substitute_none_for_missing( configuration.kwargs, # type: ignore[union-attr] # FIXME: could be None ["column", "mostly", "row_condition", "condition_parser"], ) if params["mostly"] is not None: if isinstance(params["mostly"], (int, float)) and params["mostly"] < 1.0: params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True) # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") # noqa: E501 # FIXME CoP if include_column_name: template_str = ( "$column values must not be null, at least $mostly_pct % of the time." ) else: template_str = "values must not be null, at least $mostly_pct % of the time." else: # noqa: PLR5501 # FIXME CoP if include_column_name: template_str = "$column values must never be null." else: template_str = "values must never be null." 
if params["row_condition"] is not None: conditional_template_str = parse_row_condition_string(params["row_condition"]) template_str, styling = _style_row_condition( conditional_template_str, template_str, params, styling, ) return [ RenderedStringTemplateContent( content_block_type="string_template", string_template={ "template": template_str, "params": params, "styling": styling, }, ) ] @classmethod @override @renderer(renderer_type=LegacyDiagnosticRendererType.OBSERVED_VALUE) def _diagnostic_observed_value_renderer( cls, configuration: Optional[ExpectationConfiguration] = None, result: Optional[ExpectationValidationResult] = None, runtime_configuration: Optional[dict] = None, **kwargs, ): result_dict = result.result # type: ignore[union-attr] # FIXME: could be None try: null_percent = result_dict["unexpected_percent"] return num_to_str(100 - null_percent, precision=5, use_locale=True) + "% not null" except KeyError: return "unknown % not null" except TypeError: return "NaN% not null" return "--" @classmethod @renderer(renderer_type=LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_MISSING_COUNT_ROW) def _descriptive_column_properties_table_missing_count_row_renderer( cls, configuration: Optional[ExpectationConfiguration] = None, result: Optional[ExpectationValidationResult] = None, runtime_configuration: Optional[dict] = None, **kwargs, ): assert result, "Must pass in result." 
return [ RenderedStringTemplateContent( content_block_type="string_template", string_template={ "template": "Missing (n)", "tooltip": {"content": "expect_column_values_to_not_be_null"}, }, ), result.result["unexpected_count"] if "unexpected_count" in result.result and result.result["unexpected_count"] is not None else "--", ] @classmethod @renderer( renderer_type=LegacyDescriptiveRendererType.COLUMN_PROPERTIES_TABLE_MISSING_PERCENT_ROW ) def _descriptive_column_properties_table_missing_percent_row_renderer( cls, configuration: Optional[ExpectationConfiguration] = None, result: Optional[ExpectationValidationResult] = None, runtime_configuration: Optional[dict] = None, **kwargs, ): assert result, "Must pass in result." return [ RenderedStringTemplateContent( content_block_type="string_template", string_template={ "template": "Missing (%)", "tooltip": {"content": "expect_column_values_to_not_be_null"}, }, ), f"{result.result['unexpected_percent']:.1f}%" if "unexpected_percent" in result.result and result.result["unexpected_percent"] is not None else "--", ] @override def _validate( self, metrics: Dict, runtime_configuration: Optional[dict] = None, execution_engine: Optional[ExecutionEngine] = None, ): result_format = self._get_result_format(runtime_configuration=runtime_configuration) mostly = self._get_success_kwargs().get("mostly") total_count = metrics.get("table.row_count") unexpected_count = metrics.get( f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}" ) if total_count is None or total_count == 0: # Vacuously true success = True else: success_ratio = (total_count - unexpected_count) / total_count success = success_ratio >= mostly nonnull_count = None # Handle unexpected_rows for include_unexpected_rows feature parsed_result_format = parse_result_format(result_format) unexpected_rows = None if parsed_result_format.get("include_unexpected_rows", False): unexpected_rows = metrics.get( 
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}" ) return _format_map_output( result_format=parsed_result_format, success=success, element_count=metrics.get("table.row_count"), nonnull_count=nonnull_count, unexpected_count=metrics.get( f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}" ), unexpected_list=metrics.get( f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}" ), unexpected_index_list=metrics.get( f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}" ), unexpected_index_query=metrics.get( f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}" ), unexpected_index_column_names=parsed_result_format.get("unexpected_index_column_names"), unexpected_rows=unexpected_rows, )
ExpectColumnValuesToNotBeNull
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_bch_address.py
{ "start": 890, "end": 1891 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.valid_bch_address" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): return column.apply(lambda x: is_valid_bch_address(x)) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeValidBchAddress
python
readthedocs__readthedocs.org
readthedocs/projects/views/private.py
{ "start": 34224, "end": 34344 }
class ____(IntegrationMixin, DetailView): template_name = "projects/integration_webhook_detail.html"
IntegrationDetail
python
pytorch__pytorch
torch/_inductor/codecache.py
{ "start": 26136, "end": 26293 }
class ____: """ See FxGraphHashDetails. Holds a sorted list to support stable hashing of set kwargs. """ items: list[Any]
OrderedSetHolder
python
kamyu104__LeetCode-Solutions
Python/find-minimum-diameter-after-merging-two-trees.py
{ "start": 68, "end": 1530 }
class ____(object): def minimumDiameterAfterMerge(self, edges1, edges2): """ :type edges1: List[List[int]] :type edges2: List[List[int]] :rtype: int """ def ceil_divide(a, b): return (a+b-1)//2 def tree_diameter(edges): def iter_dfs(): result = 0 stk = [(1, (0, -1, [0]))] while stk: step, args = stk.pop() if step == 1: u, p, ret = args for v in reversed(adj[u]): if v == p: continue ret2 = [0] stk.append((2, (ret2, ret))) stk.append((1, (v, u, ret2))) elif step == 2: ret2, ret = args result = max(result, ret[0]+(ret2[0]+1)) ret[0] = max(ret[0], ret2[0]+1) return result adj = [[] for _ in range(len(edges)+1)] for u, v in edges: adj[u].append(v) adj[v].append(u) return iter_dfs() d1 = tree_diameter(edges1) d2 = tree_diameter(edges2) return max(ceil_divide(d1, 2)+1+ceil_divide(d2, 2), d1, d2) # Time: O(n + m) # Space: O(n + m) # dfs, tree diameter
Solution
python
dask__dask
dask/dataframe/tseries/resample.py
{ "start": 6728, "end": 6784 }
class ____(ResampleReduction): how = "sem"
ResampleSem
python
jina-ai__jina
jina/jaml/helper.py
{ "start": 2576, "end": 2664 }
class ____(Resolver): """Remove `on|On|ON` as bool resolver.""" pass
JinaResolver
python
chroma-core__chroma
chromadb/db/base.py
{ "start": 3267, "end": 5765 }
class ____(pypika.Parameter): # type: ignore """ Wrapper class for PyPika paramters that allows the values for Parameters to be expressed inline while building a query. See get_sql() for detailed usage information. """ def __init__(self, value: Any): self.value = value @override def get_sql(self, **kwargs: Any) -> str: if isinstance(self.value, (list, tuple)): _context.values.extend(self.value) indexes = islice(_context.generator, len(self.value)) placeholders = ", ".join(_context.formatstr.format(i) for i in indexes) val = f"({placeholders})" else: _context.values.append(self.value) val = _context.formatstr.format(next(_context.generator)) return str(val) def get_sql( query: pypika.queries.QueryBuilder, formatstr: str = "?" ) -> Tuple[str, Tuple[Any, ...]]: """ Wrapper for pypika's get_sql method that allows the values for Parameters to be expressed inline while building a query, and that returns a tuple of the SQL string and parameters. This makes it easier to construct complex queries programmatically and automatically matches up the generated SQL with the required parameter vector. Doing so requires using the ParameterValue class defined in this module instead of the base pypika.Parameter class. Usage Example: q = ( pypika.Query().from_("table") .select("col1") .where("col2"==ParameterValue("foo")) .where("col3"==ParameterValue("bar")) ) sql, params = get_sql(q) cursor.execute(sql, params) Note how it is not necessary to construct the parameter vector manually... it will always be generated with the parameter values in the same order as emitted SQL string. The format string should match the parameter format for the database being used. It will be called with str.format(i) where i is the numeric index of the parameter. For example, Postgres requires parameters like `:1`, `:2`, etc. so the format string should be `":{}"`. See https://pypika.readthedocs.io/en/latest/2_tutorial.html#parametrized-queries for more information on parameterized queries in PyPika. 
""" _context.values = [] _context.generator = count(1) _context.formatstr = formatstr sql = query.get_sql() params = tuple(_context.values) return sql, params
ParameterValue
python
sympy__sympy
sympy/physics/mechanics/tests/test_actuator.py
{ "start": 30270, "end": 41081 }
class ____: @pytest.fixture(autouse=True) def _block_on_surface(self): """A block sliding on a surface. Notes ===== This test validates the correctness of the CoulombKineticFriction by simulating a block sliding on a surface with the Coulomb kinetic friction force. The test covers scenarios with both positive and negative velocities. """ # Mass, gravity constant, friction coefficient, coefficient of Stribeck friction, viscous_coefficient self.m, self.g, self.mu_k, self.mu_s, self.v_s, self.sigma, self.F = symbols('m g mu_k mu_s v_s sigma F', real=True) def test_block_on_surface_default(self): # General Case q = dynamicsymbols('q') N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway) expected_general = [Force(point=O, force=self.g * self.m * self.mu_k * q * sign(sqrt(q**2) * q.diff()/q)/sqrt(q**2) * N.x), Force(point=P, force=-self.g * self.m * self.mu_k * q * sign(sqrt(q**2) * q.diff()/q)/sqrt(q**2) * N.x)] assert friction.to_loads() == expected_general # Positive q = dynamicsymbols('q', positive=True) N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway) expected_positive = [Force(point=O, force=self.g * self.m * self.mu_k * sign(q.diff()) * N.x), Force(point=P, force=-self.g * self.m * self.mu_k * sign(q.diff()) * N.x)] assert friction.to_loads() == expected_positive # Negative q = dynamicsymbols('q', positive=False) N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway) expected_negative = [Force(point=O, force=self.g * self.m * self.mu_k * q * sign(sqrt(q**2) * 
q.diff()/q)/sqrt(q**2)*N.x), Force(point=P, force=-self.g * self.m * self.mu_k * q * sign(sqrt(q**2) * q.diff()/q)/sqrt(q**2)*N.x)] assert friction.to_loads() == expected_negative def test_block_on_surface_viscous(self): # General Case q = dynamicsymbols('q') N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, sigma=self.sigma) expected_general = [Force(point=O, force=(self.g * self.m * self.mu_k * sign(sqrt(q**2) * q.diff()/q) + self.sigma * sqrt(q**2) * q.diff()/q) * q/sqrt(q**2) * N.x), Force(point=P, force=(-self.g * self.m * self.mu_k * sign(sqrt(q**2) * q.diff()/q) - self.sigma * sqrt(q**2) * q.diff()/q) * q/sqrt(q**2) * N.x)] assert friction.to_loads() == expected_general # Positive q = dynamicsymbols('q', positive=True) N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, sigma=self.sigma) expected_positive = [Force(point=O, force=(self.g * self.m * self.mu_k * sign(q.diff()) + self.sigma * q.diff()) * N.x), Force(point=P, force=(-self.g * self.m * self.mu_k * sign(q.diff()) - self.sigma * q.diff()) * N.x)] assert friction.to_loads() == expected_positive # Negative q = dynamicsymbols('q', positive=False) N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, sigma=self.sigma) expected_negative = [Force(point=O, force=(self.g * self.m * self.mu_k * sign(sqrt(q**2) * q.diff()/q) + self.sigma * sqrt(q**2) * q.diff()/q) * q/sqrt(q**2) * N.x), Force(point=P, force=(-self.g * self.m * self.mu_k * sign(sqrt(q**2) * q.diff()/q) - self.sigma * sqrt(q**2) * q.diff()/q) * q/sqrt(q**2) * N.x)] 
assert friction.to_loads() == expected_negative def test_block_on_surface_stribeck(self): # General Case q = dynamicsymbols('q') N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, v_s=self.v_s, mu_s=self.mu_s) expected_general = [Force(point=O, force=(self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * q * sign(sqrt(q**2) * q.diff()/q)/sqrt(q**2) * N.x), Force(point=P, force=- (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * q * sign(sqrt(q**2) * q.diff()/q)/sqrt(q**2) * N.x)] assert friction.to_loads() == expected_general # Positive q = dynamicsymbols('q', positive=True) N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, v_s=self.v_s, mu_s=self.mu_s) expected_positive = [Force(point=O, force=(self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * sign(q.diff()) * N.x), Force(point=P, force=- (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * sign(q.diff()) * N.x)] assert friction.to_loads() == expected_positive # Negative q = dynamicsymbols('q', positive=False) N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, v_s=self.v_s, mu_s=self.mu_s) expected_negative = [Force(point=O, force=(self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * 
exp(-q.diff()**2/self.v_s**2)) * q * sign(sqrt(q**2) * q.diff()/q)/sqrt(q**2) * N.x), Force(point=P, force=- (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * q * sign(sqrt(q**2) * q.diff()/q)/sqrt(q**2) * N.x)] assert friction.to_loads() == expected_negative def test_block_on_surface_all(self): # General Case q = dynamicsymbols('q') N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, v_s=self.v_s, sigma=self.sigma, mu_s=self.mu_s) expected_general = [Force(point=O, force=(self.sigma * sqrt(q**2) * q.diff()/q + (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * sign(sqrt(q**2) * q.diff()/q)) * q/sqrt(q**2) * N.x), Force(point=P, force=(-self.sigma * sqrt(q**2) * q.diff()/q - (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * sign(sqrt(q**2) * q.diff()/q)) * q/sqrt(q**2) * N.x)] assert friction.to_loads() == expected_general # Positive q = dynamicsymbols('q', positive=True) N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, v_s=self.v_s, sigma=self.sigma, mu_s=self.mu_s) expected_positive = [Force(point=O, force=(self.sigma * q.diff() + (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * sign(q.diff())) * N.x), Force(point=P, force=(-self.sigma * q.diff() - (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * sign(q.diff())) * N.x)] assert friction.to_loads() == 
expected_positive # Negative q = dynamicsymbols('q', positive=False) N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction(self.mu_k, self.m * self.g, pathway, v_s=self.v_s, sigma=self.sigma, mu_s=self.mu_s) expected_negative = [Force(point=O, force=(self.sigma * sqrt(q**2) * q.diff()/q + (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * sign(sqrt(q**2) * q.diff()/q)) * q/sqrt(q**2) * N.x), Force(point=P, force=(-self.sigma * sqrt(q**2) * q.diff()/q - (self.g * self.m * self.mu_k + (-self.g * self.m * self.mu_k + self.g * self.m * self.mu_s) * exp(-q.diff()**2/self.v_s**2)) * sign(sqrt(q**2) * q.diff()/q)) * q/sqrt(q**2) * N.x)] assert friction.to_loads() == expected_negative def test_normal_force_zero(self): q = dynamicsymbols('q') N = ReferenceFrame('N') O = Point('O') P = O.locatenew('P', q * N.x) O.set_vel(N, 0) P.set_vel(N, q.diff() * N.x) pathway = LinearPathway(O, P) friction = CoulombKineticFriction( self.mu_k, 0, pathway ) assert friction.force == 0
TestCoulombKineticFriction
python
pypa__hatch
tests/backend/builders/test_config.py
{ "start": 14244, "end": 16091 }
class ____: def test_default(self, isolation): builder = MockBuilder(str(isolation)) assert builder.config.only_packages is builder.config.only_packages is False def test_target(self, isolation): config = {"tool": {"hatch": {"build": {"targets": {"foo": {"only-packages": True}}}}}} builder = MockBuilder(str(isolation), config=config) builder.PLUGIN_NAME = "foo" assert builder.config.only_packages is True def test_target_not_boolean(self, isolation): config = {"tool": {"hatch": {"build": {"targets": {"foo": {"only-packages": 9000}}}}}} builder = MockBuilder(str(isolation), config=config) builder.PLUGIN_NAME = "foo" with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.foo.only-packages` must be a boolean"): _ = builder.config.only_packages def test_global(self, isolation): config = {"tool": {"hatch": {"build": {"only-packages": True}}}} builder = MockBuilder(str(isolation), config=config) builder.PLUGIN_NAME = "foo" assert builder.config.only_packages is True def test_global_not_boolean(self, isolation): config = {"tool": {"hatch": {"build": {"only-packages": 9000}}}} builder = MockBuilder(str(isolation), config=config) builder.PLUGIN_NAME = "foo" with pytest.raises(TypeError, match="Field `tool.hatch.build.only-packages` must be a boolean"): _ = builder.config.only_packages def test_target_overrides_global(self, isolation): config = {"tool": {"hatch": {"build": {"only-packages": True, "targets": {"foo": {"only-packages": False}}}}}} builder = MockBuilder(str(isolation), config=config) builder.PLUGIN_NAME = "foo" assert builder.config.only_packages is False
TestOnlyPackages
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 52481, "end": 53426 }
class ____(TestCase): """ Test the current/default timezone handling in `DateTimeField`. """ @classmethod def setup_class(cls): cls.field = serializers.DateTimeField() cls.kolkata = ZoneInfo('Asia/Kolkata') def assertUTC(self, tzinfo): """ Check UTC for datetime.timezone, ZoneInfo, and pytz tzinfo instances. """ assert ( tzinfo is utc or (getattr(tzinfo, "key", None) or getattr(tzinfo, "zone", None)) == "UTC" ) def test_default_timezone(self): self.assertUTC(self.field.default_timezone()) def test_current_timezone(self): self.assertUTC(self.field.default_timezone()) activate(self.kolkata) assert self.field.default_timezone() == self.kolkata deactivate() self.assertUTC(self.field.default_timezone()) @override_settings(TIME_ZONE='UTC', USE_TZ=True)
TestDefaultTZDateTimeField
python
django__django
tests/file_storage/tests.py
{ "start": 47743, "end": 48586 }
class ____(LiveServerTestCase): """ Test file-like objects (#15644). """ available_apps = [] def setUp(self): self.temp_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.temp_dir) self.storage = FileSystemStorage(location=self.temp_dir) def test_urllib_request_urlopen(self): """ Test the File storage API with a file-like object coming from urllib.request.urlopen(). """ file_like_object = urlopen(self.live_server_url + "/") f = File(file_like_object) stored_filename = self.storage.save("remote_file.html", f) remote_file = urlopen(self.live_server_url + "/") with self.storage.open(stored_filename) as stored_file: self.assertEqual(stored_file.read(), remote_file.read())
FileLikeObjectTestCase
python
doocs__leetcode
solution/2100-2199/2123.Minimum Operations to Remove Adjacent Ones in Matrix/Solution.py
{ "start": 0, "end": 1085 }
class ____: def minimumOperations(self, grid: List[List[int]]) -> int: def find(i: int) -> int: for j in g[i]: if j not in vis: vis.add(j) if match[j] == -1 or find(match[j]): match[j] = i return 1 return 0 g = defaultdict(list) m, n = len(grid), len(grid[0]) for i, row in enumerate(grid): for j, v in enumerate(row): if (i + j) % 2 and v: x = i * n + j if i < m - 1 and grid[i + 1][j]: g[x].append(x + n) if i and grid[i - 1][j]: g[x].append(x - n) if j < n - 1 and grid[i][j + 1]: g[x].append(x + 1) if j and grid[i][j - 1]: g[x].append(x - 1) match = [-1] * (m * n) ans = 0 for i in g.keys(): vis = set() ans += find(i) return ans
Solution
python
PyCQA__pylint
tests/functional/c/class_members_py30.py
{ "start": 215, "end": 572 }
class ____: """class docstring""" def __init__(self): """init""" self.correct = 1 def test(self): """test""" self.correct += 2 self.incorrect += 2 # [no-member] del self.havenot # [no-member] self.nonexistent1.truc() # [no-member] self.nonexistent2[1] = 'hehe' # [no-member]
MyClass
python
pennersr__django-allauth
allauth/core/internal/adapter.py
{ "start": 87, "end": 502 }
class ____: def __init__(self, request=None): # Explicitly passing `request` is deprecated, just use: # `allauth.core.context.request`. self.request = context.request def validation_error(self, code, *args): message = self.error_messages[code] if args: message = message % args exc = ValidationError(message, code=code) return exc
BaseAdapter
python
walkccc__LeetCode
solutions/3180. Maximum Total Reward Using Operations I/3180.py
{ "start": 1091, "end": 1424 }
class ____: def maxTotalReward(self, rewardValues: list[int]) -> int: dp = 1 # the possible rewards (initially, 0 is achievable) for num in sorted(rewardValues): # Remove the numbers >= the current number. smallerNums = dp & ((1 << num) - 1) dp |= smallerNums << num return dp.bit_length() - 1
Solution
python
tornadoweb__tornado
tornado/locks.py
{ "start": 962, "end": 1628 }
class ____: """Base class for objects that periodically clean up timed-out waiters. Avoids memory leak in a common pattern like: while True: yield condition.wait(short_timeout) print('looping....') """ def __init__(self) -> None: self._waiters = collections.deque() # type: Deque[Future] self._timeouts = 0 def _garbage_collect(self) -> None: # Occasionally clear timed-out waiters. self._timeouts += 1 if self._timeouts > 100: self._timeouts = 0 self._waiters = collections.deque(w for w in self._waiters if not w.done())
_TimeoutGarbageCollector
python
has2k1__plotnine
plotnine/_mpl/layout_manager/_spaces.py
{ "start": 14170, "end": 17205 }
class ____(_side_spaces): """ Space in the figure for artists on the right of the panel area Ordered from the edge of the figure and going inwards """ plot_margin: float = 0 tag_alignment: float = 0 plot_tag_margin_right: float = 0 plot_tag: float = 0 plot_tag_margin_left: float = 0 margin_alignment: float = 0 legend: float = 0 legend_box_spacing: float = 0 strip_text_y_extra_width: float = 0 def _calculate(self): items = self.items theme = self.items.plot.theme calc = self.items.calc self.plot_margin = theme.getp("plot_margin_right") if self.has_tag and items.plot_tag: m = theme.get_margin("plot_tag").fig self.plot_tag_margin_right = m.r self.plot_tag = calc.width(items.plot_tag) self.plot_tag_margin_left = m.l if items.legends and items.legends.right: self.legend = self.legend_width self.legend_box_spacing = theme.getp("legend_box_spacing") self.strip_text_y_extra_width = items.strip_text_y_extra_width("right") # Adjust plot_margin to make room for ylabels that protude well # beyond the axes # NOTE: This adjustment breaks down when the protrusion is large protrusion = items.axis_text_x_right_protrusion("all") adjustment = protrusion - (self.total - self.plot_margin) if adjustment > 0: self.plot_margin += adjustment @property def offset(self): """ Distance from right of the figure to the right of the plot gridspec ---------------(1, 1) | ---- | | | | -dx | | | |<--->| | | | | | ---- | (0, 0)--------------- """ return self.gs.bbox_relative.x1 - 1 def x1(self, item: str) -> float: """ Lower x-coordinate in figure space of the item """ return self.to_figure_space(1 - self.sum_incl(item)) def x2(self, item: str) -> float: """ Higher x-coordinate in figure space of the item """ return self.to_figure_space(1 - self.sum_upto(item)) @property def panel_right_relative(self): """ Right (relative to the gridspec) of the panels in figure dimensions """ return 1 - self.total @property def panel_right(self): """ Right of the panels in figure space """ return 
self.to_figure_space(self.panel_right_relative) @property def plot_right(self): """ Distance up to the right-most artist in figure space """ return self.x2("legend") @property def tag_width(self): """ The width of the tag including the margins """ return ( self.plot_tag_margin_right + self.plot_tag + self.plot_tag_margin_left ) @dataclass
right_spaces
python
pytorch__pytorch
test/inductor/test_dependencies.py
{ "start": 473, "end": 5311 }
class ____(InductorTestCase): def _create_buffer(self, name, shape, dtype=torch.float32): return Buffer( name=name, layout=FixedLayout(torch.device(GPU_TYPE), dtype=dtype, size=shape), ) def setUp(self): super().setUp() class DummyModule(torch.nn.Module): def forward(self, x): return x * 2 self._gm = torch.fx.symbolic_trace(DummyModule()) self._graph = GraphLowering(self._gm) self._stack = contextlib.ExitStack() self._stack.enter_context(V.set_graph_handler(self._graph)) def tearDown(self): self._stack.close() super().tearDown() def test_bucketize_dependencies_no_sorter(self): offsets = self._create_buffer("offsets", (1025,), torch.int32) def inner_fn(index): idx = index[0] return ops.bucketize( values=idx, boundaries=( offsets.get_name(), offsets.get_size()[-1], offsets.get_size()[0] * offsets.get_stride()[0], offsets.get_stride()[-1], ), boundary_indices=0, indexing_dtype=torch.int32, right=True, ) pointwise = Pointwise.create( device=torch.device(GPU_TYPE), dtype=torch.int32, inner_fn=inner_fn, ranges=[1024 * 4], ) self.assertEqual(len(pointwise.get_reads()), 1) def test_bucketize_dependencies_sorter(self): offsets = self._create_buffer("offsets", (1025,), torch.int32) sorter = self._create_buffer("sorter", (1025,), torch.int32) def inner_fn(index): idx = index[0] return ops.bucketize( values=idx, boundaries=( offsets.get_name(), offsets.get_size()[-1], offsets.get_size()[0] * offsets.get_stride()[0], offsets.get_stride()[-1], ), boundary_indices=0, indexing_dtype=torch.int32, right=True, sorter=( sorter.get_name(), sorter.get_stride()[-1], ), sorter_indices=0, ) pointwise = Pointwise.create( device=torch.device(GPU_TYPE), dtype=torch.int32, inner_fn=inner_fn, ranges=[1024 * 4], ) self.assertEqual(len(pointwise.get_reads()), 2) def test_get_offset(self): x = sympy_index_symbol("x") y = sympy_index_symbol("y") var_ranges = { x: 1024, y: 2048, } dep1 = MemoryDep( "dep1", x * 2048 + y, list(var_ranges.keys()), list(var_ranges.values()), ) dep2 = MemoryDep( "dep2", 
x * 2048 + y + 1024, list(var_ranges.keys()), list(var_ranges.values()), ) self.assertEqual(dep1.get_offset(), 0) self.assertEqual(dep2.get_offset(), 1024) def test_normalize_with_stride_order_equal(self): x = sympy_index_symbol("x") y = sympy_index_symbol("y") loop_order1 = MemoryDep( "access_the_same_buffer", x * 2048 + y, [x, y], [1024, 2048], ) loop_order2 = MemoryDep( "access_the_same_buffer", x * 2048 + y, [y, x], [2048, 1024], ) self.assertTrue(loop_order1 != loop_order2) normalized_loop_order1 = loop_order1.normalize_with_stride_order() normalized_loop_order2 = loop_order2.normalize_with_stride_order() self.assertTrue(normalized_loop_order1 == normalized_loop_order2) def test_normalize_with_stride_order_unequal(self): x = sympy_index_symbol("x") y = sympy_index_symbol("y") loop_order1 = MemoryDep( "access_the_same_buffer", x * 2048 + y, [x, y], [1024, 2048], ) loop_order2 = MemoryDep( "access_the_same_buffer", x * 2048 + y + 5, [y, x], [2048, 1024], ) self.assertTrue(loop_order1 != loop_order2) normalized_loop_order1 = loop_order1.normalize_with_stride_order() normalized_loop_order2 = loop_order2.normalize_with_stride_order() # unequal due to different offset self.assertTrue(normalized_loop_order1 != normalized_loop_order2) if __name__ == "__main__": from torch._inductor.test_case import run_tests if HAS_CPU and HAS_GPU: run_tests("sympy")
TestDependencies
python
viewflow__viewflow
tests/workflow/test_managers__sql.py
{ "start": 260, "end": 7867 }
class ____(TestCase): # noqa: D101 maxDiff = None def test_process_queryset_filter_by_flow_class_succeed(self): queryset = managers.ProcessQuerySet(model=Process).filter(flow_class=ChildFlow) self.assertEqual( str(queryset.query).strip(), 'SELECT "viewflow_process"."id", "viewflow_process"."flow_class", "viewflow_process"."status",' ' "viewflow_process"."created", "viewflow_process"."finished", "viewflow_process"."data",' ' "viewflow_process"."parent_task_id", "viewflow_process"."seed_content_type_id",' ' "viewflow_process"."seed_object_id", "viewflow_process"."artifact_content_type_id",' ' "viewflow_process"."artifact_object_id" FROM "viewflow_process"' ' WHERE "viewflow_process"."flow_class" = tests/workflow.test_managers__sql.ChildFlow' ' ORDER BY "viewflow_process"."created" DESC', ) def test_process_queryset_coerce_for_query(self): queryset = managers.ProcessQuerySet(model=Process).coerce_for([ChildFlow]) self.assertEqual( sqlparse.format(str(queryset.query), reindent=True), 'SELECT "viewflow_process"."id",\n' ' "viewflow_process"."flow_class",\n' ' "viewflow_process"."status",\n' ' "viewflow_process"."created",\n' ' "viewflow_process"."finished",\n' ' "viewflow_process"."data",\n' ' "viewflow_process"."parent_task_id",\n' ' "viewflow_process"."seed_content_type_id",\n' ' "viewflow_process"."seed_object_id",\n' ' "viewflow_process"."artifact_content_type_id",\n' ' "viewflow_process"."artifact_object_id",\n' ' "tests_childprocess"."process_ptr_id",\n' ' "tests_childprocess"."comment"\n' 'FROM "viewflow_process"\n' 'LEFT OUTER JOIN "tests_childprocess" ON ("viewflow_process"."id" = "tests_childprocess"."process_ptr_id")\n' 'WHERE "viewflow_process"."flow_class" IN (tests/workflow.test_managers__sql.ChildFlow)\n' 'ORDER BY "viewflow_process"."created" DESC', ) def test_process_queryset_coerce_classes(self): process1 = ChildProcess.objects.create(flow_class=ChildFlow) process2 = GrandChildProcess.objects.create(flow_class=GrandChildFlow) with 
self.assertNumQueries(1): queryset = managers.ProcessQuerySet(model=Process).coerce_for( [GrandChildFlow, ChildFlow] ) self.assertEqual(set(queryset), set([process1, process2])) def test_process_queryset_coerce_values_list(self): process = ChildProcess.objects.create(flow_class=ChildFlow) queryset = ( managers.ProcessQuerySet(model=Process) .coerce_for([ChildFlow]) .values_list("id") ) self.assertEqual([(process.pk,)], list(queryset)) def test_process_queryset_prefetch_related(self): process = ChildProcess.objects.create(flow_class=ChildFlow) # process -> participants queryset = ( managers.ProcessQuerySet(model=Process) .coerce_for([ChildFlow]) .prefetch_related( Prefetch("participants", queryset=User.objects.filter(is_staff=True)) ) ) self.assertEqual([process], list(queryset)) self.assertEqual([], list(queryset[0].participants.filter(is_staff=True))) # participants -> processes queryset = User.objects.filter(is_staff=True).prefetch_related( Prefetch("childprocess", queryset=ChildProcess.objects.all()) ) self.assertEqual([], list(queryset)) def test_task_queryset_filter_by_flow_class_succeed(self): queryset = managers.TaskQuerySet(model=Task).filter(flow_task=ChildFlow.start) self.assertEqual( str(queryset.query).strip(), 'SELECT "viewflow_task"."id", "viewflow_task"."flow_task", "viewflow_task"."flow_task_type",' ' "viewflow_task"."status", "viewflow_task"."created", "viewflow_task"."assigned",' ' "viewflow_task"."started", "viewflow_task"."finished", "viewflow_task"."token",' ' "viewflow_task"."external_task_id", "viewflow_task"."owner_id",' ' "viewflow_task"."owner_permission", "viewflow_task"."owner_permission_content_type_id",' ' "viewflow_task"."owner_permission_obj_pk", "viewflow_task"."process_id",' ' "viewflow_task"."data", "viewflow_task"."seed_content_type_id",' ' "viewflow_task"."seed_object_id", "viewflow_task"."artifact_content_type_id",' ' "viewflow_task"."artifact_object_id"' ' FROM "viewflow_task"' ' WHERE "viewflow_task"."flow_task" = 
tests/workflow.test_managers__sql.ChildFlow.start' ' ORDER BY "viewflow_task"."created" DESC', ) def test_task_queryset_coerce_for_query(self): queryset = managers.TaskQuerySet(model=Task).coerce_for([ChildFlow]) self.assertEqual( queryset.query.select_related, {"childtask": {}, "process": {}} ) """ Became broken under django 1.6 if file test_views_base have viewflow imports! self.assertEqual(str(queryset.query).strip(), 'SELECT "viewflow_task"."id", "viewflow_task"."flow_task", "viewflow_task"."flow_task_type",' ' "viewflow_task"."status", "viewflow_task"."created", "viewflow_task"."started",' ' "viewflow_task"."finished", "viewflow_task"."token", "viewflow_task"."process_id",' ' "viewflow_task"."owner_id", "viewflow_task"."external_task_id",' ' "viewflow_task"."owner_permission", "viewflow_task"."comments", "viewflow_process"."id",' ' "viewflow_process"."flow_class", "viewflow_process"."status", "viewflow_process"."created",' ' "viewflow_process"."finished", "tests_childtask"."task_ptr_id", "tests_childtask"."due_date"' ' FROM "viewflow_task"' ' INNER JOIN "viewflow_process" ON ( "viewflow_task"."process_id" = "viewflow_process"."id" )' ' LEFT OUTER JOIN "tests_childtask" ON ( "viewflow_task"."id" = "tests_childtask"."task_ptr_id" )' ' WHERE "viewflow_process"."flow_class" IN (tests/workflow.test_managers__sql.ChildFlow)') """ def test_task_queryset_coerce_classes(self): process1 = ChildProcess.objects.create(flow_class=ChildFlow) process2 = GrandChildProcess.objects.create(flow_class=GrandChildFlow) task1 = ChildTask.objects.create(process=process1, flow_task=ChildFlow.start) task2 = Task.objects.create(process=process2, flow_task=GrandChildFlow.start) with self.assertNumQueries(1): queryset = managers.TaskQuerySet(model=Task).coerce_for( [GrandChildFlow, ChildFlow] ) self.assertEqual(set(queryset), set([task1, task2])) def test_task_queryset_coerce_values_list(self): process = ChildProcess.objects.create(flow_class=ChildFlow) task = 
ChildTask.objects.create(process=process, flow_task=ChildFlow.start) queryset = ( managers.TaskQuerySet(model=Task).coerce_for([ChildFlow]).values_list("id") ) self.assertEqual([(task.pk,)], list(queryset))
Test
python
kamyu104__LeetCode-Solutions
Python/number-of-strings-that-appear-as-substrings-in-word.py
{ "start": 2987, "end": 4308 }
class ____(object): def numOfStrings(self, patterns, word): """ :type patterns: List[str] :type word: str :rtype: int """ def getPrefix(pattern): prefix = [-1]*len(pattern) j = -1 for i in xrange(1, len(pattern)): while j != -1 and pattern[j+1] != pattern[i]: j = prefix[j] if pattern[j+1] == pattern[i]: j += 1 prefix[i] = j return prefix def kmp(text, pattern): if not pattern: return 0 prefix = getPrefix(pattern) if len(text) < len(pattern): return -1 j = -1 for i in xrange(len(text)): while j != -1 and pattern[j+1] != text[i]: j = prefix[j] if pattern[j+1] == text[i]: j += 1 if j+1 == len(pattern): return i-j return -1 return sum(kmp(word, pattern) != -1 for pattern in patterns) # Time: O(n * m * l), n is the number of patterns # , l is the max length of patterns # , m is the length of word # Space: O(1) # built-in solution
Solution2
python
sympy__sympy
sympy/geometry/line.py
{ "start": 79437, "end": 80397 }
class ____(LinearEntity3D, Segment): """A line segment in a 3D space. Parameters ========== p1 : Point3D p2 : Point3D Attributes ========== length : number or SymPy expression midpoint : Point3D See Also ======== sympy.geometry.point.Point3D, Line3D Examples ======== >>> from sympy import Point3D, Segment3D >>> Segment3D((1, 0, 0), (1, 1, 1)) # tuples are interpreted as pts Segment3D(Point3D(1, 0, 0), Point3D(1, 1, 1)) >>> s = Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7)); s Segment3D(Point3D(4, 3, 9), Point3D(1, 1, 7)) >>> s.points (Point3D(4, 3, 9), Point3D(1, 1, 7)) >>> s.length sqrt(17) >>> s.midpoint Point3D(5/2, 2, 8) """ def __new__(cls, p1, p2, **kwargs): p1 = Point(p1, dim=3) p2 = Point(p2, dim=3) if p1 == p2: return p1 return LinearEntity3D.__new__(cls, p1, p2, **kwargs)
Segment3D
python
dagster-io__dagster
python_modules/dagster/dagster/_core/asset_graph_view/serializable_entity_subset.py
{ "start": 1772, "end": 7358 }
class ____(Generic[T_EntityKey]): """Represents a serializable subset of a given EntityKey.""" key: T_EntityKey value: EntitySubsetValue @classmethod def from_coercible_value( cls, key: T_EntityKey, value: CoercibleToAssetEntitySubsetValue, partitions_def: Optional[PartitionsDefinition], ) -> "SerializableEntitySubset": """Creates a new SerializableEntitySubset, handling coercion of a CoercibleToAssetEntitySubsetValue to an EntitySubsetValue. """ if value is None: check.invariant( partitions_def is None, "Cannot create a SerializableEntitySubset with value=None and non-None partitions_def", ) return cls(key=key, value=True) if isinstance(value, str): partitions_def = check.not_none(partitions_def) if partitions_def.partitions_subset_class is not DefaultPartitionsSubset: # DefaultPartitionsSubset just adds partition keys to a set, but other subsets # may require partition keys be part of the partition, so validate the key with partition_loading_context() as ctx: partitions_def.validate_partition_key(value, context=ctx) partitions_subset = partitions_def.subset_with_partition_keys([value]) elif isinstance(value, PartitionsSubset): if partitions_def is not None: check.inst_param( value, "value", partitions_def.partitions_subset_class, ) partitions_subset = value else: check.list_param(value, "value", of_type=str) partitions_def = check.not_none(partitions_def) partitions_subset = partitions_def.subset_with_partition_keys(value) return cls(key=key, value=partitions_subset) @classmethod def try_from_coercible_value( cls, key: T_EntityKey, value: CoercibleToAssetEntitySubsetValue, partitions_def: Optional[PartitionsDefinition], ) -> Optional["SerializableEntitySubset"]: """Attempts to create a new SerializableEntitySubset, handling coercion of a CoercibleToAssetEntitySubsetValue and partitions definition to an EntitySubsetValue. Returns None if the coercion fails. 
""" try: return cls.from_coercible_value(key, value, partitions_def) except: return None @property def is_partitioned(self) -> bool: return not isinstance(self.value, bool) @property def bool_value(self) -> bool: return check.inst(self.value, bool) @property def subset_value(self) -> PartitionsSubset: return check.inst(self.value, PartitionsSubset) @property def size(self) -> int: if not self.is_partitioned: return int(self.bool_value) else: return len(self.subset_value) @property def is_empty(self) -> bool: if self.is_partitioned: return self.subset_value.is_empty else: return not self.bool_value def is_compatible_with_partitions_def( self, partitions_def: Optional[PartitionsDefinition] ) -> bool: if self.is_partitioned: # for some PartitionSubset types, we have access to the underlying partitions # definitions, so we can ensure those are identical if isinstance(self.value, (TimeWindowPartitionsSubset, AllPartitionsSubset)): return self.value.partitions_def == partitions_def # for KeyRangesPartitionsSubset, we have the PartitionsSnap, so we can use that elif isinstance(self.value, KeyRangesPartitionsSubset): if ( partitions_def is None or PartitionsSnap.from_def(partitions_def) != self.value.partitions_snap ): return False # all ranges must be valid return all( partitions_def.has_partition_key(r.start) and partitions_def.has_partition_key(r.end) for r in self.value.key_ranges ) else: return partitions_def is not None else: return partitions_def is None def _oper(self, other: Self, oper: Callable[..., Any]) -> Self: check.invariant(self.key == other.key, "Keys must match for operation") value = oper(self.value, other.value) return self.__class__(key=self.key, value=value) def compute_difference(self, other: Self) -> Self: if isinstance(self.value, bool): value = self.bool_value and not other.bool_value return self.__class__(key=self.key, value=value) else: return self._oper(other, operator.sub) def compute_union(self, other: Self) -> Self: return self._oper(other, 
operator.or_) def compute_intersection(self, other: Self) -> Self: return self._oper(other, operator.and_) def __contains__(self, item: AssetKeyPartitionKey) -> bool: if not self.is_partitioned: return item.asset_key == self.key and item.partition_key is None and self.bool_value else: return item.asset_key == self.key and item.partition_key in self.subset_value def __repr__(self) -> str: return f"{self.__class__.__name__}<{self.key}>({self.value})"
SerializableEntitySubset
python
great-expectations__great_expectations
tests/core/test_validation_definition.py
{ "start": 18773, "end": 35709 }
class ____: ds_name = "my_ds" asset_name = "my_asset" batch_definition_name = "my_batch_definition" suite_name = "my_suite" validation_definition_name = "my_validation" @pytest.fixture def context(self, in_memory_runtime_context: EphemeralDataContext) -> EphemeralDataContext: return in_memory_runtime_context @pytest.fixture def validation_definition_data( self, context: EphemeralDataContext, ) -> tuple[PandasDatasource, CSVAsset, BatchDefinition]: ds = context.data_sources.add_pandas(self.ds_name) asset = ds.add_csv_asset(self.asset_name, "data.csv") batch_definition = asset.add_batch_definition(self.batch_definition_name) return ds, asset, batch_definition @pytest.fixture def validation_definition_suite(self, context: EphemeralDataContext) -> ExpectationSuite: return context.suites.add(ExpectationSuite(self.suite_name)) @pytest.mark.unit def test_validation_definition_serialization( self, in_memory_runtime_context: EphemeralDataContext, validation_definition_data: tuple[PandasDatasource, CSVAsset, BatchDefinition], validation_definition_suite: ExpectationSuite, ): context = in_memory_runtime_context pandas_ds, csv_asset, batch_definition = validation_definition_data ds_id = str(uuid.uuid4()) pandas_ds.id = ds_id asset_id = str(uuid.uuid4()) csv_asset.id = asset_id batch_definition_id = str(uuid.uuid4()) batch_definition.id = batch_definition_id suite_id = str(uuid.uuid4()) validation_definition_suite.id = suite_id validation_definition = context.validation_definitions.add( ValidationDefinition( name=self.validation_definition_name, data=batch_definition, suite=validation_definition_suite, ) ) actual = json.loads(validation_definition.json(models_as_dict=False)) expected = { "name": self.validation_definition_name, "data": { "datasource": { "name": pandas_ds.name, "id": ds_id, }, "asset": { "name": csv_asset.name, "id": asset_id, }, "batch_definition": { "name": batch_definition.name, "id": batch_definition_id, }, }, "suite": { "name": 
validation_definition_suite.name, "id": suite_id, }, "id": mock.ANY, } assert actual == expected assert actual["id"] is not None def _assert_contains_valid_uuid(self, data: dict): id = data.pop("id") data["id"] = mock.ANY try: uuid.UUID(id) except ValueError: pytest.fail(f"Expected {id} to be a valid UUID") @pytest.mark.unit def test_validation_definition_deserialization_success( self, context: EphemeralDataContext, validation_definition_data: tuple[PandasDatasource, CSVAsset, BatchDefinition], validation_definition_suite: ExpectationSuite, ): _, _, batch_definition = validation_definition_data serialized_config = { "name": self.validation_definition_name, "data": { "datasource": { "name": self.ds_name, "id": None, }, "asset": { "name": self.asset_name, "id": None, }, "batch_definition": { "name": self.batch_definition_name, "id": None, }, }, "suite": { "name": validation_definition_suite.name, "id": validation_definition_suite.id, }, "id": None, } validation_definition = ValidationDefinition.parse_obj(serialized_config) assert validation_definition.name == self.validation_definition_name assert validation_definition.data == batch_definition assert validation_definition.suite == validation_definition_suite @pytest.mark.unit @pytest.mark.parametrize( "serialized_config, error_substring", [ pytest.param( { "name": validation_definition_name, "data": { "asset": { "name": asset_name, "id": None, }, "batch_definition": { "name": batch_definition_name, "id": None, }, }, "suite": { "name": suite_name, "id": None, }, "id": None, }, "data did not contain expected identifiers", id="bad_data_format[missing_datasource]", ), pytest.param( { "name": validation_definition_name, "data": {}, "suite": { "name": suite_name, "id": None, }, "id": None, }, "data did not contain expected identifiers", id="bad_data_format[empty_field]", ), pytest.param( { "name": validation_definition_name, "data": { "datasource": { "name": ds_name, "id": None, }, "asset": { "name": asset_name, "id": 
None, }, "batch_definition": { "name": batch_definition_name, "id": None, }, }, "suite": {}, "id": None, }, "suite did not contain expected identifiers", id="bad_suite_format", ), ], ) def test_validation_definition_deserialization_bad_format( self, context: EphemeralDataContext, serialized_config: dict, error_substring: str ): with pytest.raises(ValueError, match=f"{error_substring}*."): ValidationDefinition.parse_obj(serialized_config) @pytest.mark.unit @pytest.mark.parametrize( "serialized_config, error_substring", [ pytest.param( { "name": validation_definition_name, "data": { "datasource": { "name": ds_name, "id": None, }, "asset": { "name": asset_name, "id": None, }, "batch_definition": { "name": batch_definition_name, "id": None, }, }, "suite": { "name": "i_do_not_exist", "id": None, }, "id": None, }, "Could not find suite", id="non_existant_suite", ), pytest.param( { "name": validation_definition_name, "data": { "datasource": { "name": "i_do_not_exist", "id": None, }, "asset": { "name": asset_name, "id": None, }, "batch_definition": { "name": batch_definition_name, "id": None, }, }, "suite": { "name": suite_name, "id": None, }, "id": None, }, "Could not find datasource", id="non_existant_datasource", ), pytest.param( { "name": validation_definition_name, "data": { "datasource": { "name": ds_name, "id": None, }, "asset": { "name": "i_do_not_exist", "id": None, }, "batch_definition": { "name": batch_definition_name, "id": None, }, }, "suite": { "name": suite_name, "id": None, }, "id": None, }, "Could not find asset", id="non_existant_asset", ), pytest.param( { "name": validation_definition_name, "data": { "datasource": { "name": ds_name, "id": None, }, "asset": { "name": asset_name, "id": None, }, "batch_definition": { "name": "i_do_not_exist", "id": None, }, }, "suite": { "name": suite_name, "id": None, }, "id": None, }, "Could not find batch definition", id="non_existant_batch_definition", ), ], ) def 
test_validation_definition_deserialization_non_existant_resource( self, validation_definition_data: tuple[PandasDatasource, CSVAsset, BatchDefinition], validation_definition_suite: ExpectationSuite, serialized_config: dict, error_substring: str, ): with pytest.raises(ValueError, match=f"{error_substring}*."): ValidationDefinition.parse_obj(serialized_config) @pytest.mark.unit def test_identifier_bundle_no_id_raises_error(validation_definition: ValidationDefinition): validation_definition.id = None with pytest.raises(ValidationDefinitionRelatedResourcesFreshnessError): validation_definition.identifier_bundle() @pytest.mark.unit def test_save_success(mocker: MockerFixture, validation_definition: ValidationDefinition): context = mocker.Mock(spec=AbstractDataContext) set_context(project=context) store_key = context.validation_definition_store.get_key.return_value validation_definition.save() context.validation_definition_store.update.assert_called_once_with( key=store_key, value=validation_definition ) @pytest.mark.parametrize( "has_id,has_suite_id,has_batch_def_id,error_list", [ pytest.param( True, True, True, [], id="validation_id|suite_id|batch_def_id", ), pytest.param( True, False, True, [ExpectationSuiteNotAddedError], id="validation_id|no_suite_id|batch_def_id", ), pytest.param( True, True, False, [BatchDefinitionNotAddedError], id="validation_id|suite_id|no_batch_def_id", ), pytest.param( True, False, False, [BatchDefinitionNotAddedError, ExpectationSuiteNotAddedError], id="validation_id|no_suite_id|no_batch_def_id", ), pytest.param( False, True, True, [ValidationDefinitionNotAddedError], id="no_validation_id|suite_id|batch_def_id", ), pytest.param( False, False, True, [ExpectationSuiteNotAddedError, ValidationDefinitionNotAddedError], id="no_validation_id|no_suite_id|batch_def_id", ), pytest.param( False, True, False, [BatchDefinitionNotAddedError, ValidationDefinitionNotAddedError], id="no_validation_id|suite_id|no_batch_def_id", ), pytest.param( False, False, 
False, [ BatchDefinitionNotAddedError, ExpectationSuiteNotAddedError, ValidationDefinitionNotAddedError, ], id="no_validation_id|no_suite_id|no_batch_def_id", ), ], ) @pytest.mark.unit def test_is_fresh( in_memory_runtime_context, has_id: bool, has_suite_id: bool, has_batch_def_id: bool, error_list: list[Type[ResourceFreshnessError]], ): context = in_memory_runtime_context batch_definition = ( context.data_sources.add_pandas(name="my_pandas_ds") .add_csv_asset(name="my_csv_asset", filepath_or_buffer="data.csv") .add_batch_definition(name="my_batch_def") ) suite = context.suites.add(ExpectationSuite(name="my_suite")) validation_definition = context.validation_definitions.add( ValidationDefinition( name="my_validation_definition", suite=suite, data=batch_definition, ) ) # Stores/Fluent API will always assign IDs but we manually override them here # for purposes of changing object state for the test if not has_batch_def_id: validation_definition.data.id = None if not has_suite_id: validation_definition.suite.id = None if not has_id: validation_definition.id = None diagnostics = validation_definition.is_fresh() try: diagnostics.raise_for_error() except ResourceFreshnessAggregateError as e: assert [type(err) for err in e.errors] == error_list @pytest.mark.unit def test_is_fresh_raises_error_when_validation_definition_not_found(in_memory_runtime_context): context = in_memory_runtime_context batch_definition = ( context.data_sources.add_pandas(name="my_pandas_ds") .add_csv_asset(name="my_csv_asset", filepath_or_buffer="data.csv") .add_batch_definition(name="my_batch_def") ) suite = context.suites.add(ExpectationSuite(name="my_suite")) validation_definition = context.validation_definitions.add( ValidationDefinition( name="my_validation_definition", suite=suite, data=batch_definition, ) ) context.validation_definitions.delete(validation_definition.name) diagnostics = validation_definition.is_fresh() assert diagnostics.success is False assert len(diagnostics.errors) == 1 
assert isinstance(diagnostics.errors[0], ValidationDefinitionNotFoundError) @pytest.mark.unit def test_is_fresh_raises_error_when_child_deps_not_found(in_memory_runtime_context): context = in_memory_runtime_context datasource = context.data_sources.add_pandas(name="my_pandas_ds") asset = datasource.add_csv_asset(name="my_csv_asset", filepath_or_buffer="data.csv") batch_definition = asset.add_batch_definition(name="my_batch_def") suite = context.suites.add(ExpectationSuite(name="my_suite")) validation_definition = context.validation_definitions.add( ValidationDefinition( name="my_validation_definition", suite=suite, data=batch_definition, ) ) asset.delete_batch_definition(batch_definition.name) context.suites.delete(suite.name) diagnostics = validation_definition.is_fresh() assert diagnostics.success is False assert len(diagnostics.errors) == 2 assert isinstance(diagnostics.errors[0], BatchDefinitionNotFoundError) assert isinstance(diagnostics.errors[1], ExpectationSuiteNotFoundError)
TestValidationDefinitionSerialization
python
getsentry__sentry
src/sentry/middleware/access_log.py
{ "start": 726, "end": 6570 }
class ____: request_start_time: float def get_request_duration(self) -> float: return time.time() - self.request_start_time def _get_request_auth(request: Request) -> AuthenticatedToken | str | None: if request.path_info.startswith(settings.ANONYMOUS_STATIC_PREFIXES): return None # may not be present if request was rejected by a middleware between this # and the auth middleware return getattr(request, "auth", None) def _get_token_name(auth: AuthenticatedToken | None) -> str | None: if auth is None: return None elif isinstance(auth, AuthenticatedToken): return auth.kind else: raise AssertionError(f"unreachable: {auth}") def _get_rate_limit_stats_dict(request: Request) -> dict[str, str | int | None]: rate_limit_metadata: RateLimitMeta | None = getattr(request, "rate_limit_metadata", None) snuba_rate_limit_metadata: SnubaRateLimitMeta | None = getattr( request, "snuba_rate_limit_metadata", None ) rate_limit_type = "DNE" if rate_limit_metadata: rate_limit_type = rate_limit_metadata.rate_limit_type.value if snuba_rate_limit_metadata: rate_limit_type = "snuba" rate_limit_stats = { "rate_limit_type": rate_limit_type, "concurrent_limit": getattr(rate_limit_metadata, "concurrent_limit", None), "concurrent_requests": getattr(rate_limit_metadata, "concurrent_requests", None), "reset_time": getattr(rate_limit_metadata, "reset_time", None), "group": getattr(rate_limit_metadata, "group", None), "limit": getattr(rate_limit_metadata, "limit", None), "remaining": getattr(rate_limit_metadata, "remaining", None), # We prefix the snuba fields with snuba_ to avoid confusion with the standard rate limit metadata "snuba_policy": getattr(snuba_rate_limit_metadata, "policy", None), "snuba_quota_unit": getattr(snuba_rate_limit_metadata, "quota_unit", None), "snuba_quota_used": getattr(snuba_rate_limit_metadata, "quota_used", None), "snuba_rejection_threshold": getattr( snuba_rate_limit_metadata, "rejection_threshold", None ), "snuba_storage_key": getattr(snuba_rate_limit_metadata, 
"storage_key", None), } return rate_limit_stats def _create_api_access_log( request: Request, response: Response | None, access_log_metadata: _AccessLogMetaData ) -> None: """ Create a log entry to be used for api metrics gathering """ try: if request.resolver_match is None: view = "Unknown" else: view = request.resolver_match._func_path request_auth = _get_request_auth(request) if isinstance(request_auth, str): # RPC authenticator currently set auth to a string. # a) Those are also system tokens and should be ignored. # b) _get_token_name raises on non AuthenticatedToken return token_type = _get_token_name(request_auth) if token_type == "system": # if its an internal request, no need to log return request_user = getattr(request, "user", None) user_id = getattr(request_user, "id", None) is_app = getattr(request_user, "is_sentry_app", None) # TODO: `org_id` is often None even if we should have it # Likely `organization` is not being correctly set in the base endpoints on _request org_id = getattr(getattr(request, "organization", None), "id", None) entity_id = getattr(request_auth, "entity_id", None) status_code = getattr(response, "status_code", 500) log_metrics = dict( method=request.method, view=view, response=status_code, user_id=user_id, is_app=is_app, token_type=token_type, is_frontend_request=is_frontend_request(request), organization_id=org_id, entity_id=entity_id, path=request.path, caller_ip=request.META.get("REMOTE_ADDR"), user_agent=request.META.get("HTTP_USER_AGENT"), rate_limited=getattr(request, "will_be_rate_limited", False), rate_limit_category=getattr(request, "rate_limit_category", None), request_duration_seconds=access_log_metadata.get_request_duration(), **_get_rate_limit_stats_dict(request), ) auth = get_authorization_header(request).split() if len(auth) == 2: log_metrics["token_last_characters"] = force_str(auth[1])[-4:] # Filter out None values and convert remaining values to string log_metrics = {k: str(v) for k, v in log_metrics.items() if v 
is not None} api_access_logger.info("api.access", extra=log_metrics) metrics.incr("middleware.access_log.created") except Exception: api_access_logger.exception("api.access: Error capturing API access logs") def access_log_middleware( get_response: Callable[[Request], Response], ) -> Callable[[Request], Response]: def middleware(request: Request) -> Response: # NOTE(Vlad): `request.auth|user` are not a simple member accesses, # they make DB calls. For static urls that should not happen. Hence # this middleware is skipped for them. We don't care about its access # that much anyways if not settings.LOG_API_ACCESS: return get_response(request) if request.path_info.startswith(EXCLUSION_PATHS): return get_response(request) access_log_metadata = _AccessLogMetaData(request_start_time=time.time()) response = get_response(request) _create_api_access_log(request, response, access_log_metadata) return response return middleware
_AccessLogMetaData
python
readthedocs__readthedocs.org
readthedocs/core/unresolver.py
{ "start": 1571, "end": 1890 }
class ____(UnresolverError): def __init__(self, project, language, version_slug, filename): self.project = project self.language = language self.filename = filename # The version doesn't exist, so we just have the slug. self.version_slug = version_slug
TranslationNotFoundError
python
airbytehq__airbyte
airbyte-integrations/connectors/source-gong/components.py
{ "start": 380, "end": 1375 }
class ____(DatetimeBasedCursor): def get_request_body_json( self, *, stream_state: Optional[StreamState] = None, stream_slice: Optional[StreamSlice] = None, next_page_token: Optional[Mapping[str, Any]] = None, ) -> Mapping[str, Any]: return self._get_request_filter_options(RequestOptionType.body_json, stream_slice) def _get_request_filter_options(self, option_type: RequestOptionType, stream_slice: Optional[StreamSlice]) -> Mapping[str, Any]: options: MutableMapping[str, Any] = {} if not stream_slice: return options if self.start_time_option and self.start_time_option.inject_into == option_type: field_name, sub_field_name = self.start_time_option.field_name.eval(config=self.config).replace(" ", "").split(",") options[field_name] = {sub_field_name: stream_slice.get(self._partition_field_start.eval(self.config))} return options
IncrementalSingleBodyFilterCursor
python
pallets__quart
src/quart/testing/client.py
{ "start": 1458, "end": 1650 }
class ____: def __init__(self, headers: Headers) -> None: self.headers = headers def info(self) -> _TestWrapper: return _TestWrapper(self.headers)
_TestCookieJarResponse
python
matplotlib__matplotlib
lib/matplotlib/tri/_trifinder.py
{ "start": 91, "end": 730 }
class ____: """ Abstract base class for classes used to find the triangles of a Triangulation in which (x, y) points lie. Rather than instantiate an object of a class derived from TriFinder, it is usually better to use the function `.Triangulation.get_trifinder`. Derived classes implement __call__(x, y) where x and y are array-like point coordinates of the same shape. """ def __init__(self, triangulation): _api.check_isinstance(Triangulation, triangulation=triangulation) self._triangulation = triangulation def __call__(self, x, y): raise NotImplementedError
TriFinder
python
PrefectHQ__prefect
tests/server/schemas/test_actions.py
{ "start": 1316, "end": 6688 }
class ____: def test_create_with_worker_pool_queue_id_warns(self): with pytest.warns( UserWarning, match=( "`worker_pool_queue_id` is no longer supported for creating or updating " "deployments. Please use `work_pool_name` and " "`work_queue_name` instead." ), ): deployment_create = DeploymentCreate( **dict( name="test-deployment", flow_id=uuid4(), worker_pool_queue_id=uuid4(), ) ) assert getattr(deployment_create, "worker_pool_queue_id", 0) == 0 @pytest.mark.parametrize( "kwargs", [ ({"worker_pool_queue_name": "test-worker-pool-queue"}), ({"work_pool_queue_name": "test-work-pool-queue"}), ({"worker_pool_name": "test-worker-pool"}), ], ) def test_create_with_worker_pool_name_warns(self, kwargs): with pytest.warns( UserWarning, match=( "`worker_pool_name`, `worker_pool_queue_name`, and " "`work_pool_name` are" "no longer supported for creating or updating " "deployments. Please use `work_pool_name` and " "`work_queue_name` instead." ), ): deployment_create = DeploymentCreate( **dict(name="test-deployment", flow_id=uuid4(), **kwargs) ) for key in kwargs.keys(): assert getattr(deployment_create, key, 0) == 0 def test_check_valid_configuration_ignores_required_fields(self): """ Deployment actions ignore required fields because we don't know what the final set of job variables will look like until a flow runs. 
""" deployment_create = DeploymentCreate( name="test-deployment", flow_id=uuid4(), job_variables={}, ) base_job_template = { "variables": { "type": "object", "required": ["my-field"], "properties": { "my-field": { "type": "string", "title": "My Field", }, }, } } # This should pass despite my-field being required deployment_create.check_valid_configuration(base_job_template) # A field with a default value should also pass base_job_template = { "variables": { "type": "object", "required": ["my-field"], "properties": { "my-field": { "type": "string", "title": "My Field", "default": "my-default-for-my-field", }, }, } } deployment_create.check_valid_configuration(base_job_template) # make sure the required fields are still there assert "my-field" in base_job_template["variables"]["required"] # This should also pass base_job_template = { "variables": { "type": "object", "required": ["my-field"], "properties": { "my-field": { "type": "string", "title": "My Field", "default": "my-default-for-my-field", }, }, } } deployment_create = DeploymentUpdate( job_variables={"my_field": "my_value"}, ) deployment_create.check_valid_configuration(base_job_template) def test_validate_concurrency_limits_raises_with_both_limits(self): """Test that validation fails when both concurrency_limit and global_concurrency_limit_id are set""" # Test validation fails when both limits are provided with pytest.raises( ValueError, match="A deployment cannot have both a concurrency limit and a global concurrency limit.", ): DeploymentCreate( name="test-deployment", flow_id=uuid4(), concurrency_limit=5, global_concurrency_limit_id=uuid4(), ) # Test validation passes with just concurrency_limit deployment = DeploymentCreate( name="test-deployment", flow_id=uuid4(), concurrency_limit=5 ) assert deployment.concurrency_limit == 5 assert deployment.global_concurrency_limit_id is None # Test validation passes with just global_concurrency_limit_id global_limit_id = uuid4() deployment = DeploymentCreate( 
name="test-deployment", flow_id=uuid4(), global_concurrency_limit_id=global_limit_id, ) assert deployment.global_concurrency_limit_id == global_limit_id assert deployment.concurrency_limit is None # Test validation passes with neither limit deployment = DeploymentCreate(name="test-deployment", flow_id=uuid4()) assert deployment.concurrency_limit is None assert deployment.global_concurrency_limit_id is None
TestDeploymentCreate
python
getsentry__sentry
tests/sentry/notifications/notification_action/metric_alert_registry/test_pagerduty_metric_alert_handler.py
{ "start": 921, "end": 7910 }
class ____(MetricAlertHandlerBase): def setUp(self) -> None: self.create_models() self.action = self.create_action( type=Action.Type.PAGERDUTY, integration_id=1234567890, config={"target_identifier": "service123", "target_type": ActionTarget.SPECIFIC}, data={"priority": "default"}, ) self.handler = PagerDutyMetricAlertHandler() @mock.patch("sentry.integrations.pagerduty.utils.send_incident_alert_notification") def test_send_alert(self, mock_send_incident_alert_notification: mock.MagicMock) -> None: notification_context = NotificationContext.from_action_model(self.action) assert self.group_event.occurrence is not None assert self.group_event.occurrence.priority is not None alert_context = AlertContext.from_workflow_engine_models( self.detector, self.evidence_data, self.group_event.group.status, DetectorPriorityLevel(self.group_event.occurrence.priority), ) metric_issue_context = MetricIssueContext.from_group_event( self.group, self.evidence_data, DetectorPriorityLevel(self.group_event.occurrence.priority), ) open_period_context = OpenPeriodContext.from_group(self.group) notification_uuid = str(uuid.uuid4()) self.handler.send_alert( notification_context=notification_context, alert_context=alert_context, metric_issue_context=metric_issue_context, open_period_context=open_period_context, trigger_status=TriggerStatus.ACTIVE, project=self.detector.project, organization=self.detector.project.organization, notification_uuid=notification_uuid, ) mock_send_incident_alert_notification.assert_called_once_with( notification_context=notification_context, alert_context=alert_context, metric_issue_context=metric_issue_context, organization=self.detector.project.organization, notification_uuid=notification_uuid, ) @mock.patch( "sentry.notifications.notification_action.metric_alert_registry.PagerDutyMetricAlertHandler.send_alert" ) def test_invoke_legacy_registry(self, mock_send_alert: mock.MagicMock) -> None: self.handler.invoke_legacy_registry(self.event_data, self.action, 
self.detector) assert mock_send_alert.call_count == 1 ( notification_context, alert_context, metric_issue_context, open_period_context, organization, notification_uuid, ) = self.unpack_kwargs(mock_send_alert) assert organization == self.detector.project.organization assert isinstance(notification_uuid, str) self.assert_notification_context( notification_context, integration_id=1234567890, target_identifier="service123", target_display=None, sentry_app_config={"priority": "default"}, sentry_app_id=None, ) self.assert_alert_context( alert_context, name=self.detector.name, action_identifier_id=self.detector.id, threshold_type=AlertRuleThresholdType.ABOVE, detection_type=AlertRuleDetectionType.STATIC, comparison_delta=None, alert_threshold=self.evidence_data.conditions[0]["comparison"], ) self.assert_metric_issue_context( metric_issue_context, open_period_identifier=self.open_period.id, snuba_query=self.snuba_query, new_status=IncidentStatus.CRITICAL, metric_value=123.45, group=self.group_event.group, title=self.group_event.group.title, subscription=self.subscription, ) self.assert_open_period_context( open_period_context, id=self.open_period.id, date_started=self.group_event.group.first_seen, date_closed=None, ) assert organization == self.detector.project.organization assert isinstance(notification_uuid, str) @mock.patch( "sentry.notifications.notification_action.metric_alert_registry.PagerDutyMetricAlertHandler.send_alert" ) def test_invoke_legacy_registry_with_activity(self, mock_send_alert: mock.MagicMock) -> None: # Create an Activity instance with evidence data and priority activity_data = asdict(self.evidence_data) activity = Activity( project=self.project, group=self.group, type=ActivityType.SET_RESOLVED.value, data=activity_data, ) activity.save() # Create event data with Activity instead of GroupEvent event_data_with_activity = WorkflowEventData( event=activity, workflow_env=self.workflow.environment, group=self.group, ) 
self.handler.invoke_legacy_registry(event_data_with_activity, self.action, self.detector) assert mock_send_alert.call_count == 1 ( notification_context, alert_context, metric_issue_context, open_period_context, organization, notification_uuid, ) = self.unpack_kwargs(mock_send_alert) # Verify that the same data is extracted from Activity.data as from GroupEvent.occurrence.evidence_data self.assert_notification_context( notification_context, integration_id=1234567890, target_identifier="service123", target_display=None, sentry_app_config={"priority": "default"}, sentry_app_id=None, ) self.assert_alert_context( alert_context, name=self.detector.name, action_identifier_id=self.detector.id, threshold_type=AlertRuleThresholdType.BELOW, detection_type=AlertRuleDetectionType.STATIC, comparison_delta=None, alert_threshold=self.evidence_data.conditions[2]["comparison"], ) self.assert_metric_issue_context( metric_issue_context, open_period_identifier=self.open_period.id, snuba_query=self.snuba_query, new_status=IncidentStatus.CLOSED, metric_value=123.45, group=self.group, title=self.group.title, subscription=self.subscription, ) self.assert_open_period_context( open_period_context, id=self.open_period.id, date_started=self.group.first_seen, date_closed=None, ) assert organization == self.detector.project.organization assert isinstance(notification_uuid, str)
TestPagerDutyMetricAlertHandler
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py
{ "start": 18349, "end": 19622 }
class ____: """Test parse_assign_public_ip function.""" def test_parse_assign_public_ip_true_fargate(self): """Test parsing assign_public_ip=True for Fargate launch type.""" result = parse_assign_public_ip("True", is_launch_type_ec2=False) assert result == "ENABLED" def test_parse_assign_public_ip_false_fargate(self): """Test parsing assign_public_ip=False for Fargate launch type.""" result = parse_assign_public_ip("False", is_launch_type_ec2=False) assert result == "DISABLED" def test_parse_assign_public_ip_true_ec2(self): """Test parsing assign_public_ip=True for EC2 launch type.""" result = parse_assign_public_ip("True", is_launch_type_ec2=True) assert result is None def test_parse_assign_public_ip_false_ec2(self): """Test parsing assign_public_ip=False for EC2 launch type.""" result = parse_assign_public_ip("False", is_launch_type_ec2=True) assert result is None def test_parse_assign_public_ip_default_fargate(self): """Test parsing assign_public_ip with default for Fargate launch type.""" result = parse_assign_public_ip("False", is_launch_type_ec2=False) assert result == "DISABLED"
TestParseAssignPublicIp
python
tornadoweb__tornado
tornado/test/httpclient_test.py
{ "start": 3033, "end": 3202 }
class ____(RequestHandler): def patch(self): "Return the request payload - so we can check it is being kept" self.write(self.request.body)
PatchHandler
python
google__pytype
pytype/tools/analyze_project/parse_args.py
{ "start": 410, "end": 5152 }
class ____: """Parser with additional functions for config file processing.""" def __init__(self, parser, pytype_single_args): """Initialize a parser. Args: parser: An argparse.ArgumentParser or compatible object pytype_single_args: Iterable of args that will be passed to pytype_single """ self._parser = parser self.pytype_single_args = pytype_single_args self._pytype_arg_map = pytype_config.args_map() def create_initial_args(self, keys): """Creates the initial set of args.""" return argparse.Namespace(**{k: None for k in keys}) def config_from_defaults(self): defaults = self._parser.parse_args([]) self.postprocess(defaults) conf = config.Config(*self.pytype_single_args) conf.populate_from(defaults) return conf def clean_args(self, args, keys): """Clean None values out of the arg namespace. This lets us check for a config file arg based on whether the None default was overwritten. Args: args: an argparse.Namespace. keys: Keys to clean if None """ for k in keys: if getattr(args, k) is None: delattr(args, k) def parse_args(self, argv): """Parses argv. Commandline-only args are parsed normally. File-configurable args appear in the parsed args only if explicitly present in argv. Args: argv: sys.argv[1:] Returns: An argparse.Namespace. """ file_config_names = set(config.ITEMS) | set(self.pytype_single_args) args = self.create_initial_args(file_config_names) self._parser.parse_args(argv, args) self.clean_args(args, file_config_names) self.postprocess(args) return args def convert_strings(self, args: argparse.Namespace): """Converts strings in an args namespace to values.""" for k in self.pytype_single_args: if hasattr(args, k): v = getattr(args, k) assert isinstance(v, str) setattr(args, k, convert_string(v)) def postprocess(self, args: argparse.Namespace): """Postprocesses the subset of pytype_single_args that appear in args. Args: args: an argparse.Namespace. 
""" names = {k for k in self.pytype_single_args if hasattr(args, k)} opt_map = {k: self._pytype_arg_map[k].long_opt for k in names} pytype_config.Postprocessor(names, opt_map, args).process() def error(self, message): self._parser.error(message) def make_parser(): """Make parser for command line args. Returns: A Parser object. """ parser = argparse.ArgumentParser(usage='%(prog)s [options] input [input ...]') parser.register('action', 'flatten', _FlattenAction) modes = parser.add_mutually_exclusive_group() modes.add_argument( '--tree', dest='tree', action='store_true', default=False, help='Display import tree.') modes.add_argument( '--unresolved', dest='unresolved', action='store_true', default=False, help='Display unresolved dependencies.') modes.add_argument( '--generate-config', dest='generate_config', type=str, action='store', default='', help='Write out a dummy configuration file.') parser.add_argument( '-v', '--verbosity', dest='verbosity', type=int, action='store', default=1, help='Set logging level: 0=ERROR, 1=WARNING (default), 2=INFO.') parser.add_argument( '--config', dest='config', type=str, action='store', default='', help='Configuration file.') parser.add_argument( '--version', action='store_true', dest='version', default=None, help=('Display pytype version and exit.')) # Adds options from the config file. types = config.make_converters() # For nargs=*, argparse calls type() on each arg individually, so # _FlattenAction flattens the list of sets of paths as we go along. 
for option in [ (('-x', '--exclude'), {'nargs': '*', 'action': 'flatten'}), (('inputs',), {'metavar': 'input', 'nargs': '*', 'action': 'flatten'}), (('-k', '--keep-going'), {'action': 'store_true', 'type': None}), (('-j', '--jobs'), {'action': 'store', 'metavar': 'N'}), (('--platform',),), (('-P', '--pythonpath'),), (('-V', '--python-version'),) ]: _add_file_argument(parser, types, *option) output = parser.add_mutually_exclusive_group() _add_file_argument(output, types, ('-o', '--output')) output.add_argument( '-n', '--no-cache', dest='no_cache', action='store_true', default=False, help='Send pytype output to a temporary directory.') # Adds options from pytype-single. wrapper = datatypes.ParserWrapper(parser) pytype_config.add_basic_options(wrapper) pytype_config.add_feature_flags(wrapper) return Parser(parser, pytype_single_args=wrapper.actions)
Parser
python
pytorch__pytorch
torch/distributions/chi2.py
{ "start": 199, "end": 1153 }
class ____(Gamma): r""" Creates a Chi-squared distribution parameterized by shape parameter :attr:`df`. This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)`` Example:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = Chi2(torch.tensor([1.0])) >>> m.sample() # Chi2 distributed with shape df=1 tensor([ 0.1046]) Args: df (float or Tensor): shape parameter of the distribution """ arg_constraints = {"df": constraints.positive} def __init__( self, df: Union[Tensor, float], validate_args: Optional[bool] = None, ) -> None: super().__init__(0.5 * df, 0.5, validate_args=validate_args) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(Chi2, _instance) return super().expand(batch_shape, new) @property def df(self) -> Tensor: return self.concentration * 2
Chi2
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/decl_api.py
{ "start": 21687, "end": 30351 }
class ____( # Inspectable is used only by the mypy plugin inspection.Inspectable[InstanceState[Any]], metaclass=DeclarativeAttributeIntercept, ): """Base class used for declarative class definitions. The :class:`_orm.DeclarativeBase` allows for the creation of new declarative bases in such a way that is compatible with type checkers:: from sqlalchemy.orm import DeclarativeBase class Base(DeclarativeBase): pass The above ``Base`` class is now usable as the base for new declarative mappings. The superclass makes use of the ``__init_subclass__()`` method to set up new classes and metaclasses aren't used. When first used, the :class:`_orm.DeclarativeBase` class instantiates a new :class:`_orm.registry` to be used with the base, assuming one was not provided explicitly. The :class:`_orm.DeclarativeBase` class supports class-level attributes which act as parameters for the construction of this registry; such as to indicate a specific :class:`_schema.MetaData` collection as well as a specific value for :paramref:`_orm.registry.type_annotation_map`:: from typing import Annotated from sqlalchemy import BigInteger from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy.orm import DeclarativeBase bigint = Annotated[int, "bigint"] my_metadata = MetaData() class Base(DeclarativeBase): metadata = my_metadata type_annotation_map = { str: String().with_variant(String(255), "mysql", "mariadb"), bigint: BigInteger(), } Class-level attributes which may be specified include: :param metadata: optional :class:`_schema.MetaData` collection. If a :class:`_orm.registry` is constructed automatically, this :class:`_schema.MetaData` collection will be used to construct it. Otherwise, the local :class:`_schema.MetaData` collection will supercede that used by an existing :class:`_orm.registry` passed using the :paramref:`_orm.DeclarativeBase.registry` parameter. 
:param type_annotation_map: optional type annotation map that will be passed to the :class:`_orm.registry` as :paramref:`_orm.registry.type_annotation_map`. :param registry: supply a pre-existing :class:`_orm.registry` directly. .. versionadded:: 2.0 Added :class:`.DeclarativeBase`, so that declarative base classes may be constructed in such a way that is also recognized by :pep:`484` type checkers. As a result, :class:`.DeclarativeBase` and other subclassing-oriented APIs should be seen as superseding previous "class returned by a function" APIs, namely :func:`_orm.declarative_base` and :meth:`_orm.registry.generate_base`, where the base class returned cannot be recognized by type checkers without using plugins. **__init__ behavior** In a plain Python class, the base-most ``__init__()`` method in the class hierarchy is ``object.__init__()``, which accepts no arguments. However, when the :class:`_orm.DeclarativeBase` subclass is first declared, the class is given an ``__init__()`` method that links to the :paramref:`_orm.registry.constructor` constructor function, if no ``__init__()`` method is already present; this is the usual declarative constructor that will assign keyword arguments as attributes on the instance, assuming those attributes are established at the class level (i.e. are mapped, or are linked to a descriptor). This constructor is **never accessed by a mapped class without being called explicitly via super()**, as mapped classes are themselves given an ``__init__()`` method directly which calls :paramref:`_orm.registry.constructor`, so in the default case works independently of what the base-most ``__init__()`` method does. .. versionchanged:: 2.0.1 :class:`_orm.DeclarativeBase` has a default constructor that links to :paramref:`_orm.registry.constructor` by default, so that calls to ``super().__init__()`` can access this constructor. 
Previously, due to an implementation mistake, this default constructor was missing, and calling ``super().__init__()`` would invoke ``object.__init__()``. The :class:`_orm.DeclarativeBase` subclass may also declare an explicit ``__init__()`` method which will replace the use of the :paramref:`_orm.registry.constructor` function at this level:: class Base(DeclarativeBase): def __init__(self, id=None): self.id = id Mapped classes still will not invoke this constructor implicitly; it remains only accessible by calling ``super().__init__()``:: class MyClass(Base): def __init__(self, id=None, name=None): self.name = name super().__init__(id=id) Note that this is a different behavior from what functions like the legacy :func:`_orm.declarative_base` would do; the base created by those functions would always install :paramref:`_orm.registry.constructor` for ``__init__()``. """ if typing.TYPE_CHECKING: def _sa_inspect_type(self) -> Mapper[Self]: ... def _sa_inspect_instance(self) -> InstanceState[Self]: ... _sa_registry: ClassVar[_RegistryType] registry: ClassVar[_RegistryType] """Refers to the :class:`_orm.registry` in use where new :class:`_orm.Mapper` objects will be associated.""" metadata: ClassVar[MetaData] """Refers to the :class:`_schema.MetaData` collection that will be used for new :class:`_schema.Table` objects. .. seealso:: :ref:`orm_declarative_metadata` """ __name__: ClassVar[str] # this ideally should be Mapper[Self], but mypy as of 1.4.1 does not # like it, and breaks the declared_attr_one test. Pyright/pylance is # ok with it. __mapper__: ClassVar[Mapper[Any]] """The :class:`_orm.Mapper` object to which a particular class is mapped. May also be acquired using :func:`_sa.inspect`, e.g. ``inspect(klass)``. """ __table__: ClassVar[FromClause] """The :class:`_sql.FromClause` to which a particular subclass is mapped. 
This is usually an instance of :class:`_schema.Table` but may also refer to other kinds of :class:`_sql.FromClause` such as :class:`_sql.Subquery`, depending on how the class is mapped. .. seealso:: :ref:`orm_declarative_metadata` """ # pyright/pylance do not consider a classmethod a ClassVar so use Any # https://github.com/microsoft/pylance-release/issues/3484 __tablename__: Any """String name to assign to the generated :class:`_schema.Table` object, if not specified directly via :attr:`_orm.DeclarativeBase.__table__`. .. seealso:: :ref:`orm_declarative_table` """ __mapper_args__: Any """Dictionary of arguments which will be passed to the :class:`_orm.Mapper` constructor. .. seealso:: :ref:`orm_declarative_mapper_options` """ __table_args__: Any """A dictionary or tuple of arguments that will be passed to the :class:`_schema.Table` constructor. See :ref:`orm_declarative_table_configuration` for background on the specific structure of this collection. .. seealso:: :ref:`orm_declarative_table_configuration` """ def __init__(self, **kw: Any): ... def __init_subclass__(cls, **kw: Any) -> None: if DeclarativeBase in cls.__bases__: _check_not_declarative(cls, DeclarativeBase) _setup_declarative_base(cls) else: _ORMClassConfigurator._as_declarative( cls._sa_registry, cls, cls.__dict__ ) super().__init_subclass__(**kw) def _check_not_declarative(cls: Type[Any], base: Type[Any]) -> None: cls_dict = cls.__dict__ if ( "__table__" in cls_dict and not ( callable(cls_dict["__table__"]) or hasattr(cls_dict["__table__"], "__get__") ) ) or isinstance(cls_dict.get("__tablename__", None), str): raise exc.InvalidRequestError( f"Cannot use {base.__name__!r} directly as a declarative base " "class. Create a Base by creating a subclass of it." )
DeclarativeBase
python
astropy__astropy
astropy/coordinates/attributes.py
{ "start": 13491, "end": 15409 }
class ____(Attribute): """ A frame attribute that can act as a `~astropy.coordinates.EarthLocation`. It can be created as anything that can be transformed to the `~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation` when accessed after creation. Parameters ---------- default : object Default value for the attribute if not provided secondary_attribute : str Name of a secondary instance attribute which supplies the value if ``default is None`` and no value was supplied during initialization. doc : str Description of the frame attribute for help and documentation """ def convert_input(self, value): """ Checks that the input is a Quantity with the necessary units (or the special value ``0``). Parameters ---------- value : object Input value to be converted. Returns ------- out, converted : correctly-typed object, boolean Tuple consisting of the correctly-typed object and a boolean which indicates if conversion was actually performed. Raises ------ ValueError If the input is not valid for this attribute. """ if value is None: return None, False elif isinstance(value, EarthLocation): return value, False else: # we have to do the import here because of some tricky circular deps from .builtin_frames import ITRS if not hasattr(value, "transform_to"): raise ValueError( f'"{value}" was passed into an EarthLocationAttribute, but it does' ' not have "transform_to" method' ) itrsobj = value.transform_to(ITRS()) return itrsobj.earth_location, True
EarthLocationAttribute
python
getsentry__sentry
tests/sentry/users/api/endpoints/test_userroles_details.py
{ "start": 1504, "end": 2120 }
class ____(UserRolesDetailsTest): method = "PUT" def test_simple(self) -> None: role1 = self.create_user_role(name="test-role", permissions=["users.edit"]) role2 = self.create_user_role(name="test-role2", permissions=["users.edit"]) resp = self.get_response("test-role", permissions=["users.admin"]) assert resp.status_code == 200 role1 = UserRole.objects.get(id=role1.id) assert role1.permissions == ["users.admin"] role2 = UserRole.objects.get(id=role2.id) assert role2.permissions == ["users.edit"] @control_silo_test
UserRolesDetailsPutTest
python
has2k1__plotnine
plotnine/themes/themeable.py
{ "start": 24067, "end": 24409 }
class ____(themeable): """ How to align the plot caption Parameters ---------- theme_element : Literal["panel", "plot"], default = "panel" If "panel", the caption is aligned with respect to the panels. If "plot", it is aligned with the plot, excluding the margin space. """
plot_caption_position
python
astropy__astropy
docs/wcs/examples/planetary_wcs.py
{ "start": 359, "end": 682 }
class ____(BaseCoordinateFrame): name = "Mars" frame = MARSCustomBodyFrame() frame.representation_type = MARSCustomBodycentricRepresentation mywcs = celestial_frame_to_wcs(frame, projection="CAR") print(mywcs.wcs.ctype) print(mywcs.wcs.name) print(mywcs.wcs.aux.a_radius) print(mywcs.wcs.aux.c_radius)
MARSCustomBodyFrame
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1525981, "end": 1529175 }
class ____( sgqlc.types.Type, Node, Comment, Deletable, Reactable, Subscribable, UniformResourceLocatable, Updatable, UpdatableComment ): """A team discussion.""" __schema__ = github_schema __field_names__ = ( "body_version", "comments", "comments_resource_path", "comments_url", "is_pinned", "is_private", "number", "team", "title", "viewer_can_pin", ) body_version = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="bodyVersion") """Identifies the discussion body hash.""" comments = sgqlc.types.Field( sgqlc.types.non_null(TeamDiscussionCommentConnection), graphql_name="comments", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ("order_by", sgqlc.types.Arg(TeamDiscussionCommentOrder, graphql_name="orderBy", default=None)), ("from_comment", sgqlc.types.Arg(Int, graphql_name="fromComment", default=None)), ) ), ) """A list of comments on this discussion. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. * `order_by` (`TeamDiscussionCommentOrder`): Order for connection * `from_comment` (`Int`): When provided, filters the connection such that results begin with the comment with this number. 
""" comments_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="commentsResourcePath") """The HTTP path for discussion comments""" comments_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="commentsUrl") """The HTTP URL for discussion comments""" is_pinned = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPinned") """Whether or not the discussion is pinned.""" is_private = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPrivate") """Whether or not the discussion is only visible to team members and org admins. """ number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number") """Identifies the discussion within its team.""" team = sgqlc.types.Field(sgqlc.types.non_null(Team), graphql_name="team") """The team that defines the context of this discussion.""" title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title") """The title of the discussion""" viewer_can_pin = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanPin") """Whether or not the current viewer can pin this discussion."""
TeamDiscussion
python
mlflow__mlflow
mlflow/data/code_dataset_source.py
{ "start": 115, "end": 880 }
class ____(DatasetSource): def __init__( self, tags: dict[Any, Any], ): self._tags = tags @staticmethod def _get_source_type() -> str: return "code" def load(self, **kwargs): """ Load is not implemented for Code Dataset Source. """ raise NotImplementedError @staticmethod def _can_resolve(raw_source: Any): return False @classmethod def _resolve(cls, raw_source: str) -> Self: raise NotImplementedError def to_dict(self) -> dict[Any, Any]: return {"tags": self._tags} @classmethod def from_dict(cls, source_dict: dict[Any, Any]) -> Self: return cls( tags=source_dict.get("tags"), )
CodeDatasetSource
python
getsentry__sentry
src/sentry/preprod/size_analysis/models.py
{ "start": 1550, "end": 1778 }
class ____(BaseModel): diff_items: list[DiffItem] size_metric_diff_item: SizeMetricDiffItem skipped_diff_item_comparison: bool head_analysis_version: str | None base_analysis_version: str | None
ComparisonResults
python
getsentry__sentry
src/sentry/api/serializers/models/project_template.py
{ "start": 622, "end": 832 }
class ____(TypedDict, total=False): id: int name: str createdAt: datetime | None updatedAt: datetime | None options: TProjectOptions | None @register(ProjectTemplate)
SerializedProjectTemplate
python
viewflow__viewflow
viewflow/forms/renderers.py
{ "start": 3080, "end": 3507 }
class ____(WidgetRenderer): tag = "input" def create_root(self, context): parent = self.root self.root = ElementTree.SubElement(self.root, "div") for subwidget in context["widget"]["subwidgets"]: child = super().create_root({"widget": subwidget}) child.attrib["type"] = "hidden" root, self.root = self.root, parent return root
MultipleHiddenInputRenderer
python
apache__thrift
lib/py/src/protocol/TCompactProtocol.py
{ "start": 3006, "end": 13217 }
class ____(TProtocolBase): """Compact implementation of the Thrift protocol driver.""" PROTOCOL_ID = 0x82 VERSION = 1 VERSION_MASK = 0x1f TYPE_MASK = 0xe0 TYPE_BITS = 0x07 TYPE_SHIFT_AMOUNT = 5 def __init__(self, trans, string_length_limit=None, container_length_limit=None): TProtocolBase.__init__(self, trans) self.state = CLEAR self.__last_fid = 0 self.__bool_fid = None self.__bool_value = None self.__structs = [] self.__containers = [] self.string_length_limit = string_length_limit self.container_length_limit = container_length_limit def _check_string_length(self, length): self._check_length(self.string_length_limit, length) def _check_container_length(self, length): self._check_length(self.container_length_limit, length) def __writeVarint(self, n): writeVarint(self.trans, n) def writeMessageBegin(self, name, type, seqid): assert self.state == CLEAR self.__writeUByte(self.PROTOCOL_ID) self.__writeUByte(self.VERSION | (type << self.TYPE_SHIFT_AMOUNT)) # The sequence id is a signed 32-bit integer but the compact protocol # writes this out as a "var int" which is always positive, and attempting # to write a negative number results in an infinite loop, so we may # need to do some conversion here... 
tseqid = seqid if tseqid < 0: tseqid = 2147483648 + (2147483648 + tseqid) self.__writeVarint(tseqid) self.__writeBinary(bytes(name, 'utf-8')) self.state = VALUE_WRITE def writeMessageEnd(self): assert self.state == VALUE_WRITE self.state = CLEAR def writeStructBegin(self, name): assert self.state in (CLEAR, CONTAINER_WRITE, VALUE_WRITE), self.state self.__structs.append((self.state, self.__last_fid)) self.state = FIELD_WRITE self.__last_fid = 0 def writeStructEnd(self): assert self.state == FIELD_WRITE self.state, self.__last_fid = self.__structs.pop() def writeFieldStop(self): self.__writeByte(0) def __writeFieldHeader(self, type, fid): delta = fid - self.__last_fid if 0 < delta <= 15: self.__writeUByte(delta << 4 | type) else: self.__writeByte(type) self.__writeI16(fid) self.__last_fid = fid def writeFieldBegin(self, name, type, fid): assert self.state == FIELD_WRITE, self.state if type == TType.BOOL: self.state = BOOL_WRITE self.__bool_fid = fid else: self.state = VALUE_WRITE self.__writeFieldHeader(CTYPES[type], fid) def writeFieldEnd(self): assert self.state in (VALUE_WRITE, BOOL_WRITE), self.state self.state = FIELD_WRITE def __writeUByte(self, byte): self.trans.write(pack('!B', byte)) def __writeByte(self, byte): self.trans.write(pack('!b', byte)) def __writeI16(self, i16): self.__writeVarint(makeZigZag(i16, 16)) def __writeSize(self, i32): self.__writeVarint(i32) def writeCollectionBegin(self, etype, size): assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state if size <= 14: self.__writeUByte(size << 4 | CTYPES[etype]) else: self.__writeUByte(0xf0 | CTYPES[etype]) self.__writeSize(size) self.__containers.append(self.state) self.state = CONTAINER_WRITE writeSetBegin = writeCollectionBegin writeListBegin = writeCollectionBegin def writeMapBegin(self, ktype, vtype, size): assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state if size == 0: self.__writeByte(0) else: self.__writeSize(size) self.__writeUByte(CTYPES[ktype] << 4 | CTYPES[vtype]) 
self.__containers.append(self.state) self.state = CONTAINER_WRITE def writeCollectionEnd(self): assert self.state == CONTAINER_WRITE, self.state self.state = self.__containers.pop() writeMapEnd = writeCollectionEnd writeSetEnd = writeCollectionEnd writeListEnd = writeCollectionEnd def writeBool(self, bool): if self.state == BOOL_WRITE: if bool: ctype = CompactType.TRUE else: ctype = CompactType.FALSE self.__writeFieldHeader(ctype, self.__bool_fid) elif self.state == CONTAINER_WRITE: if bool: self.__writeByte(CompactType.TRUE) else: self.__writeByte(CompactType.FALSE) else: raise AssertionError("Invalid state in compact protocol") writeByte = writer(__writeByte) writeI16 = writer(__writeI16) @writer def writeI32(self, i32): self.__writeVarint(makeZigZag(i32, 32)) @writer def writeI64(self, i64): self.__writeVarint(makeZigZag(i64, 64)) @writer def writeDouble(self, dub): self.trans.write(pack('<d', dub)) def __writeBinary(self, s): self.__writeSize(len(s)) self.trans.write(s) writeBinary = writer(__writeBinary) def readFieldBegin(self): assert self.state == FIELD_READ, self.state type = self.__readUByte() if type & 0x0f == TType.STOP: return (None, 0, 0) delta = type >> 4 if delta == 0: fid = self.__readI16() else: fid = self.__last_fid + delta self.__last_fid = fid type = type & 0x0f if type == CompactType.TRUE: self.state = BOOL_READ self.__bool_value = True elif type == CompactType.FALSE: self.state = BOOL_READ self.__bool_value = False else: self.state = VALUE_READ return (None, self.__getTType(type), fid) def readFieldEnd(self): assert self.state in (VALUE_READ, BOOL_READ), self.state self.state = FIELD_READ def __readUByte(self): result, = unpack('!B', self.trans.readAll(1)) return result def __readByte(self): result, = unpack('!b', self.trans.readAll(1)) return result def __readVarint(self): return readVarint(self.trans) def __readZigZag(self): return fromZigZag(self.__readVarint()) def __readSize(self): result = self.__readVarint() if result < 0: raise 
TProtocolException("Length < 0") return result def readMessageBegin(self): assert self.state == CLEAR proto_id = self.__readUByte() if proto_id != self.PROTOCOL_ID: raise TProtocolException(TProtocolException.BAD_VERSION, 'Bad protocol id in the message: %d' % proto_id) ver_type = self.__readUByte() type = (ver_type >> self.TYPE_SHIFT_AMOUNT) & self.TYPE_BITS version = ver_type & self.VERSION_MASK if version != self.VERSION: raise TProtocolException(TProtocolException.BAD_VERSION, 'Bad version: %d (expect %d)' % (version, self.VERSION)) seqid = self.__readVarint() # the sequence is a compact "var int" which is treaded as unsigned, # however the sequence is actually signed... if seqid > 2147483647: seqid = -2147483648 - (2147483648 - seqid) name = self.__readBinary().decode('utf-8') return (name, type, seqid) def readMessageEnd(self): assert self.state == CLEAR assert len(self.__structs) == 0 def readStructBegin(self): assert self.state in (CLEAR, CONTAINER_READ, VALUE_READ), self.state self.__structs.append((self.state, self.__last_fid)) self.state = FIELD_READ self.__last_fid = 0 def readStructEnd(self): assert self.state == FIELD_READ self.state, self.__last_fid = self.__structs.pop() def readCollectionBegin(self): assert self.state in (VALUE_READ, CONTAINER_READ), self.state size_type = self.__readUByte() size = size_type >> 4 type = self.__getTType(size_type) if size == 15: size = self.__readSize() self._check_container_length(size) self.__containers.append(self.state) self.state = CONTAINER_READ return type, size readSetBegin = readCollectionBegin readListBegin = readCollectionBegin def readMapBegin(self): assert self.state in (VALUE_READ, CONTAINER_READ), self.state size = self.__readSize() self._check_container_length(size) types = 0 if size > 0: types = self.__readUByte() vtype = self.__getTType(types) ktype = self.__getTType(types >> 4) self.__containers.append(self.state) self.state = CONTAINER_READ return (ktype, vtype, size) def readCollectionEnd(self): 
assert self.state == CONTAINER_READ, self.state self.state = self.__containers.pop() readSetEnd = readCollectionEnd readListEnd = readCollectionEnd readMapEnd = readCollectionEnd def readBool(self): if self.state == BOOL_READ: return self.__bool_value == CompactType.TRUE elif self.state == CONTAINER_READ: return self.__readByte() == CompactType.TRUE else: raise AssertionError("Invalid state in compact protocol: %d" % self.state) readByte = reader(__readByte) __readI16 = __readZigZag readI16 = reader(__readZigZag) readI32 = reader(__readZigZag) readI64 = reader(__readZigZag) @reader def readDouble(self): buff = self.trans.readAll(8) val, = unpack('<d', buff) return val def __readBinary(self): size = self.__readSize() self._check_string_length(size) return self.trans.readAll(size) readBinary = reader(__readBinary) def __getTType(self, byte): return TTYPES[byte & 0x0f]
TCompactProtocol
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 251153, "end": 253638 }
class ____(Response): """ Response of tasks.dequeue endpoint. :param updated: Number of tasks updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict :param dequeued: Number of tasks dequeued (0 or 1) :type dequeued: int """ _service = "tasks" _action = "dequeue" _version = "2.23" _schema = { "definitions": {}, "properties": { "dequeued": { "description": "Number of tasks dequeued (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__(self, updated=None, fields=None, dequeued=None, **kwargs): super(DequeueResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields self.dequeued = dequeued @schema_property("updated") def updated(self): return self._property_updated @updated.setter def updated(self, value): if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self): return self._property_fields @fields.setter def fields(self, value): if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value @schema_property("dequeued") def dequeued(self): return self._property_dequeued @dequeued.setter def dequeued(self, value): if value is None: self._property_dequeued = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "dequeued", six.integer_types) self._property_dequeued = value
DequeueResponse
python
readthedocs__readthedocs.org
readthedocs/api/v3/views.py
{ "start": 16222, "end": 17218 }
class ____( APIv3Settings, FlexFieldsMixin, ListModelMixin, RetrieveModelMixin, UpdateMixin, UpdateModelMixin, GenericViewSet, ): """ Endpoint to return all the notifications related to the logged in user. Hitting this endpoint while logged in will return notifications attached to: - User making the request - Organizations where the user is owner/member - Projects where the user is admin/member """ model = Notification serializer_class = NotificationSerializer # Override global permissions here because it doesn't not make sense to hit # this endpoint without being logged in. We can't use our # ``CommonPermissions`` because it requires the endpoint to be nested under # ``projects`` permission_classes = (IsAuthenticated,) filterset_class = NotificationFilter def get_queryset(self): return Notification.objects.for_user(self.request.user, resource="all")
NotificationsForUserViewSet
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/hooks/dms.py
{ "start": 1054, "end": 1237 }
class ____(str, Enum): """Available AWS DMS Task Waiter statuses.""" DELETED = "deleted" READY = "ready" RUNNING = "running" STOPPED = "stopped"
DmsTaskWaiterStatus
python
pandas-dev__pandas
pandas/tests/indexing/test_coercion.py
{ "start": 18556, "end": 25603 }
class ____(CoercionBase): # not indexing, but place here for consistency method = "fillna" def _assert_fillna_conversion(self, original, value, expected, expected_dtype): """test coercion triggered by fillna""" target = original.copy() res = target.fillna(value) tm.assert_equal(res, expected) assert res.dtype == expected_dtype @pytest.mark.parametrize( "fill_val, fill_dtype", [(1, object), (1.1, object), (1 + 1j, object), (True, object)], ) def test_fillna_object(self, index_or_series, fill_val, fill_dtype): klass = index_or_series obj = klass(["a", np.nan, "c", "d"], dtype=object) assert obj.dtype == object exp = klass(["a", fill_val, "c", "d"], dtype=object) self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) @pytest.mark.parametrize( "fill_val,fill_dtype", [(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)], ) def test_fillna_float64(self, index_or_series, fill_val, fill_dtype): klass = index_or_series obj = klass([1.1, np.nan, 3.3, 4.4]) assert obj.dtype == np.float64 exp = klass([1.1, fill_val, 3.3, 4.4]) self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) @pytest.mark.parametrize( "fill_val,fill_dtype", [ (1, np.complex128), (1.1, np.complex128), (1 + 1j, np.complex128), (True, object), ], ) def test_fillna_complex128(self, index_or_series, fill_val, fill_dtype): klass = index_or_series obj = klass([1 + 1j, np.nan, 3 + 3j, 4 + 4j], dtype=np.complex128) assert obj.dtype == np.complex128 exp = klass([1 + 1j, fill_val, 3 + 3j, 4 + 4j]) self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) @pytest.mark.parametrize( "fill_val,fill_dtype", [ (pd.Timestamp("2012-01-01").as_unit("s"), "datetime64[s]"), (pd.Timestamp("2012-01-01", tz="US/Eastern").as_unit("s"), object), (1, object), ("x", object), ], ids=["datetime64", "datetime64tz", "object", "object"], ) def test_fillna_datetime(self, index_or_series, fill_val, fill_dtype): klass = index_or_series obj = klass( [ pd.Timestamp("2011-01-01").as_unit("s"), 
pd.NaT, pd.Timestamp("2011-01-03").as_unit("s"), pd.Timestamp("2011-01-04").as_unit("s"), ] ) assert obj.dtype == "datetime64[s]" exp = klass( [ pd.Timestamp("2011-01-01").as_unit("s"), fill_val, pd.Timestamp("2011-01-03").as_unit("s"), pd.Timestamp("2011-01-04").as_unit("s"), ] ) self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) @pytest.mark.parametrize( "fill_val,fill_dtype", [ ( pd.Timestamp("2012-01-01", tz="US/Eastern").as_unit("s"), "datetime64[s, US/Eastern]", ), (pd.Timestamp("2012-01-01").as_unit("s"), object), # pre-2.0 with a mismatched tz we would get object result ( pd.Timestamp("2012-01-01", tz="Asia/Tokyo").as_unit("s"), "datetime64[s, US/Eastern]", ), (1, object), ("x", object), ], ) def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype): klass = index_or_series tz = "US/Eastern" obj = klass( [ pd.Timestamp("2011-01-01", tz=tz).as_unit("s"), pd.NaT, pd.Timestamp("2011-01-03", tz=tz).as_unit("s"), pd.Timestamp("2011-01-04", tz=tz).as_unit("s"), ] ) assert obj.dtype == "datetime64[s, US/Eastern]" if getattr(fill_val, "tz", None) is None: fv = fill_val else: fv = fill_val.tz_convert(tz) exp = klass( [ pd.Timestamp("2011-01-01", tz=tz).as_unit("s"), fv, pd.Timestamp("2011-01-03", tz=tz).as_unit("s"), pd.Timestamp("2011-01-04", tz=tz).as_unit("s"), ] ) self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) @pytest.mark.parametrize( "fill_val", [ 1, 1.1, 1 + 1j, True, pd.Interval(1, 2, closed="left"), pd.Timestamp("2012-01-01", tz="US/Eastern").as_unit("s"), pd.Timestamp("2012-01-01").as_unit("s"), pd.Timedelta(days=1), pd.Period("2016-01-01", "D"), ], ) def test_fillna_interval(self, index_or_series, fill_val): ii = pd.interval_range(1.0, 5.0, closed="right").insert(1, np.nan) assert isinstance(ii.dtype, pd.IntervalDtype) obj = index_or_series(ii) exp = index_or_series([ii[0], fill_val, ii[2], ii[3], ii[4]], dtype=object) fill_dtype = object self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) 
@pytest.mark.xfail(reason="Test not implemented") def test_fillna_series_int64(self): raise NotImplementedError @pytest.mark.xfail(reason="Test not implemented") def test_fillna_index_int64(self): raise NotImplementedError @pytest.mark.xfail(reason="Test not implemented") def test_fillna_series_bool(self): raise NotImplementedError @pytest.mark.xfail(reason="Test not implemented") def test_fillna_index_bool(self): raise NotImplementedError @pytest.mark.xfail(reason="Test not implemented") def test_fillna_series_timedelta64(self): raise NotImplementedError @pytest.mark.parametrize( "fill_val", [ 1, 1.1, 1 + 1j, True, pd.Interval(1, 2, closed="left"), pd.Timestamp("2012-01-01", tz="US/Eastern").as_unit("s"), pd.Timestamp("2012-01-01").as_unit("s"), pd.Timedelta(days=1), pd.Period("2016-01-01", "W"), ], ) def test_fillna_series_period(self, index_or_series, fill_val): pi = pd.period_range("2016-01-01", periods=4, freq="D").insert(1, pd.NaT) assert isinstance(pi.dtype, pd.PeriodDtype) obj = index_or_series(pi) exp = index_or_series([pi[0], fill_val, pi[2], pi[3], pi[4]], dtype=object) fill_dtype = object self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype) @pytest.mark.xfail(reason="Test not implemented") def test_fillna_index_timedelta64(self): raise NotImplementedError @pytest.mark.xfail(reason="Test not implemented") def test_fillna_index_period(self): raise NotImplementedError
TestFillnaSeriesCoercion
python
ansible__ansible
lib/ansible/plugins/become/sudo.py
{ "start": 3022, "end": 4809 }
class ____(BecomeBase): name = 'sudo' # messages for detecting prompted password issues fail = ('Sorry, try again.',) missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required') def build_become_command(self, cmd, shell): super(BecomeModule, self).build_become_command(cmd, shell) if not cmd: return cmd becomecmd = self.get_option('become_exe') or self.name flags = self.get_option('become_flags') or '' prompt = '' if self.get_option('become_pass'): self.prompt = '[sudo via ansible, key=%s] password:' % self._id if flags: # this could be simplified, but kept as is for now for backwards string matching reflag = [] for flag in shlex.split(flags): if flag in ('-n', '--non-interactive'): continue elif not flag.startswith('--'): # handle -XnxxX flags only flag = re.sub(r'^(-\w*)n(\w*.*)', r'\1\2', flag) reflag.append(flag) flags = shlex.join(reflag) prompt = '-p "%s"' % (self.prompt) user = self.get_option('become_user') or '' if user: user = '-u %s' % (user) if chdir := self.get_option('sudo_chdir'): try: becomecmd = f'{shell.CD} {shlex.quote(chdir)} {shell._SHELL_AND} {becomecmd}' except AttributeError as ex: raise AnsibleError(f'The {shell._load_name!r} shell plugin does not support sudo chdir. It is missing the {ex.name!r} attribute.') return ' '.join([becomecmd, flags, prompt, user, self._build_success_command(cmd, shell)])
BecomeModule
python
scrapy__scrapy
tests/test_downloadermiddleware.py
{ "start": 10250, "end": 11706 }
class ____(TestManagerBase): @deferred_f_from_coro_f async def test_download_func_spider_arg(self): req = Request("http://example.com/index.html") resp = Response(req.url, status=200) def download_func(request: Request, spider: Spider) -> Deferred[Response]: return succeed(resp) async with self.get_mwman() as mwman: with pytest.warns( ScrapyDeprecationWarning, match="The spider argument of download_func is deprecated", ): ret = await maybe_deferred_to_future(mwman.download(download_func, req)) assert isinstance(ret, Response) @deferred_f_from_coro_f async def test_mwman_download_spider_arg(self): req = Request("http://example.com/index.html") resp = Response(req.url, status=200) def download_func(request: Request) -> Deferred[Response]: return succeed(resp) async with self.get_mwman() as mwman: with pytest.warns( ScrapyDeprecationWarning, match=r"Passing a spider argument to DownloaderMiddlewareManager.download\(\)" r" is deprecated and the passed value is ignored.", ): ret = await maybe_deferred_to_future( mwman.download(download_func, req, mwman.crawler.spider) ) assert isinstance(ret, Response)
TestDownloadDeprecated
python
realpython__materials
web-scraping-with-scrapy-and-mongodb/books/books/middlewares.py
{ "start": 241, "end": 1917 }
class ____: # Not all methods need to be defined. If a method is not defined, # scrapy acts as if the spider middleware does not modify the # passed objects. @classmethod def from_crawler(cls, crawler): # This method is used by Scrapy to create your spiders. s = cls() crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) return s def process_spider_input(self, response, spider): # Called for each response that goes through the spider # middleware and into the spider. # Should return None or raise an exception. return None def process_spider_output(self, response, result, spider): # Called with the results returned from the Spider, after # it has processed the response. # Must return an iterable of Request, or item objects. for i in result: yield i def process_spider_exception(self, response, exception, spider): # Called when a spider or process_spider_input() method # (from other spider middleware) raises an exception. # Should return either None or an iterable of Request or item objects. pass def process_start_requests(self, start_requests, spider): # Called with the start requests of the spider, and works # similarly to the process_spider_output() method, except # that it doesn’t have a response associated. # Must return only requests (not items). for r in start_requests: yield r def spider_opened(self, spider): spider.logger.info("Spider opened: %s" % spider.name)
BooksSpiderMiddleware
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pep8_naming/N802.py
{ "start": 220, "end": 515 }
class ____(unittest.TestCase): def tearDown(self): return super().tearDown() def testTest(self): assert True from typing import override, overload @override def BAD_FUNC(): pass @overload def BAD_FUNC(): pass import ast from ast import NodeTransformer
Test
python
numpy__numpy
tools/swig/test/testVector.py
{ "start": 12747, "end": 13018 }
class ____(VectorTestCase): def __init__(self, methodName="runTest"): VectorTestCase.__init__(self, methodName) self.typeStr = "longLong" self.typeCode = "q" ######################################################################
longLongTestCase
python
django__django
tests/urlpatterns_reverse/test_localeregexdescriptor.py
{ "start": 416, "end": 2490 }
class ____(SimpleTestCase): def setUp(self): translation.trans_real._translations = {} def tearDown(self): translation.trans_real._translations = {} def test_translated_regex_compiled_per_language(self): provider = RegexPattern(translation.gettext_lazy("^foo/$")) with translation.override("de"): de_compiled = provider.regex # compiled only once per language error = AssertionError( "tried to compile url regex twice for the same language" ) with mock.patch("django.urls.resolvers.re.compile", side_effect=error): de_compiled_2 = provider.regex with translation.override("fr"): fr_compiled = provider.regex self.assertEqual(fr_compiled.pattern, "^foo-fr/$") self.assertEqual(de_compiled.pattern, "^foo-de/$") self.assertEqual(de_compiled, de_compiled_2) def test_nontranslated_regex_compiled_once(self): provider = RegexPattern("^foo/$") with translation.override("de"): de_compiled = provider.regex with translation.override("fr"): # compiled only once, regardless of language error = AssertionError("tried to compile non-translated url regex twice") with mock.patch("django.urls.resolvers.re.compile", side_effect=error): fr_compiled = provider.regex self.assertEqual(de_compiled.pattern, "^foo/$") self.assertEqual(fr_compiled.pattern, "^foo/$") def test_regex_compile_error(self): """Regex errors are re-raised as ImproperlyConfigured.""" provider = RegexPattern("*") msg = '"*" is not a valid regular expression: nothing to repeat' with self.assertRaisesMessage(ImproperlyConfigured, msg): provider.regex def test_access_locale_regex_descriptor(self): self.assertIsInstance(RegexPattern.regex, LocaleRegexDescriptor) @override_settings(LOCALE_PATHS=[Path(here) / "translations" / "locale"])
LocaleRegexDescriptorTests
python
PyCQA__pylint
tests/functional/u/unused/unused_argument.py
{ "start": 1563, "end": 2494 }
class ____: """dummy class""" def method(self, arg): # [unused-argument] """dummy method""" print(self) def __init__(self, *unused_args, **unused_kwargs): pass @classmethod def selected(cls, *args, **kwargs): # [unused-argument, unused-argument] """called by the registry when the vobject has been selected. """ return cls def using_inner_function(self, etype, size=1): """return a fake result set for a particular entity type""" rset = AAAA([('A',)]*size, f'{etype} X', description=[(etype,)]*size) def inner(row, col=0, etype=etype, req=self, rset=rset): """inner using all its argument""" # pylint: disable=maybe-no-member return req.vreg.etype_class(etype)(req, rset, row, col) # pylint: disable = attribute-defined-outside-init rset.get_entity = inner
AAAA
python
django__django
tests/model_fields/test_jsonfield.py
{ "start": 45841, "end": 50687 }
class ____(TestCase): def test_repr(self): self.assertEqual(repr(JSONNull()), "JSONNull()") def test_deconstruct(self): jsonnull = JSONNull() path, args, kwargs = jsonnull.deconstruct() self.assertEqual(path, "django.db.models.JSONNull") self.assertEqual(args, ()) self.assertEqual(kwargs, {}) def test_save_load(self): obj = JSONModel(value=JSONNull()) obj.save() self.assertIsNone(obj.value) def test_create(self): obj = JSONModel.objects.create(value=JSONNull()) self.assertIsNone(obj.value) def test_update(self): obj = JSONModel.objects.create(value={"key": "value"}) JSONModel.objects.update(value=JSONNull()) obj.refresh_from_db() self.assertIsNone(obj.value) def test_filter(self): json_null = NullableJSONModel.objects.create(value=JSONNull()) sql_null = NullableJSONModel.objects.create(value=None) self.assertSequenceEqual( [json_null], NullableJSONModel.objects.filter(value=JSONNull()) ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__isnull=True), [sql_null] ) def test_bulk_update(self): obj1 = NullableJSONModel.objects.create(value={"k": "1st"}) obj2 = NullableJSONModel.objects.create(value={"k": "2nd"}) obj1.value = JSONNull() obj2.value = JSONNull() NullableJSONModel.objects.bulk_update([obj1, obj2], fields=["value"]) self.assertSequenceEqual( NullableJSONModel.objects.filter(value=JSONNull()), [obj1, obj2], ) def test_case_expression_with_jsonnull_then(self): obj = JSONModel.objects.create(value={"key": "value"}) JSONModel.objects.filter(pk=obj.pk).update( value=Case( When(value={"key": "value"}, then=JSONNull()), ) ) obj.refresh_from_db() self.assertIsNone(obj.value) def test_case_expr_with_jsonnull_condition(self): obj = NullableJSONModel.objects.create(value=JSONNull()) NullableJSONModel.objects.filter(pk=obj.pk).update( value=Case( When( value=JSONNull(), then=Value({"key": "replaced"}, output_field=JSONField()), ) ), ) obj.refresh_from_db() self.assertEqual(obj.value, {"key": "replaced"}) def test_key_transform_exact_filter(self): obj = 
NullableJSONModel.objects.create(value={"key": None}) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__key=JSONNull()), [obj], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__key=None), [obj] ) def test_index_lookup(self): obj = NullableJSONModel.objects.create(value=["a", "b", None, 3]) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__2=JSONNull()), [obj] ) self.assertSequenceEqual(NullableJSONModel.objects.filter(value__2=None), [obj]) @skipUnlessDBFeature("supports_table_check_constraints") def test_constraint_validation(self): constraint = CheckConstraint( condition=~Q(value=JSONNull()), name="check_not_json_null" ) constraint.validate(NullableJSONModel, NullableJSONModel(value={"key": None})) msg = f"Constraint “{constraint.name}” is violated." with self.assertRaisesMessage(ValidationError, msg): constraint.validate(NullableJSONModel, NullableJSONModel(value=JSONNull())) @skipUnlessDBFeature("supports_table_check_constraints") def test_constraint_validation_key_transform(self): constraint = CheckConstraint( condition=Q(value__has_key="name") & ~Q(value__name=JSONNull()), name="check_value_name_not_json_null", ) constraint.validate( NullableJSONModel, NullableJSONModel(value={"name": "Django"}) ) msg = f"Constraint “{constraint.name}” is violated." with self.assertRaisesMessage(ValidationError, msg): constraint.validate( NullableJSONModel, NullableJSONModel(value={"name": None}) ) def test_default(self): obj = JSONNullDefaultModel.objects.create() self.assertIsNone(obj.value) def test_custom_jsonnull_encoder(self): obj = JSONNullDefaultModel.objects.create( value={"name": JSONNull(), "array": [1, JSONNull()]} ) obj.refresh_from_db() self.assertIsNone(obj.value["name"]) self.assertEqual(obj.value["array"], [1, None]) # RemovedInDjango70Warning. @skipUnlessDBFeature("supports_primitives_in_json_field")
JSONNullTests
python
Textualize__textual
src/textual/css/_style_properties.py
{ "start": 37859, "end": 38859 }
class ____: """Descriptor for getting transitions properties""" def __get__( self, obj: StylesBase, objtype: type[StylesBase] | None = None ) -> dict[str, Transition]: """Get a mapping of properties to the transitions applied to them. Args: obj: The ``Styles`` object. objtype: The ``Styles`` class. Returns: A ``dict`` mapping property names to the ``Transition`` applied to them. e.g. ``{"offset": Transition(...), ...}``. If no transitions have been set, an empty ``dict`` is returned. """ return obj.get_rule("transitions", {}) # type: ignore[return-value] def __set__( self, obj: StylesBase, transitions: dict[str, Transition] | None ) -> None: _rich_traceback_omit = True if transitions is None: obj.clear_rule("transitions") else: obj.set_rule("transitions", transitions.copy())
TransitionsProperty
python
google__jax
tests/array_api_test.py
{ "start": 4187, "end": 6919 }
class ____(jtu.JaxTestCase): info = ARRAY_API_NAMESPACE.__array_namespace_info__() def setUp(self): super().setUp() self._boolean = self.build_dtype_dict(["bool"]) self._signed = self.build_dtype_dict(["int8", "int16", "int32"]) self._unsigned = self.build_dtype_dict(["uint8", "uint16", "uint32"]) self._floating = self.build_dtype_dict(["float32"]) self._complex = self.build_dtype_dict(["complex64"]) if config.enable_x64.value: self._signed["int64"] = jnp.dtype("int64") self._unsigned["uint64"] = jnp.dtype("uint64") self._floating["float64"] = jnp.dtype("float64") self._complex["complex128"] = jnp.dtype("complex128") self._integral = self._signed | self._unsigned self._numeric = ( self._signed | self._unsigned | self._floating | self._complex ) def build_dtype_dict(self, dtypes): out = {} for name in dtypes: out[name] = jnp.dtype(name) return out def test_capabilities_info(self): capabilities = self.info.capabilities() assert not capabilities["boolean indexing"] assert not capabilities["data-dependent shapes"] assert capabilities["max dimensions"] == 64 def test_default_device_info(self): assert self.info.default_device() is None def test_devices_info(self): devices = set(self.info.devices()) assert None in devices for backend in xb.backends(): assert devices.issuperset(jax.devices(backend)) def test_default_dtypes_info(self): _default_dtypes = { "real floating": "f", "complex floating": "c", "integral": "i", "indexing": "i", } target_dict = { dtype_name: default_types.get(kind)() for dtype_name, kind in _default_dtypes.items() } assert self.info.default_dtypes() == target_dict @parameterized.parameters( "bool", "signed integer", "real floating", "complex floating", "integral", "numeric", None, (("real floating", "complex floating"),), (("integral", "signed integer"),), (("integral", "bool"),), ) def test_dtypes_info(self, kind): info_dict = self.info.dtypes(kind=kind) control = { "bool":self._boolean, "signed integer":self._signed, "unsigned 
integer":self._unsigned, "real floating":self._floating, "complex floating":self._complex, "integral": self._integral, "numeric": self._numeric } target_dict = {} if kind is None: target_dict = control["numeric"] | self._boolean elif isinstance(kind, tuple): target_dict = {} for _kind in kind: target_dict |= control[_kind] else: target_dict = control[kind] assert info_dict == target_dict
ArrayAPIInspectionUtilsTest
python
django__django
tests/admin_inlines/models.py
{ "start": 6118, "end": 6322 }
class ____(models.Model): """ Model added for ticket 19838 """ chapter = models.ForeignKey(Chapter, models.PROTECT) note = models.CharField(max_length=40) # Models for #16838
FootNote
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py
{ "start": 7798, "end": 8794 }
class ____: @mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook")) def test_execute(self, mock_hook): op = ManagedKafkaDeleteClusterOperator( task_id=TASK_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, location=GCP_LOCATION, project_id=GCP_PROJECT, cluster_id=TEST_CLUSTER_ID, request_id=None, retry=RETRY, timeout=TIMEOUT, metadata=METADATA, ) op.execute(context={}) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN) mock_hook.return_value.delete_cluster.assert_called_once_with( location=GCP_LOCATION, project_id=GCP_PROJECT, cluster_id=TEST_CLUSTER_ID, request_id=None, retry=RETRY, timeout=TIMEOUT, metadata=METADATA, )
TestManagedKafkaDeleteClusterOperator
python
pennersr__django-allauth
allauth/account/views.py
{ "start": 48330, "end": 49963 }
class ____(FormView): template_name = "account/phone_change." + app_settings.TEMPLATE_EXTENSION form_class = ChangePhoneForm success_url = reverse_lazy("account_verify_phone") def get_form_class(self): return get_form_class(app_settings.FORMS, "change_phone", self.form_class) def get_form_kwargs(self): ret = super().get_form_kwargs() self._phone_verified = get_adapter().get_phone(self.request.user) if ( self.request.POST.get("action") == "verify" and self._phone_verified and not self._phone_verified[1] ): # We're (re-)sending the verificaton code, so just feed the existing # phone to the form... ret["data"] = {"phone": self._phone_verified[0]} ret["phone"] = None else: ret["phone"] = self._phone_verified[0] if self._phone_verified else None ret["user"] = self.request.user return ret def form_valid(self, form): flows.phone_verification.ChangePhoneVerificationProcess.initiate( self.request, form.cleaned_data["phone"] ) return super().form_valid(form) def get_context_data(self, **kwargs): ret = super().get_context_data(**kwargs) phone = None phone_verified = False if self._phone_verified: phone, phone_verified = self._phone_verified ret.update( { "phone": phone, "phone_verified": phone_verified, } ) return ret change_phone = ChangePhoneView.as_view()
ChangePhoneView
python
realpython__materials
game-of-life-python/source_code_final/rplife/views.py
{ "start": 99, "end": 928 }
class ____: def __init__(self, pattern, gen=10, frame_rate=7, bbox=(0, 0, 40, 20)): self.pattern = pattern self.gen = gen self.frame_rate = frame_rate self.bbox = bbox def show(self): curses.wrapper(self._draw) def _draw(self, screen): current_grid = LifeGrid(self.pattern) curses.curs_set(0) screen.clear() try: screen.addstr(0, 0, current_grid.as_string(self.bbox)) except curses.error: raise ValueError( f"Error: terminal too small for pattern '{self.pattern.name}'" ) for _ in range(self.gen): current_grid.evolve() screen.addstr(0, 0, current_grid.as_string(self.bbox)) screen.refresh() sleep(1 / self.frame_rate)
CursesView
python
PrefectHQ__prefect
tests/infrastructure/provisioners/test_ecs.py
{ "start": 19625, "end": 25206 }
class ____:
    """Tests for the VPC resource of the ECS push work-pool provisioner.

    Relies on fixtures defined elsewhere in this module (``vpc_resource``,
    ``no_default_vpc``, ``existing_prefect_vpc``, ``existing_vpc``) that set
    up the mocked AWS state queried through ``boto3`` below — NOTE(review):
    presumably moto-backed; confirm against the fixture definitions.
    """

    # --- task counting ---------------------------------------------------

    @pytest.mark.usefixtures("no_default_vpc")
    async def test_get_task_count(self, vpc_resource):
        # No default VPC -> provisioning a new VPC is 4 tasks.
        count = await vpc_resource.get_task_count()
        assert count == 4

    async def test_get_task_count_default_vpc(self, vpc_resource):
        # A default VPC exists -> nothing to do.
        count = await vpc_resource.get_task_count()
        assert count == 0

    @pytest.mark.usefixtures("existing_prefect_vpc")
    async def test_get_task_count_existing_prefect_vpc(self, vpc_resource):
        # A previously Prefect-created VPC exists -> nothing to do.
        count = await vpc_resource.get_task_count()
        assert count == 0

    @pytest.mark.usefixtures("existing_vpc")
    async def test_get_task_count_existing_vpc(self, vpc_resource):
        # A non-default, non-Prefect VPC does not count -> still 4 tasks.
        count = await vpc_resource.get_task_count()
        assert count == 4

    # --- requires_provisioning -------------------------------------------

    async def test_requires_provisioning_default_vpc_exists(self, vpc_resource):
        requires_provisioning = await vpc_resource.requires_provisioning()
        assert not requires_provisioning

    @pytest.mark.usefixtures("existing_prefect_vpc")
    async def test_requires_provisioning_prefect_created_vpc_exists(self, vpc_resource):
        requires_provisioning = await vpc_resource.requires_provisioning()
        assert not requires_provisioning

    @pytest.mark.usefixtures("no_default_vpc")
    async def test_requires_provisioning_no_default_vpc(self, vpc_resource):
        requires_provisioning = await vpc_resource.requires_provisioning()
        assert requires_provisioning

    @pytest.mark.usefixtures("existing_vpc")
    async def test_requires_provisioning_existing_vpc(self, vpc_resource):
        # An unrelated existing VPC still requires a Prefect VPC.
        requires_provisioning = await vpc_resource.requires_provisioning()
        assert requires_provisioning

    # --- planned actions --------------------------------------------------

    @pytest.mark.usefixtures("no_default_vpc")
    async def test_get_planned_actions_requires_provisioning(self, vpc_resource):
        actions = await vpc_resource.get_planned_actions()
        # Rich console markup ([blue]...[/]) is part of the expected string.
        assert actions == [
            "Creating a VPC with CIDR [blue]172.31.0.0/16[/] for running"
            " ECS tasks: [blue]prefect-ecs-vpc[/]"
        ]

    async def test_get_planned_actions_does_not_require_provisioning(
        self, vpc_resource
    ):
        actions = await vpc_resource.get_planned_actions()
        assert actions == []

    # --- provision --------------------------------------------------------

    @pytest.mark.usefixtures("no_default_vpc")
    async def test_provision(self, vpc_resource):
        """Provisioning creates a fully-wired VPC and writes its id into the
        base job template's ``vpc_id`` default."""
        base_job_template = {
            "variables": {
                "type": "object",
                "properties": {"vpc_id": {}},
            }
        }
        advance_mock = MagicMock()
        await vpc_resource.provision(
            base_job_template=base_job_template,
            advance=advance_mock,
        )

        assert isinstance(
            base_job_template["variables"]["properties"]["vpc_id"]["default"], str
        )

        # Inspect the created VPC directly via boto3.
        ec2 = boto3.resource("ec2")
        vpc = ec2.Vpc(base_job_template["variables"]["properties"]["vpc_id"]["default"])

        assert vpc.cidr_block == "172.31.0.0/16"
        assert vpc.tags[0]["Key"] == "Name"
        assert vpc.tags[0]["Value"] == "prefect-ecs-vpc"
        assert len(list(vpc.subnets.all())) == 3
        assert len(list(vpc.internet_gateways.all())) == 1
        # One route table is created by default, and the other is created for
        # the internet gateway
        assert len(list(vpc.route_tables.all())) == 2
        # One security group is created by default, and the other is created
        # to restrict traffic to the VPC
        assert len(list(vpc.security_groups.all())) == 2

        # The progress callback must have been invoked during provisioning.
        advance_mock.assert_called()

    @pytest.mark.usefixtures("existing_prefect_vpc")
    async def test_provision_existing_prefect_vpc(self, vpc_resource):
        """An already-provisioned Prefect VPC is reused: its id is written to
        the template and no provisioning work (no advance calls) happens."""
        base_job_template = {
            "variables": {
                "type": "object",
                "properties": {"vpc_id": {}},
            }
        }
        advance_mock = MagicMock()
        await vpc_resource.provision(
            base_job_template=base_job_template,
            advance=advance_mock,
        )

        # Find the pre-existing Prefect VPC by its Name tag.
        ec2 = boto3.resource("ec2")
        prefect_vpc = None
        for vpc in ec2.vpcs.all():
            if vpc.tags[0]["Value"] == "prefect-ecs-vpc":
                prefect_vpc = vpc
                break

        assert (
            base_job_template["variables"]["properties"]["vpc_id"]["default"]
            == prefect_vpc.id
        )
        advance_mock.assert_not_called()

    @pytest.mark.usefixtures("existing_vpc")
    async def test_provision_existing_vpc(self, vpc_resource):
        """With an unrelated VPC present, a new Prefect VPC is created on a
        non-colliding CIDR block."""
        base_job_template = {
            "variables": {
                "type": "object",
                "properties": {"vpc_id": {}},
            }
        }
        advance_mock = MagicMock()
        await vpc_resource.provision(
            base_job_template=base_job_template,
            advance=advance_mock,
        )

        ec2 = boto3.resource("ec2")
        vpc = ec2.Vpc(base_job_template["variables"]["properties"]["vpc_id"]["default"])

        # The CIDR block is different to avoid a collision with the existing
        # VPC
        assert vpc.cidr_block == "172.32.0.0/16"

    async def test_provision_default_vpc(self, vpc_resource):
        """When a default VPC already exists, no vpc_id default is written
        and no provisioning work is performed."""
        base_job_template = {
            "variables": {
                "type": "object",
                "properties": {"vpc_id": {}},
            }
        }
        advance_mock = MagicMock()
        await vpc_resource.provision(
            base_job_template=base_job_template,
            advance=advance_mock,
        )

        assert "default" not in base_job_template["variables"]["properties"]["vpc_id"]
        advance_mock.assert_not_called()
TestVpcResource