language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
RaRe-Technologies__gensim
gensim/corpora/malletcorpus.py
{ "start": 378, "end": 7714 }
class ____(LowCorpus): """Corpus handles input in `Mallet format <http://mallet.cs.umass.edu/import.php>`_. **Format description** One file, one instance per line, assume the data is in the following format :: [URL] [language] [text of the page...] Or, more generally, :: [document #1 id] [label] [text of the document...] [document #2 id] [label] [text of the document...] ... [document #N id] [label] [text of the document...] Note that language/label is *not* considered in Gensim, used `__unknown__` as default value. Examples -------- .. sourcecode:: pycon >>> from gensim.test.utils import get_tmpfile, common_texts >>> from gensim.corpora import MalletCorpus >>> from gensim.corpora import Dictionary >>> >>> # Prepare needed data >>> dictionary = Dictionary(common_texts) >>> corpus = [dictionary.doc2bow(doc) for doc in common_texts] >>> >>> # Write corpus in Mallet format to disk >>> output_fname = get_tmpfile("corpus.mallet") >>> MalletCorpus.serialize(output_fname, corpus, dictionary) >>> >>> # Read corpus >>> loaded_corpus = MalletCorpus(output_fname) """ def __init__(self, fname, id2word=None, metadata=False): """ Parameters ---------- fname : str Path to file in Mallet format. id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional Mapping between word_ids (integers) and words (strings). If not provided, the mapping is constructed directly from `fname`. metadata : bool, optional If True, return additional information ("document id" and "lang" when you call :meth:`~gensim.corpora.malletcorpus.MalletCorpus.line2doc`, :meth:`~gensim.corpora.malletcorpus.MalletCorpus.__iter__` or :meth:`~gensim.corpora.malletcorpus.MalletCorpus.docbyoffset` """ self.metadata = metadata LowCorpus.__init__(self, fname, id2word) def _calculate_num_docs(self): """Get number of documents. Returns ------- int Number of documents in file. 
""" with utils.open(self.fname, 'rb') as fin: result = sum(1 for _ in fin) return result def __iter__(self): """Iterate over the corpus. Yields ------ list of (int, int) Document in BoW format (+"document_id" and "lang" if metadata=True). """ with utils.open(self.fname, 'rb') as f: for line in f: yield self.line2doc(line) def line2doc(self, line): """Covert line into document in BoW format. Parameters ---------- line : str Line from input file. Returns ------- list of (int, int) Document in BoW format (+"document_id" and "lang" if metadata=True). Examples -------- .. sourcecode:: pycon >>> from gensim.test.utils import datapath >>> from gensim.corpora import MalletCorpus >>> >>> corpus = MalletCorpus(datapath("testcorpus.mallet")) >>> corpus.line2doc("en computer human interface") [(3, 1), (4, 1)] """ split_line = utils.to_unicode(line).strip().split(None, 2) docid, doclang = split_line[0], split_line[1] words = split_line[2] if len(split_line) >= 3 else '' doc = super(MalletCorpus, self).line2doc(words) if self.metadata: return doc, (docid, doclang) else: return doc @staticmethod def save_corpus(fname, corpus, id2word=None, metadata=False): """Save a corpus in the Mallet format. Warnings -------- This function is automatically called by :meth:`gensim.corpora.malletcorpus.MalletCorpus.serialize`, don't call it directly, call :meth:`gensim.corpora.lowcorpus.malletcorpus.MalletCorpus.serialize` instead. Parameters ---------- fname : str Path to output file. corpus : iterable of iterable of (int, int) Corpus in BoW format. id2word : {dict of (int, str), :class:`~gensim.corpora.dictionary.Dictionary`}, optional Mapping between word_ids (integers) and words (strings). If not provided, the mapping is constructed directly from `corpus`. metadata : bool, optional If True - ???? Return ------ list of int List of offsets in resulting file for each document (in bytes), can be used for :meth:`~gensim.corpora.malletcorpus.Malletcorpus.docbyoffset`. 
Notes ----- The document id will be generated by enumerating the corpus. That is, it will range between 0 and number of documents in the corpus. Since Mallet has a language field in the format, this defaults to the string '__unknown__'. If the language needs to be saved, post-processing will be required. """ if id2word is None: logger.info("no word id mapping provided; initializing from corpus") id2word = utils.dict_from_corpus(corpus) logger.info("storing corpus in Mallet format into %s", fname) truncated = 0 offsets = [] with utils.open(fname, 'wb') as fout: for doc_id, doc in enumerate(corpus): if metadata: doc_id, doc_lang = doc[1] doc = doc[0] else: doc_lang = '__unknown__' words = [] for wordid, value in doc: if abs(int(value) - value) > 1e-6: truncated += 1 words.extend([utils.to_unicode(id2word[wordid])] * int(value)) offsets.append(fout.tell()) fout.write(utils.to_utf8('%s %s %s\n' % (doc_id, doc_lang, ' '.join(words)))) if truncated: logger.warning( "Mallet format can only save vectors with integer elements; " "%i float entries were truncated to integer value", truncated ) return offsets def docbyoffset(self, offset): """Get the document stored in file by `offset` position. Parameters ---------- offset : int Offset (in bytes) to begin of document. Returns ------- list of (int, int) Document in BoW format (+"document_id" and "lang" if metadata=True). Examples -------- .. sourcecode:: pycon >>> from gensim.test.utils import datapath >>> from gensim.corpora import MalletCorpus >>> >>> data = MalletCorpus(datapath("testcorpus.mallet")) >>> data.docbyoffset(1) # end of first line [(3, 1), (4, 1)] >>> data.docbyoffset(4) # start of second line [(4, 1)] """ with utils.open(self.fname, 'rb') as f: f.seek(offset) return self.line2doc(f.readline())
MalletCorpus
python
apache__thrift
lib/py/src/protocol/TBase.py
{ "start": 2042, "end": 2093 }
class ____(TBase, Exception): pass
TExceptionBase
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/test/steps/common.py
{ "start": 6827, "end": 9119 }
class ____(SimpleDockerStep): """A step to run QA checks for a connectors. More details in https://github.com/airbytehq/airbyte/blob/main/airbyte-ci/connectors/connectors_qa/README.md """ def __init__(self, context: ConnectorContext) -> None: code_directory = context.connector.code_directory documentation_file_path = context.connector.documentation_file_path migration_guide_file_path = context.connector.migration_guide_file_path icon_path = context.connector.icon_path technical_name = context.connector.technical_name # When the connector is strict-encrypt, we should run QA checks on the main one as it's the one whose artifacts gets released if context.connector.technical_name.endswith("-strict-encrypt"): technical_name = technical_name.replace("-strict-encrypt", "") code_directory = Path(str(code_directory).replace("-strict-encrypt", "")) if documentation_file_path: documentation_file_path = Path(str(documentation_file_path).replace("-strict-encrypt", "")) if migration_guide_file_path: migration_guide_file_path = Path(str(migration_guide_file_path).replace("-strict-encrypt", "")) if icon_path: icon_path = Path(str(icon_path).replace("-strict-encrypt", "")) super().__init__( title=f"Run QA checks for {technical_name}", context=context, paths_to_mount=[ MountPath(code_directory), # These paths are optional # But their absence might make the QA check fail MountPath(documentation_file_path, optional=True), MountPath(migration_guide_file_path, optional=True), MountPath(icon_path, optional=True), ], internal_tools=[ MountPath(INTERNAL_TOOL_PATHS.CONNECTORS_QA.value), ], secret_env_variables={"DOCKER_HUB_USERNAME": context.docker_hub_username, "DOCKER_HUB_PASSWORD": context.docker_hub_password} if context.docker_hub_username and context.docker_hub_password else None, command=["connectors-qa", "run", f"--name={technical_name}"], )
QaChecks
python
sympy__sympy
sympy/polys/numberfields/galois_resolvents.py
{ "start": 1603, "end": 1661 }
class ____(GaloisGroupException): ...
ResolventException
python
Lightning-AI__lightning
examples/pytorch/bug_report/bug_report_model.py
{ "start": 388, "end": 1701 }
class ____(LightningModule): def __init__(self): super().__init__() self.layer = torch.nn.Linear(32, 2) def forward(self, x): return self.layer(x) def training_step(self, batch, batch_idx): loss = self(batch).sum() self.log("train_loss", loss) return {"loss": loss} def validation_step(self, batch, batch_idx): loss = self(batch).sum() self.log("valid_loss", loss) def test_step(self, batch, batch_idx): loss = self(batch).sum() self.log("test_loss", loss) def configure_optimizers(self): return torch.optim.SGD(self.layer.parameters(), lr=0.1) def run(): train_data = DataLoader(RandomDataset(32, 64), batch_size=2) val_data = DataLoader(RandomDataset(32, 64), batch_size=2) test_data = DataLoader(RandomDataset(32, 64), batch_size=2) model = BoringModel() trainer = Trainer( default_root_dir=os.getcwd(), limit_train_batches=1, limit_val_batches=1, limit_test_batches=1, num_sanity_val_steps=0, max_epochs=1, enable_model_summary=False, ) trainer.fit(model, train_dataloaders=train_data, val_dataloaders=val_data) trainer.test(model, dataloaders=test_data) if __name__ == "__main__": run()
BoringModel
python
coleifer__peewee
tests/fields.py
{ "start": 1614, "end": 2312 }
class ____(ModelTestCase): requires = [DefaultValues] def test_default_values(self): d = DefaultValues() self.assertEqual(d.data, 17) self.assertEqual(d.data_callable, 1337) d.save() d_db = DefaultValues.get(DefaultValues.id == d.id) self.assertEqual(d_db.data, 17) self.assertEqual(d_db.data_callable, 1337) def test_defaults_create(self): d = DefaultValues.create() self.assertEqual(d.data, 17) self.assertEqual(d.data_callable, 1337) d_db = DefaultValues.get(DefaultValues.id == d.id) self.assertEqual(d_db.data, 17) self.assertEqual(d_db.data_callable, 1337)
TestDefaultValues
python
ipython__ipython
IPython/core/magics/ast_mod.py
{ "start": 5669, "end": 7649 }
class ____(NodeTransformer): """ Mangle given names in and ast tree to make sure they do not conflict with user code. """ enabled: bool = True debug: bool = False def log(self, *args, **kwargs): if self.debug: print(*args, **kwargs) def __init__(self, predicate=None): if predicate is None: predicate = lambda name: name.startswith("___") self.predicate = predicate def visit_Name(self, node): if self.predicate(node.id): self.log("Mangling", node.id) # Once in the ast we do not need # names to be valid identifiers. node.id = "mangle-" + node.id else: self.log("Not mangling", node.id) return node def visit_FunctionDef(self, node): if self.predicate(node.name): self.log("Mangling", node.name) node.name = "mangle-" + node.name else: self.log("Not mangling", node.name) for arg in node.args.args: if self.predicate(arg.arg): self.log("Mangling function arg", arg.arg) arg.arg = "mangle-" + arg.arg else: self.log("Not mangling function arg", arg.arg) return self.generic_visit(node) def visit_ImportFrom(self, node: ImportFrom): return self._visit_Import_and_ImportFrom(node) def visit_Import(self, node: Import): return self._visit_Import_and_ImportFrom(node) def _visit_Import_and_ImportFrom(self, node: Union[Import, ImportFrom]): for alias in node.names: asname = alias.name if alias.asname is None else alias.asname if self.predicate(asname): new_name: str = "mangle-" + asname self.log("Mangling Alias", new_name) alias.asname = new_name else: self.log("Not mangling Alias", alias.asname) return node
Mangler
python
walkccc__LeetCode
solutions/1281. Subtract the Product and Sum of Digits of an Integer/1281.py
{ "start": 0, "end": 191 }
class ____: def subtractProductAndSum(self, n: int) -> int: prod = 1 summ = 0 while n > 0: prod *= n % 10 summ += n % 10 n //= 10 return prod - summ
Solution
python
allegroai__clearml
clearml/backend_api/services/v2_23/frames.py
{ "start": 172407, "end": 173337 }
class ____(Request): """ Gets the count of frames matching the given dataview :param dataview: Dataview ID :type dataview: str """ _service = "frames" _action = "get_count_for_dataview_id" _version = "2.23" _schema = { "definitions": {}, "properties": {"dataview": {"description": "Dataview ID", "type": "string"}}, "required": ["dataview"], } def __init__(self, dataview, **kwargs): super(GetCountForDataviewIdRequest, self).__init__(**kwargs) self.dataview = dataview @schema_property("dataview") def dataview(self): return self._property_dataview @dataview.setter def dataview(self, value): if value is None: self._property_dataview = None return self.assert_isinstance(value, "dataview", six.string_types) self._property_dataview = value
GetCountForDataviewIdRequest
python
sympy__sympy
sympy/plotting/pygletplot/plot_modes.py
{ "start": 4388, "end": 5352 }
class ____(PlotSurface): i_vars, d_vars = 'tp', 'r' intervals = [[0, 2*pi, 40], [0, pi, 20]] aliases = ['spherical'] is_default = False def _get_sympy_evaluator(self): fr = self.d_vars[0] t = self.u_interval.v p = self.v_interval.v def e(_t, _p): _r = float(fr.subs(t, _t).subs(p, _p)) return (_r*p_cos(_t)*p_sin(_p), _r*p_sin(_t)*p_sin(_p), _r*p_cos(_p)) return e def _get_lambda_evaluator(self): fr = self.d_vars[0] t = self.u_interval.v p = self.v_interval.v fx = fr * cos(t) * sin(p) fy = fr * sin(t) * sin(p) fz = fr * cos(p) return lambdify([t, p], [fx, fy, fz]) Cartesian2D._register() Cartesian3D._register() ParametricCurve2D._register() ParametricCurve3D._register() ParametricSurface._register() Polar._register() Cylindrical._register() Spherical._register()
Spherical
python
aimacode__aima-python
deep_learning4e.py
{ "start": 703, "end": 1077 }
class ____: """ A layer in a neural network based on a computational graph. :param size: number of units in the current layer """ def __init__(self, size): self.nodes = np.array([Node() for _ in range(size)]) def forward(self, inputs): """Define the operation to get the output of this layer""" raise NotImplementedError
Layer
python
ray-project__ray
python/ray/tune/search/_mock.py
{ "start": 199, "end": 1168 }
class ____(Searcher): def __init__(self, **kwargs): self.live_trials = {} self.counter = {"result": 0, "complete": 0} self.final_results = [] self.stall = False self.results = [] super(_MockSearcher, self).__init__(**kwargs) def suggest(self, trial_id: str): if not self.stall: self.live_trials[trial_id] = 1 return {"test_variable": 2} return None def on_trial_result(self, trial_id: str, result: Dict): self.counter["result"] += 1 self.results += [result] def on_trial_complete( self, trial_id: str, result: Optional[Dict] = None, error: bool = False ): self.counter["complete"] += 1 if result: self._process_result(result) if trial_id in self.live_trials: del self.live_trials[trial_id] def _process_result(self, result: Dict): self.final_results += [result]
_MockSearcher
python
huggingface__transformers
src/transformers/models/layoutlmv2/modeling_layoutlmv2.py
{ "start": 51533, "end": 60065 }
class ____(LayoutLMv2PreTrainedModel): def __init__(self, config, has_visual_segment_embedding=True): r""" has_visual_segment_embedding (`bool`, *optional*, defaults to `True`): Whether or not to add visual segment embeddings. """ super().__init__(config) self.num_labels = config.num_labels config.has_visual_segment_embedding = has_visual_segment_embedding self.layoutlmv2 = LayoutLMv2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, bbox: Optional[torch.LongTensor] = None, image: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: r""" input_ids (`torch.LongTensor` of shape `batch_size, sequence_length`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. 
image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`): Batch of document images. token_type_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) Example: In this example below, we give the LayoutLMv2 model an image (of texts) and ask it a question. It will give us a prediction of what it thinks the answer is (the span of the answer within the texts parsed from the image). ```python >>> from transformers import AutoProcessor, LayoutLMv2ForQuestionAnswering, set_seed >>> import torch >>> from PIL import Image >>> from datasets import load_dataset >>> set_seed(0) >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") >>> model = LayoutLMv2ForQuestionAnswering.from_pretrained("microsoft/layoutlmv2-base-uncased") >>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa") >>> image = dataset["test"][0]["image"] >>> question = "When is coffee break?" 
>>> encoding = processor(image, question, return_tensors="pt") >>> outputs = model(**encoding) >>> predicted_start_idx = outputs.start_logits.argmax(-1).item() >>> predicted_end_idx = outputs.end_logits.argmax(-1).item() >>> predicted_start_idx, predicted_end_idx (30, 191) >>> predicted_answer_tokens = encoding.input_ids.squeeze()[predicted_start_idx : predicted_end_idx + 1] >>> predicted_answer = processor.tokenizer.decode(predicted_answer_tokens) >>> predicted_answer # results are not good without further fine-tuning '44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. 
plus questions from' ``` ```python >>> target_start_index = torch.tensor([7]) >>> target_end_index = torch.tensor([14]) >>> outputs = model(**encoding, start_positions=target_start_index, end_positions=target_end_index) >>> predicted_answer_span_start = outputs.start_logits.argmax(-1).item() >>> predicted_answer_span_end = outputs.end_logits.argmax(-1).item() >>> predicted_answer_span_start, predicted_answer_span_end (30, 191) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv2( input_ids=input_ids, bbox=bbox, image=image, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # only take the text part of the output representations sequence_output = outputs[0][:, :seq_length] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = 
(start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "LayoutLMv2ForQuestionAnswering", "LayoutLMv2ForSequenceClassification", "LayoutLMv2ForTokenClassification", "LayoutLMv2Layer", "LayoutLMv2Model", "LayoutLMv2PreTrainedModel", ]
LayoutLMv2ForQuestionAnswering
python
pandas-dev__pandas
asv_bench/benchmarks/join_merge.py
{ "start": 1316, "end": 1938 }
class ____: params = ([0, 1], [True, False]) param_names = ["axis", "ignore_index"] def setup(self, axis, ignore_index): frame_c = DataFrame(np.zeros((10000, 200), dtype=np.float32, order="C")) self.frame_c = [frame_c] * 20 frame_f = DataFrame(np.zeros((10000, 200), dtype=np.float32, order="F")) self.frame_f = [frame_f] * 20 def time_c_ordered(self, axis, ignore_index): concat(self.frame_c, axis=axis, ignore_index=ignore_index) def time_f_ordered(self, axis, ignore_index): concat(self.frame_f, axis=axis, ignore_index=ignore_index)
ConcatDataFrames
python
getsentry__sentry
src/sentry/search/eap/types.py
{ "start": 680, "end": 2112 }
class ____: # Automatically add id, etc. if there are no aggregates auto_fields: bool = False # Ignore aggregate conditions, if false the query will run but not use any aggregate conditions use_aggregate_conditions: bool = True # TODO: do we need parser_config_overrides? it looks like its just for alerts # Whether to process the results from snuba process_results: bool = True # If a field is private, it will only be available if it is in the `fields_acl` fields_acl: FieldsACL = field(default_factory=lambda: FieldsACL()) # If set to True, do not extrapolate any values regardless of individual aggregate settings disable_aggregate_extrapolation: bool = False extrapolation_mode: ExtrapolationMode.ValueType | None = None # Whether to set the timestamp granularities to stable buckets stable_timestamp_quantization: bool = True def extra_conditions( self, search_resolver: "SearchResolver", selected_columns: list[str] | None, equations: list[str] | None, ) -> TraceItemFilter | None: return None CONFIDENCES: dict[Reliability.ValueType, Literal["low", "high"]] = { Reliability.RELIABILITY_LOW: "low", Reliability.RELIABILITY_HIGH: "high", } Confidence = Literal["low", "high"] | None ConfidenceData = list[dict[str, Confidence]] # These are the strings that are used in the API for convienence
SearchResolverConfig
python
huggingface__transformers
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
{ "start": 123252, "end": 123514 }
class ____(MoeCausalLMOutputWithPast): r""" generation_step (`int`, *optional*): Current generation step, used to track which `trailing_text_hidden` should be used. """ generation_step: Optional[int] = None
Qwen3OmniMoeTalkerOutputWithPast
python
dagster-io__dagster
python_modules/dagster/dagster/components/resolved/model.py
{ "start": 2523, "end": 9201 }
class ____: """Contains information on how to resolve a value from YAML into the corresponding :py:class:`Resolved` class field. You can attach a resolver to a field's type annotation to control how the value is resolved. Example: .. code-block:: python import datetime from typing import Annotated import dagster as dg def resolve_timestamp( context: dg.ResolutionContext, raw_timestamp: str, ) -> datetime.datetime: return datetime.datetime.fromisoformat( context.resolve_value(raw_timestamp, as_type=str), ) class MyClass(dg.Resolvable, dg.Model): event: str # the yaml field will be a string, which is then parsed into a datetime object timestamp: Annotated[ datetime.datetime, dg.Resolver(resolve_timestamp, model_field_type=str), ] """ def __init__( self, fn: Union[ParentFn, AttrWithContextFn, Callable[["ResolutionContext", Any], Any]], *, model_field_name: Optional[str] = None, model_field_type: Optional[type | UnionType] = None, description: Optional[str] = None, examples: Optional[list[Any]] = None, inject_before_resolve: bool = True, ): """Resolve this field by invoking the function which will receive the corresponding field value from the model. Args: fn (Callable[[ResolutionContext, Any], Any]): The custom resolution function. model_field_name (Optional[str]): Override the name of the field on the generated pydantic model. This is the name that to be used in yaml. model_field_type (Optional[type]): Override the type of this field on the generated pydantic model. This will define the schema used in yaml. description (Optional[str]): Description to add to the generated pydantic model. This will show up in documentation and IDEs during yaml editing. examples (Optional[list[Any]]): Example values that are valid when loading from yaml. inject_before_resolve (bool): If True (Default) string values will be evaluated to perform possible template resolution before calling the resolver function. 
""" if not isinstance(fn, (ParentFn, AttrWithContextFn)): if not callable(fn): check.param_invariant( callable(fn), "fn", f"must be callable if not ParentFn or AttrWithContextFn. Got {fn}", ) self.fn = AttrWithContextFn(fn) else: self.fn = fn self.model_field_name = model_field_name self.model_field_type = model_field_type self.description = description self.examples = examples self.inject_before_resolve = inject_before_resolve super().__init__() @staticmethod def from_model(fn: Callable[["ResolutionContext", Any], Any], **kwargs): """Resolve this field by invoking the function which will receive the entire parent model.""" return Resolver(ParentFn(fn), **kwargs) @staticmethod def union(arg_resolver_pairs: Sequence[tuple[type, "Resolver"]]): field_types = tuple(r.model_field_type or t for t, r in arg_resolver_pairs) return Resolver( fn=functools.partial(resolve_union, [r for _, r in arg_resolver_pairs]), model_field_type=Union[field_types], # pyright: ignore[reportInvalidTypeArguments] ) @staticmethod def default( *, model_field_name: Optional[str] = None, model_field_type: Optional[type | UnionType] = None, description: Optional[str] = None, examples: Optional[list[Any]] = None, ): """Default recursive resolution.""" return Resolver( _default_fn, model_field_name=model_field_name, model_field_type=model_field_type, description=description, examples=examples, inject_before_resolve=False, ) @staticmethod def passthrough( description: Optional[str] = None, examples: Optional[list[Any]] = None, ): """Resolve this field by returning the underlying value, without resolving any nested resolvers or processing any template variables. 
""" return Resolver( _passthrough_fn, inject_before_resolve=False, description=description, examples=examples, ) def execute( self, context: "ResolutionContext", model: BaseModel, field_name: str, ) -> Any: from dagster.components.resolved.context import ResolutionException try: if isinstance(self.fn, ParentFn): return self.fn.callable(context, model) elif isinstance(self.fn, AttrWithContextFn): field_name = self.model_field_name or field_name attr = getattr(model, field_name) context = context.at_path(field_name) # handle template injection if self.inject_before_resolve and isinstance(attr, str): attr = context.resolve_value(attr) if not isinstance(attr, str): return attr return self.fn.callable(context, attr) except ResolutionException: raise # already processed except Exception: raise context.build_resolve_fn_exc( traceback.format_exception(*sys.exc_info()), field_name=field_name, model=model, ) from None raise ValueError(f"Unsupported Resolver type: {self.fn}") @property def is_default(self): return self.fn is _default_fn @property def resolves_from_parent_object(self) -> bool: return isinstance(self.fn, ParentFn) def with_outer_resolver(self, outer: "Resolver"): description = outer.description or self.description examples = outer.examples or self.examples return Resolver( self.fn, model_field_name=self.model_field_name, model_field_type=self.model_field_type, description=description, examples=examples, inject_before_resolve=self.inject_before_resolve, ) T = TypeVar("T") Injected = Annotated[T, Resolver.default(model_field_type=str)]
Resolver
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/conditional_provider/package.py
{ "start": 216, "end": 566 }
class ____(Package): """Mimic the real netlib-lapack, that may be built on top of an optimized blas. """ homepage = "https://dev.null" version("1.0") variant("disable-v1", default=False, description="nope") provides("v2") provides("v1", when="~disable-v1") depends_on("v1", when="+disable-v1")
ConditionalProvider
python
dask__dask
dask/dataframe/dask_expr/_expr.py
{ "start": 44192, "end": 44362 }
class ____(RenameFrame): _preserves_partitioning_information = True @staticmethod def operation(df, columns): return _rename(columns, df)
ColumnsSetter
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/missingTypeArg1.py
{ "start": 1152, "end": 1300 }
class ____: @staticmethod def method1(data: int | str | dict[str, str]): if isinstance(data, dict | str): return data
ClassA
python
wandb__wandb
wandb/sdk/launch/agent/config.py
{ "start": 2592, "end": 5262 }
class ____(BaseModel): type: Optional[BuilderType] = Field( None, description="The type of builder to use.", ) destination: Optional[str] = Field( None, description="The destination to use for the built image. If not provided, " "the image will be pushed to the registry.", ) platform: Optional[TargetPlatform] = Field( None, description="The platform to use for the built image. If not provided, " "the platform will be detected automatically.", ) build_context_store: Optional[str] = Field( None, description="The build context store to use. Required for kaniko builds.", alias="build-context-store", ) build_job_name: Optional[str] = Field( "wandb-launch-container-build", description="Name prefix of the build job.", alias="build-job-name", ) secret_name: Optional[str] = Field( None, description="The name of the secret to use for the build job.", alias="secret-name", ) secret_key: Optional[str] = Field( None, description="The key of the secret to use for the build job.", alias="secret-key", ) kaniko_image: Optional[str] = Field( "gcr.io/kaniko-project/executor:latest", description="The image to use for the kaniko executor.", alias="kaniko-image", ) @validator("build_context_store") # type: ignore @classmethod def validate_build_context_store( cls, build_context_store: Optional[str] ) -> Optional[str]: """Validate that the build context store is a valid container registry URI.""" if build_context_store is None: return None for regex in [ S3_URI_RE, GCS_URI_RE, AZURE_BLOB_REGEX, ]: if regex.match(build_context_store): return build_context_store raise ValueError( "Invalid build context store. Build context store must be a URI for an " "S3 bucket, GCS bucket, or Azure blob." 
) @root_validator(pre=True) # type: ignore @classmethod def validate_docker(cls, values: dict) -> dict: """Right now there are no required fields for docker builds.""" return values @validator("destination") # type: ignore @classmethod def validate_destination(cls, destination: Optional[str]) -> Optional[str]: """Validate that the destination is a valid container registry URI.""" if destination is None: return None return validate_registry_uri(destination)
BuilderConfig
python
kubernetes-client__python
kubernetes/client/models/v1_api_service_status.py
{ "start": 383, "end": 3626 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'conditions': 'list[V1APIServiceCondition]' } attribute_map = { 'conditions': 'conditions' } def __init__(self, conditions=None, local_vars_configuration=None): # noqa: E501 """V1APIServiceStatus - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._conditions = None self.discriminator = None if conditions is not None: self.conditions = conditions @property def conditions(self): """Gets the conditions of this V1APIServiceStatus. # noqa: E501 Current service state of apiService. # noqa: E501 :return: The conditions of this V1APIServiceStatus. # noqa: E501 :rtype: list[V1APIServiceCondition] """ return self._conditions @conditions.setter def conditions(self, conditions): """Sets the conditions of this V1APIServiceStatus. Current service state of apiService. # noqa: E501 :param conditions: The conditions of this V1APIServiceStatus. 
# noqa: E501 :type: list[V1APIServiceCondition] """ self._conditions = conditions def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1APIServiceStatus): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1APIServiceStatus): return True return self.to_dict() != other.to_dict()
V1APIServiceStatus
python
scipy__scipy
scipy/stats/tests/test_multivariate.py
{ "start": 116425, "end": 120471 }
class ____: def test_reproducibility(self): rng = np.random.RandomState(514) eigs = (.5, .8, 1.2, 1.5) x = random_correlation.rvs(eigs, random_state=rng) x2 = random_correlation.rvs(eigs, random_state=514) expected = np.array([[1., -0.184851, 0.109017, -0.227494], [-0.184851, 1., 0.231236, 0.326669], [0.109017, 0.231236, 1., -0.178912], [-0.227494, 0.326669, -0.178912, 1.]]) assert_array_almost_equal(x, expected) assert_array_almost_equal(x2, expected) def test_invalid_eigs(self): assert_raises(ValueError, random_correlation.rvs, None) assert_raises(ValueError, random_correlation.rvs, 'test') assert_raises(ValueError, random_correlation.rvs, 2.5) assert_raises(ValueError, random_correlation.rvs, [2.5]) assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]]) assert_raises(ValueError, random_correlation.rvs, [2.5, -.5]) assert_raises(ValueError, random_correlation.rvs, [1, 2, .1]) def test_frozen_matrix(self): eigs = (.5, .8, 1.2, 1.5) frozen = random_correlation(eigs) frozen_seed = random_correlation(eigs, seed=514) rvs1 = random_correlation.rvs(eigs, random_state=514) rvs2 = frozen.rvs(random_state=514) rvs3 = frozen_seed.rvs() assert_equal(rvs1, rvs2) assert_equal(rvs1, rvs3) def test_definition(self): # Test the definition of a correlation matrix in several dimensions: # # 1. Det is product of eigenvalues (and positive by construction # in examples) # 2. 1's on diagonal # 3. Matrix is symmetric def norm(i, e): return i*e/sum(e) rng = np.random.RandomState(123) eigs = [norm(i, rng.uniform(size=i)) for i in range(2, 6)] eigs.append([4,0,0,0]) ones = [[1.]*len(e) for e in eigs] xs = [random_correlation.rvs(e, random_state=rng) for e in eigs] # Test that determinants are products of eigenvalues # These are positive by construction # Could also test that the eigenvalues themselves are correct, # but this seems sufficient. 
dets = [np.fabs(np.linalg.det(x)) for x in xs] dets_known = [np.prod(e) for e in eigs] assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13) # Test for 1's on the diagonal diags = [np.diag(x) for x in xs] for a, b in zip(diags, ones): assert_allclose(a, b, rtol=1e-13) # Correlation matrices are symmetric for x in xs: assert_allclose(x, x.T, rtol=1e-13) def test_to_corr(self): # Check some corner cases in to_corr # ajj == 1 m = np.array([[0.1, 0], [0, 1]], dtype=float) m = random_correlation._to_corr(m) assert_allclose(m, np.array([[1, 0], [0, 0.1]])) # Floating point overflow; fails to compute the correct # rotation, but should still produce some valid rotation # rather than infs/nans with np.errstate(over='ignore'): g = np.array([[0, 1], [-1, 0]]) m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float) m = random_correlation._to_corr(m0.copy()) assert_allclose(m, g.T.dot(m0).dot(g)) m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float) m = random_correlation._to_corr(m0.copy()) assert_allclose(m, g.T.dot(m0).dot(g)) # Zero discriminant; should set the first diag entry to 1 m0 = np.array([[2, 1], [1, 2]], dtype=float) m = random_correlation._to_corr(m0.copy()) assert_allclose(m[0,0], 1) # Slightly negative discriminant; should be approx correct still m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float) m = random_correlation._to_corr(m0.copy()) assert_allclose(m[0,0], 1)
TestRandomCorrelation
python
mlflow__mlflow
tests/resources/mlflow-test-plugin/mlflow_test_plugin/file_store.py
{ "start": 78, "end": 379 }
class ____(FileStore): """FileStore provided through entrypoints system""" def __init__(self, store_uri=None, artifact_uri=None): path = urllib.parse.urlparse(store_uri).path if store_uri else None self.is_plugin = True super().__init__(path, artifact_uri)
PluginFileStore
python
pypa__hatch
tests/backend/builders/test_binary.py
{ "start": 7226, "end": 8395 }
class ____: def test_default(self, isolation): config = {"project": {"name": "My.App", "version": "0.1.0"}} builder = BinaryBuilder(str(isolation), config=config) assert builder.config.pyapp_version == builder.config.pyapp_version == "" def test_set(self, isolation): config = { "project": { "name": "My.App", "version": "0.1.0", }, "tool": {"hatch": {"build": {"targets": {"binary": {"pyapp-version": "9000"}}}}}, } builder = BinaryBuilder(str(isolation), config=config) assert builder.config.pyapp_version == "9000" def test_not_string(self, isolation): config = { "project": { "name": "My.App", "version": "0.1.0", }, "tool": {"hatch": {"build": {"targets": {"binary": {"pyapp-version": 9000}}}}}, } builder = BinaryBuilder(str(isolation), config=config) with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.binary.pyapp-version` must be a string"): _ = builder.config.pyapp_version
TestPyAppVersion
python
django__django
django/contrib/gis/geos/collections.py
{ "start": 3472, "end": 3549 }
class ____(GeometryCollection): _allowed = Point _typeid = 4
MultiPoint
python
apache__airflow
devel-common/src/tests_common/test_utils/mock_operators.py
{ "start": 2529, "end": 3592 }
class ____(BaseOperatorLink): """Custom Operator Link for Google BigQuery Console.""" index: int = attr.ib() @property def name(self) -> str: return f"BigQuery Console #{self.index + 1}" @property def xcom_key(self) -> str: return f"bigquery_{self.index + 1}" def get_link(self, operator, *, ti_key): if AIRFLOW_V_3_0_PLUS: search_queries = XCom.get_many( task_id=ti_key.task_id, dag_id=ti_key.dag_id, run_id=ti_key.run_id, key="search_query" ).first() search_queries = XCom.deserialize_value(search_queries) else: search_queries = XCom.get_one( task_id=ti_key.task_id, dag_id=ti_key.dag_id, run_id=ti_key.run_id, key="search_query" ) if not search_queries: return None if len(search_queries) < self.index: return None search_query = search_queries[self.index] return f"https://console.cloud.google.com/bigquery?j={search_query}"
CustomBaseIndexOpLink
python
dask__distributed
distributed/utils_test.py
{ "start": 69179, "end": 72176 }
class ____(Worker): """Block get_data RPC call until at least barrier_count connections are going on in parallel at the same time See also -------- BlockedGatherDep BlockedGetData BlockedExecute """ def __init__(self, *args, barrier_count, **kwargs): # TODO just use asyncio.Barrier (needs Python >=3.11) self.barrier_count = barrier_count self.wait_get_data = asyncio.Event() super().__init__(*args, **kwargs) async def get_data(self, comm, *args, **kwargs): self.barrier_count -= 1 if self.barrier_count > 0: await self.wait_get_data.wait() else: self.wait_get_data.set() return await super().get_data(comm, *args, **kwargs) @contextmanager def freeze_data_fetching(w: Worker, *, jump_start: bool = False) -> Iterator[None]: """Prevent any task from transitioning from fetch to flight on the worker while inside the context, simulating a situation where the worker's network comms are saturated. This is not the same as setting the worker to Status=paused, which would also inform the Scheduler and prevent further tasks to be enqueued on the worker. Parameters ---------- w: Worker The Worker on which tasks will not transition from fetch to flight jump_start: bool If False, tasks will remain in fetch state after exiting the context, until something else triggers ensure_communicating. If True, trigger ensure_communicating on exit; this simulates e.g. an unrelated worker moving out of in_flight_workers. """ old_count_limit = w.state.transfer_incoming_count_limit old_threshold = w.state.transfer_incoming_bytes_throttle_threshold w.state.transfer_incoming_count_limit = 0 w.state.transfer_incoming_bytes_throttle_threshold = 0 yield w.state.transfer_incoming_count_limit = old_count_limit w.state.transfer_incoming_bytes_throttle_threshold = old_threshold if jump_start: w.status = Status.paused w.status = Status.running @contextmanager def freeze_batched_send(bcomm: BatchedSend) -> Iterator[LockedComm]: """ Contextmanager blocking writes to a `BatchedSend` from sending over the network. 
The returned `LockedComm` object can be used for control flow and inspection via its ``read_event``, ``read_queue``, ``write_event``, and ``write_queue`` attributes. On exit, any writes that were blocked are un-blocked, and the original comm of the `BatchedSend` is restored. """ assert not bcomm.closed() assert bcomm.comm assert not bcomm.comm.closed() orig_comm = bcomm.comm write_event = asyncio.Event() write_queue: asyncio.Queue = asyncio.Queue() bcomm.comm = locked_comm = LockedComm( orig_comm, None, None, write_event, write_queue ) try: yield locked_comm finally: write_event.set() bcomm.comm = orig_comm
BarrierGetData
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/class_interval.py
{ "start": 3325, "end": 3470 }
class ____: def foo(self): return self.bar() # Interval: [3,4] def bar(self): return _test_source() # Interval: [3,4]
B7
python
Textualize__textual
docs/examples/styles/border_sub_title_align_all.py
{ "start": 361, "end": 2444 }
class ____(App[None]): CSS_PATH = "border_sub_title_align_all.tcss" def compose(self): with Grid(): yield make_label_container( # (1)! "This is the story of", "lbl1", "[b]Border [i]title[/i][/]", "[u][r]Border[/r] subtitle[/]", ) yield make_label_container( # (2)! "a Python", "lbl2", "[b red]Left, but it's loooooooooooong", "[reverse]Center, but it's loooooooooooong", ) yield make_label_container( # (3)! "developer that", "lbl3", "[b i on purple]Left[/]", "[r u white on black]@@@[/]", ) yield make_label_container( "had to fill up", "lbl4", "", # (4)! "[link='https://textual.textualize.io']Left[/]", # (5)! ) yield make_label_container( # (6)! "nine labels", "lbl5", "Title", "Subtitle" ) yield make_label_container( # (7)! "and ended up redoing it", "lbl6", "Title", "Subtitle", ) yield make_label_container( # (8)! "because the first try", "lbl7", "Title, but really loooooooooong!", "Subtitle, but really loooooooooong!", ) yield make_label_container( # (9)! "had some labels", "lbl8", "Title, but really loooooooooong!", "Subtitle, but really loooooooooong!", ) yield make_label_container( # (10)! "that were too long.", "lbl9", "Title, but really loooooooooong!", "Subtitle, but really loooooooooong!", ) if __name__ == "__main__": app = BorderSubTitleAlignAll() app.run()
BorderSubTitleAlignAll
python
spyder-ide__spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
{ "start": 4451, "end": 4644 }
class ____: Context = 'context_menu' ConvertTo = 'convert_to_submenu' Header = 'header_context_menu' Index = 'index_context_menu' Options = 'options_menu'
DataframeEditorMenus
python
neetcode-gh__leetcode
python/0909-snakes-and-ladders.py
{ "start": 0, "end": 912 }
class ____: def snakesAndLadders(self, board: List[List[int]]) -> int: length = len(board) board.reverse() def intToPos(square): r = (square - 1) // length c = (square - 1) % length if r % 2: c = length - 1 - c return [r, c] q = deque() q.append([1, 0]) # [square, moves] visit = set() while q: square, moves = q.popleft() for i in range(1, 7): nextSquare = square + i r, c = intToPos(nextSquare) if board[r][c] != -1: nextSquare = board[r][c] if nextSquare == length * length: return moves + 1 if nextSquare not in visit: visit.add(nextSquare) q.append([nextSquare, moves + 1]) return -1
Solution
python
pyca__cryptography
tests/hazmat/primitives/decrepit/test_3des.py
{ "start": 4857, "end": 5637 }
class ____: test_kat = generate_encrypt_test( load_nist_vectors, os.path.join("ciphers", "3DES", "ECB"), [ "TECBinvperm.rsp", "TECBpermop.rsp", "TECBsubtab.rsp", "TECBvarkey.rsp", "TECBvartext.rsp", ], lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys)), lambda **kwargs: modes.ECB(), ) test_mmt = generate_encrypt_test( load_nist_vectors, os.path.join("ciphers", "3DES", "ECB"), ["TECBMMT1.rsp", "TECBMMT2.rsp", "TECBMMT3.rsp"], lambda key1, key2, key3, **kwargs: algorithms.TripleDES( binascii.unhexlify(key1 + key2 + key3) ), lambda **kwargs: modes.ECB(), )
TestTripleDESModeECB
python
PrefectHQ__prefect
src/prefect/server/orchestration/global_policy.py
{ "start": 2718, "end": 3656 }
class ____(BaseOrchestrationPolicy[orm_models.TaskRun, core.TaskRunPolicy]): """ Global transforms that run against task-run-state transitions in priority order. These transforms are intended to run immediately before and after a state transition is validated. """ @staticmethod def priority() -> list[ Union[ type[BaseUniversalTransform[orm_models.TaskRun, core.TaskRunPolicy]], type[BaseOrchestrationRule[orm_models.TaskRun, core.TaskRunPolicy]], ] ]: return cast( list[ Union[ type[ BaseUniversalTransform[orm_models.TaskRun, core.TaskRunPolicy] ], type[BaseOrchestrationRule[orm_models.TaskRun, core.TaskRunPolicy]], ] ], COMMON_GLOBAL_TRANSFORMS(), ) + [IncrementTaskRunCount]
GlobalTaskPolicy
python
getsentry__sentry
src/sentry/conf/types/sdk_config.py
{ "start": 173, "end": 1255 }
class ____(TypedDict): release: str | None environment: str project_root: str in_app_include: list[str] debug: bool send_default_pii: bool auto_enabling_integrations: bool keep_alive: NotRequired[bool] spotlight: NotRequired[str | bool | None] add_full_stack: NotRequired[bool] send_client_reports: NotRequired[bool] traces_sampler: NotRequired[Callable[[dict[str, Any]], float]] before_send: NotRequired[Callable[[Event, Hint], Event | None]] before_send_transaction: NotRequired[Callable[[Event, Hint], Event | None]] profiles_sample_rate: NotRequired[float] profiles_sampler: NotRequired[Callable[[dict[str, Any]], float]] profiler_mode: NotRequired[Literal["sleep", "thread", "gevent", "unknown"]] profile_session_sample_rate: NotRequired[float] profile_lifecycle: NotRequired[Literal["manual", "trace"]] enable_db_query_source: NotRequired[bool] enable_http_request_source: NotRequired[bool] db_query_source_threshold_ms: NotRequired[int] _experiments: NotRequired[Any] # TODO
SdkConfig
python
django__django
django/contrib/postgres/forms/array.py
{ "start": 5857, "end": 8422 }
class ____(forms.Field): default_error_messages = { "item_invalid": _("Item %(nth)s in the array did not validate:"), } def __init__(self, base_field, size, *, remove_trailing_nulls=False, **kwargs): self.base_field = base_field self.size = size self.remove_trailing_nulls = remove_trailing_nulls widget = SplitArrayWidget(widget=base_field.widget, size=size) kwargs.setdefault("widget", widget) super().__init__(**kwargs) def _remove_trailing_nulls(self, values): index = None if self.remove_trailing_nulls: for i, value in reversed(list(enumerate(values))): if value in self.base_field.empty_values: index = i else: break if index is not None: values = values[:index] return values, index def to_python(self, value): value = super().to_python(value) return [self.base_field.to_python(item) for item in value] def clean(self, value): cleaned_data = [] errors = [] if not any(value) and self.required: raise ValidationError(self.error_messages["required"]) max_size = max(self.size, len(value)) for index in range(max_size): item = value[index] try: cleaned_data.append(self.base_field.clean(item)) except ValidationError as error: errors.append( prefix_validation_error( error, self.error_messages["item_invalid"], code="item_invalid", params={"nth": index + 1}, ) ) cleaned_data.append(item) else: errors.append(None) cleaned_data, null_index = self._remove_trailing_nulls(cleaned_data) if null_index is not None: errors = errors[:null_index] errors = list(filter(None, errors)) if errors: raise ValidationError(list(chain.from_iterable(errors))) return cleaned_data def has_changed(self, initial, data): try: data = self.to_python(data) except ValidationError: pass else: data, _ = self._remove_trailing_nulls(data) if initial in self.empty_values and data in self.empty_values: return False return super().has_changed(initial, data)
SplitArrayField
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 9198, "end": 9399 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ("MEMBER", "OWNER", "UNAFFILIATED")
EnterpriseUserAccountMembershipRole
python
pandas-dev__pandas
pandas/tests/series/methods/test_sort_index.py
{ "start": 278, "end": 7766 }
class ____: def test_sort_index_name(self, datetime_series): result = datetime_series.sort_index(ascending=False) assert result.name == datetime_series.name def test_sort_index(self, datetime_series): datetime_series.index = datetime_series.index._with_freq(None) rindex = list(datetime_series.index) np.random.default_rng(2).shuffle(rindex) random_order = datetime_series.reindex(rindex) sorted_series = random_order.sort_index() tm.assert_series_equal(sorted_series, datetime_series) # descending sorted_series = random_order.sort_index(ascending=False) tm.assert_series_equal( sorted_series, datetime_series.reindex(datetime_series.index[::-1]) ) # compat on level sorted_series = random_order.sort_index(level=0) tm.assert_series_equal(sorted_series, datetime_series) # compat on axis sorted_series = random_order.sort_index(axis=0) tm.assert_series_equal(sorted_series, datetime_series) msg = "No axis named 1 for object type Series" with pytest.raises(ValueError, match=msg): random_order.sort_values(axis=1) sorted_series = random_order.sort_index(level=0, axis=0) tm.assert_series_equal(sorted_series, datetime_series) with pytest.raises(ValueError, match=msg): random_order.sort_index(level=0, axis=1) def test_sort_index_inplace(self, datetime_series): datetime_series.index = datetime_series.index._with_freq(None) # For GH#11402 rindex = list(datetime_series.index) np.random.default_rng(2).shuffle(rindex) # descending random_order = datetime_series.reindex(rindex) result = random_order.sort_index(ascending=False, inplace=True) assert result is None expected = datetime_series.reindex(datetime_series.index[::-1]) expected.index = expected.index._with_freq(None) tm.assert_series_equal(random_order, expected) # ascending random_order = datetime_series.reindex(rindex) result = random_order.sort_index(ascending=True, inplace=True) assert result is None expected = datetime_series.copy() expected.index = expected.index._with_freq(None) tm.assert_series_equal(random_order, expected) 
def test_sort_index_level(self): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC")) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] res = s.sort_index(level="A") tm.assert_series_equal(backwards, res) res = s.sort_index(level=["A", "B"]) tm.assert_series_equal(backwards, res) res = s.sort_index(level="A", sort_remaining=False) tm.assert_series_equal(s, res) res = s.sort_index(level=["A", "B"], sort_remaining=False) tm.assert_series_equal(s, res) @pytest.mark.parametrize("level", ["A", 0]) # GH#21052 def test_sort_index_multiindex(self, level): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC")) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] # implicit sort_remaining=True res = s.sort_index(level=level) tm.assert_series_equal(backwards, res) # GH#13496 # sort has no effect without remaining lvls res = s.sort_index(level=level, sort_remaining=False) tm.assert_series_equal(s, res) def test_sort_index_kind(self, sort_kind): # GH#14444 & GH#13589: Add support for sort algo choosing series = Series(index=[3, 2, 1, 4, 3], dtype=object) expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object) index_sorted_series = series.sort_index(kind=sort_kind) tm.assert_series_equal(expected_series, index_sorted_series) def test_sort_index_na_position(self): series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object) expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object) index_sorted_series = series.sort_index(na_position="first") tm.assert_series_equal(expected_series_first, index_sorted_series) expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object) index_sorted_series = series.sort_index(na_position="last") tm.assert_series_equal(expected_series_last, index_sorted_series) def test_sort_index_intervals(self): s = Series( [np.nan, 1, 2, 3], IntervalIndex.from_arrays([0, 1, 2, 3], [1, 2, 3, 4]) ) result = s.sort_index() expected = s tm.assert_series_equal(result, expected) result = 
s.sort_index(ascending=False) expected = Series( [3, 2, 1, np.nan], IntervalIndex.from_arrays([3, 2, 1, 0], [4, 3, 2, 1]) ) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("inplace", [True, False]) @pytest.mark.parametrize( "original_list, sorted_list, ascending, ignore_index, output_index", [ ([2, 3, 6, 1], [2, 3, 6, 1], True, True, [0, 1, 2, 3]), ([2, 3, 6, 1], [2, 3, 6, 1], True, False, [0, 1, 2, 3]), ([2, 3, 6, 1], [1, 6, 3, 2], False, True, [0, 1, 2, 3]), ([2, 3, 6, 1], [1, 6, 3, 2], False, False, [3, 2, 1, 0]), ], ) def test_sort_index_ignore_index( self, inplace, original_list, sorted_list, ascending, ignore_index, output_index ): # GH 30114 ser = Series(original_list) expected = Series(sorted_list, index=output_index) kwargs = { "ascending": ascending, "ignore_index": ignore_index, "inplace": inplace, } if inplace: result_ser = ser.copy() result_ser.sort_index(**kwargs) else: result_ser = ser.sort_index(**kwargs) tm.assert_series_equal(result_ser, expected) tm.assert_series_equal(ser, Series(original_list)) def test_sort_index_ascending_list(self): # GH#16934 # Set up a Series with a three level MultiIndex arrays = [ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], ["one", "two", "one", "two", "one", "two", "one", "two"], [4, 3, 2, 1, 4, 3, 2, 1], ] tuples = zip(*arrays, strict=True) mi = MultiIndex.from_tuples(tuples, names=["first", "second", "third"]) ser = Series(range(8), index=mi) # Sort with boolean ascending result = ser.sort_index(level=["third", "first"], ascending=False) expected = ser.iloc[[4, 0, 5, 1, 6, 2, 7, 3]] tm.assert_series_equal(result, expected) # Sort with list of boolean ascending result = ser.sort_index(level=["third", "first"], ascending=[False, True]) expected = ser.iloc[[0, 4, 1, 5, 2, 6, 3, 7]] tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "ascending", [ None, (True, None), (False, "True"), ], ) def test_sort_index_ascending_bad_value_raises(self, ascending): ser = 
Series(range(10), index=[0, 3, 2, 1, 4, 5, 7, 6, 8, 9]) match = 'For argument "ascending" expected type bool' with pytest.raises(ValueError, match=match): ser.sort_index(ascending=ascending)
TestSeriesSortIndex
python
getsentry__sentry
tests/sentry/api/endpoints/test_index.py
{ "start": 505, "end": 1809 }
class ____(APITestCase): endpoint = "sentry-api-index" def test_session_auth(self) -> None: self.login_as(user=self.user) response = self.get_success_response() assert response.data["version"] == "0" assert response.data["user"]["id"] == str(self.user.id) assert not response.data["auth"] def test_key_auth(self) -> None: org = self.create_organization() key = ApiKey.objects.create(organization_id=org.id) url = reverse("sentry-api-index") response = self.client.get( url, HTTP_AUTHORIZATION=self.create_basic_auth_header(key.key), ) assert response.status_code == 200 assert response.data["version"] == "0" assert response.data["auth"]["scopes"] == key.get_scopes() assert not response.data["user"] def test_token_auth(self) -> None: token = ApiToken.objects.create(user=self.user) url = reverse("sentry-api-index") response = self.client.get(url, HTTP_AUTHORIZATION=f"Bearer {token.token}") assert response.status_code == 200 assert response.data["version"] == "0" assert response.data["auth"]["scopes"] == token.get_scopes() assert response.data["user"]["id"] == str(self.user.id)
ApiIndexTest
python
PyCQA__pylint
tests/functional/p/postponed/postponed_evaluation_pep585.py
{ "start": 836, "end": 896 }
class ____(NamedTuple): my_var: list[int]
CustomNamedTuple2
python
kamyu104__LeetCode-Solutions
Python/partition-string-into-minimum-beautiful-substrings.py
{ "start": 36, "end": 698 }
class ____(object): def minimumBeautifulSubstrings(self, s): """ :type s: str :rtype: int """ max_pow_5 = 1 while max_pow_5*5 <= (1<<len(s))-1: max_pow_5 *= 5 dp = [float("inf")]*(len(s)+1) dp[0] = 0 for i in xrange(len(s)): if s[i] == '0': continue curr = 0 for j in xrange(i, len(s)): curr = curr*2+int(s[j]) if max_pow_5%curr == 0: dp[j+1] = min(dp[j+1], dp[(i-1)+1]+1) return dp[-1] if dp[-1] != float("inf") else -1 # Time: O(n^2) # Space: O(n) # dp
Solution
python
numpy__numpy
numpy/_utils/_pep440.py
{ "start": 3528, "end": 4294 }
class ____: def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, _BaseVersion): return NotImplemented return method(self._key, other._key)
_BaseVersion
python
Textualize__textual
src/textual/visual.py
{ "start": 1559, "end": 2033 }
class ____(Protocol): """An object that supports the textualize protocol.""" def visualize(self, widget: Widget, obj: object) -> Visual | None: """Convert the result of a Widget.render() call into a Visual, using the Visual protocol. Args: widget: The widget that generated the render. obj: The result of the render. Returns: A Visual instance, or `None` if it wasn't possible. """
SupportsVisual
python
openai__openai-python
src/openai/resources/audio/speech.py
{ "start": 9651, "end": 9902 }
class ____: def __init__(self, speech: Speech) -> None: self._speech = speech self.create = to_custom_streamed_response_wrapper( speech.create, StreamedBinaryAPIResponse, )
SpeechWithStreamingResponse
python
pytorch__pytorch
test/test_datapipe.py
{ "start": 90311, "end": 102131 }
class ____(TestCase): def test_isinstance(self): class A(IterDataPipe): pass class B(IterDataPipe): pass a = A() self.assertTrue(isinstance(a, A)) self.assertFalse(isinstance(a, B)) def test_protocol(self): try: from typing import Protocol # type: ignore[attr-defined] except ImportError: from typing import _Protocol # type: ignore[attr-defined] Protocol = _Protocol class P(Protocol): pass class A(IterDataPipe[P]): pass @skipTyping def test_subtype(self): from torch.utils.data.datapipes._typing import issubtype basic_type = (int, str, bool, float, complex, list, tuple, dict, set, T_co) for t in basic_type: self.assertTrue(issubtype(t, t)) self.assertTrue(issubtype(t, Any)) if t == T_co: self.assertTrue(issubtype(Any, t)) else: self.assertFalse(issubtype(Any, t)) for t1, t2 in itertools.product(basic_type, basic_type): if t1 == t2 or t2 == T_co: self.assertTrue(issubtype(t1, t2)) else: self.assertFalse(issubtype(t1, t2)) T = TypeVar("T", int, str) S = TypeVar("S", bool, Union[str, int], tuple[int, T]) # type: ignore[valid-type] types = ( (int, Optional[int]), (list, Union[int, list]), (tuple[int, str], S), (tuple[int, str], tuple), (T, S), (S, T_co), (T, Union[S, set]), ) for sub, par in types: self.assertTrue(issubtype(sub, par)) self.assertFalse(issubtype(par, sub)) subscriptable_types = { list: 1, tuple: 2, # use 2 parameters set: 1, dict: 2, } for subscript_type, n in subscriptable_types.items(): for ts in itertools.combinations(types, n): subs, pars = zip(*ts) sub = subscript_type[subs] # type: ignore[index] par = subscript_type[pars] # type: ignore[index] self.assertTrue(issubtype(sub, par)) self.assertFalse(issubtype(par, sub)) # Non-recursive check self.assertTrue(issubtype(par, sub, recursive=False)) @skipTyping def test_issubinstance(self): from torch.utils.data.datapipes._typing import issubinstance basic_data = (1, "1", True, 1.0, complex(1.0, 0.0)) basic_type = (int, str, bool, float, complex) S = TypeVar("S", bool, Union[str, int]) for d in basic_data: 
self.assertTrue(issubinstance(d, Any)) self.assertTrue(issubinstance(d, T_co)) if type(d) in (bool, int, str): self.assertTrue(issubinstance(d, S)) else: self.assertFalse(issubinstance(d, S)) for t in basic_type: if type(d) is t: self.assertTrue(issubinstance(d, t)) else: self.assertFalse(issubinstance(d, t)) # list/set dt = (([1, "1", 2], list), (set({1, "1", 2}), set)) for d, t in dt: self.assertTrue(issubinstance(d, t)) self.assertTrue(issubinstance(d, t[T_co])) # type: ignore[index] self.assertFalse(issubinstance(d, t[int])) # type: ignore[index] # dict d = {"1": 1, "2": 2.0} self.assertTrue(issubinstance(d, dict)) self.assertTrue(issubinstance(d, dict[str, T_co])) self.assertFalse(issubinstance(d, dict[str, int])) # tuple d = (1, "1", 2) self.assertTrue(issubinstance(d, tuple)) self.assertTrue(issubinstance(d, tuple[int, str, T_co])) self.assertFalse(issubinstance(d, tuple[int, Any])) self.assertFalse(issubinstance(d, tuple[int, int, int])) # Static checking annotation @skipTyping def test_compile_time(self): with self.assertRaisesRegex(TypeError, r"Expected 'Iterator' as the return"): class InvalidDP1(IterDataPipe[int]): def __iter__(self) -> str: # type: ignore[misc, override] yield 0 with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"): class InvalidDP2(IterDataPipe[tuple]): def __iter__(self) -> Iterator[int]: # type: ignore[override] yield 0 with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"): class InvalidDP3(IterDataPipe[tuple[int, str]]): def __iter__(self) -> Iterator[tuple]: # type: ignore[override] yield (0,) class DP1(IterDataPipe[tuple[int, str]]): def __init__(self, length): self.length = length def __iter__(self) -> Iterator[tuple[int, str]]: for d in range(self.length): yield d, str(d) self.assertTrue(issubclass(DP1, IterDataPipe)) dp1 = DP1(10) self.assertTrue(DP1.type.issubtype(dp1.type) and dp1.type.issubtype(DP1.type)) # type: ignore[attr-defined] dp1_ = DP1(5) self.assertEqual(dp1.type, 
dp1_.type) with self.assertRaisesRegex(TypeError, r"is not a generic class"): class InvalidDP5(DP1[tuple]): # type: ignore[type-arg] def __iter__(self) -> Iterator[tuple]: # type: ignore[override] yield (0,) class DP2(IterDataPipe[T_co]): def __iter__(self) -> Iterator[T_co]: yield from range(10) # type: ignore[misc] self.assertTrue(issubclass(DP2, IterDataPipe)) dp2 = DP2() # type: ignore[var-annotated] self.assertTrue(DP2.type.issubtype(dp2.type) and dp2.type.issubtype(DP2.type)) # type: ignore[attr-defined] dp2_ = DP2() # type: ignore[var-annotated] self.assertEqual(dp2.type, dp2_.type) class DP3(IterDataPipe[tuple[T_co, str]]): r"""DataPipe without fixed type with __init__ function""" def __init__(self, datasource): self.datasource = datasource def __iter__(self) -> Iterator[tuple[T_co, str]]: for d in self.datasource: yield d, str(d) self.assertTrue(issubclass(DP3, IterDataPipe)) dp3 = DP3(range(10)) # type: ignore[var-annotated] self.assertTrue(DP3.type.issubtype(dp3.type) and dp3.type.issubtype(DP3.type)) # type: ignore[attr-defined] dp3_ = DP3(5) # type: ignore[var-annotated] self.assertEqual(dp3.type, dp3_.type) class DP4(IterDataPipe[tuple]): r"""DataPipe without __iter__ annotation""" def __iter__(self): raise NotImplementedError self.assertTrue(issubclass(DP4, IterDataPipe)) dp4 = DP4() self.assertTrue(dp4.type.param is tuple) class DP5(IterDataPipe): r"""DataPipe without type annotation""" def __iter__(self) -> Iterator[str]: raise NotImplementedError self.assertTrue(issubclass(DP5, IterDataPipe)) dp5 = DP5() from torch.utils.data.datapipes._typing import issubtype self.assertTrue( issubtype(dp5.type.param, Any) and issubtype(Any, dp5.type.param) ) class DP6(IterDataPipe[int]): r"""DataPipe with plain Iterator""" def __iter__(self) -> Iterator: raise NotImplementedError self.assertTrue(issubclass(DP6, IterDataPipe)) dp6 = DP6() self.assertTrue(dp6.type.param is int) class DP7(IterDataPipe[Awaitable[T_co]]): r"""DataPipe with abstract base class""" 
self.assertTrue(issubclass(DP7, IterDataPipe)) self.assertTrue(DP7.type.param == Awaitable[T_co]) # type: ignore[attr-defined] class DP8(DP7[str]): r"""DataPipe subclass from a DataPipe with abc type""" self.assertTrue(issubclass(DP8, IterDataPipe)) self.assertTrue(DP8.type.param == Awaitable[str]) # type: ignore[attr-defined] @skipTyping def test_construct_time(self): class DP0(IterDataPipe[tuple]): @argument_validation def __init__(self, dp: IterDataPipe): self.dp = dp def __iter__(self) -> Iterator[tuple]: for d in self.dp: yield d, str(d) class DP1(IterDataPipe[int]): @argument_validation def __init__(self, dp: IterDataPipe[tuple[int, str]]): self.dp = dp def __iter__(self) -> Iterator[int]: for a, _ in self.dp: yield a # Non-DataPipe input with DataPipe hint datasource = [(1, "1"), (2, "2"), (3, "3")] with self.assertRaisesRegex( TypeError, r"Expected argument 'dp' as a IterDataPipe" ): dp0 = DP0(datasource) dp0 = DP0(dp.iter.IterableWrapper(range(10))) with self.assertRaisesRegex( TypeError, r"Expected type of argument 'dp' as a subtype" ): DP1(dp0) @skipTyping def test_runtime(self): class DP(IterDataPipe[tuple[int, T_co]]): def __init__(self, datasource): self.ds = datasource @runtime_validation def __iter__(self) -> Iterator[tuple[int, T_co]]: yield from self.ds dss = ([(1, "1"), (2, "2")], [(1, 1), (2, "2")]) for ds in dss: dp0 = DP(ds) # type: ignore[var-annotated] self.assertEqual(list(dp0), ds) # Reset __iter__ self.assertEqual(list(dp0), ds) dss = ( [(1, 1), ("2", 2)], # type: ignore[assignment, list-item] [[1, "1"], [2, "2"]], # type: ignore[list-item] [1, "1", 2, "2"], ) for ds in dss: dp0 = DP(ds) with self.assertRaisesRegex( RuntimeError, r"Expected an instance as subtype" ): list(dp0) with runtime_validation_disabled(): self.assertEqual(list(dp0), ds) with runtime_validation_disabled(): self.assertEqual(list(dp0), ds) with self.assertRaisesRegex( RuntimeError, r"Expected an instance as subtype" ): list(dp0) @skipTyping def test_reinforce(self): T 
= TypeVar("T", int, str) class DP(IterDataPipe[T]): def __init__(self, ds): self.ds = ds @runtime_validation def __iter__(self) -> Iterator[T]: yield from self.ds ds = list(range(10)) # Valid type reinforcement dp0 = DP(ds).reinforce_type(int) self.assertTrue(dp0.type, int) self.assertEqual(list(dp0), ds) # Invalid type with self.assertRaisesRegex(TypeError, r"'expected_type' must be a type"): DP(ds).reinforce_type(1) # Type is not subtype with self.assertRaisesRegex( TypeError, r"Expected 'expected_type' as subtype of" ): DP(ds).reinforce_type(float) # Invalid data at runtime dp3 = DP(ds).reinforce_type(str) with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"): list(dp3) # Context Manager to disable the runtime validation with runtime_validation_disabled(): self.assertEqual(list(dp3), ds)
TestTyping
python
airbytehq__airbyte
airbyte-ci/connectors/connector_ops/connector_ops/utils.py
{ "start": 7872, "end": 8012 }
class ____(str, Enum): PYTHON = "python" JAVA = "java" LOW_CODE = "low-code" MANIFEST_ONLY = "manifest-only"
ConnectorLanguage
python
tensorflow__tensorflow
tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py
{ "start": 21769, "end": 30981 }
class ____(test.TestCase): def doTestBasic(self, use_resource=False, use_callable_params=False): if context.executing_eagerly() and not use_resource: self.skipTest( "Skipping test with use_resource=False and executing eagerly." ) for i, dtype in enumerate([dtypes.float32]): with self.session(graph=ops.Graph()): # Initialize variables for numpy implementation. m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) if use_resource: var0 = resource_variable_ops.ResourceVariable( var0_np, name="var0_%d" % i ) var1 = resource_variable_ops.ResourceVariable( var1_np, name="var1_%d" % i ) else: var0 = variables.RefVariable(var0_np) var1 = variables.RefVariable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) learning_rate = lambda: 0.001 beta1 = lambda: 0.9 beta2 = lambda: 0.999 epsilon = lambda: 1e-8 if not use_callable_params: learning_rate = learning_rate() beta1 = beta1() beta2 = beta2() epsilon = epsilon() opt = adam.AdamOptimizer(learning_rate=learning_rate) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) opt_variables = opt.variables() beta1_power, beta2_power = opt._get_beta_accumulators() self.assertIsNotNone(beta1_power) self.assertIsNotNone(beta2_power) self.assertIn(beta1_power, opt_variables) self.assertIn(beta2_power, opt_variables) # Ensure that non-slot variables are the same type as the requested # variables. self.assertEqual( use_resource, resource_variable_ops.is_resource_variable(beta1_power), ) self.assertEqual( use_resource, resource_variable_ops.is_resource_variable(beta2_power), ) if not context.executing_eagerly(): with ops.Graph().as_default(): # Shouldn't return non-slot variables from other graphs. 
self.assertEqual(0, len(opt.variables())) self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) beta1_power, beta2_power = opt._get_beta_accumulators() # Run 3 steps of Adam for t in range(1, 4): if not context.executing_eagerly(): self.evaluate(update) elif t > 1: opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.assertAllCloseAccordingToType( 0.9 ** (t + 1), self.evaluate(beta1_power) ) self.assertAllCloseAccordingToType( 0.999 ** (t + 1), self.evaluate(beta2_power) ) var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) var0_eval = self.evaluate(var0) var1_eval = self.evaluate(var1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, var0_eval) self.assertAllCloseAccordingToType(var1_np, var1_eval) if use_resource: self.assertEqual( "var0_%d/Adam:0" % (i,), opt.get_slot(var=var0, name="m").name ) def testBasic(self): self.doTestBasic(use_resource=True) @test_util.run_in_graph_and_eager_modes def testResourceBasic(self): self.doTestBasic(use_resource=True) def testBasicCallableParams(self): with context.eager_mode(): self.doTestBasic(use_resource=True, use_callable_params=True) @test_util.run_deprecated_v1 def testTensorLearningRate(self): for dtype in [dtypes.float32]: with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = variables.Variable(var0_np) var1 = variables.Variable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = adam.AdamOptimizer(constant_op.constant(0.001)) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) beta1_power, beta2_power = opt._get_beta_accumulators() # Run 3 steps of Adam for t in range(1, 4): self.assertAllCloseAccordingToType( 0.9**t, self.evaluate(beta1_power) ) self.assertAllCloseAccordingToType( 0.999**t, self.evaluate(beta2_power) ) update.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @test_util.run_deprecated_v1 def testSharing(self): for dtype in [dtypes.float32]: with self.cached_session(): # Initialize variables for numpy implementation. 
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0 var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype) var0 = variables.Variable(var0_np) var1 = variables.Variable(var1_np) grads0 = constant_op.constant(grads0_np) grads1 = constant_op.constant(grads1_np) opt = adam.AdamOptimizer() update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() beta1_power, beta2_power = opt._get_beta_accumulators() # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of intertwined Adam1 and Adam2. for t in range(1, 4): self.assertAllCloseAccordingToType( 0.9**t, self.evaluate(beta1_power) ) self.assertAllCloseAccordingToType( 0.999**t, self.evaluate(beta2_power) ) if t % 2 == 0: update1.run() else: update2.run() var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0) var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1) # Validate updated params self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testTwoSessions(self): optimizer = adam.AdamOptimizer() with context.eager_mode(): var0 = variables.Variable( np.array([1.0, 2.0], dtype=np.float32), name="v0" ) grads0 = constant_op.constant(np.array([0.1, 0.1], dtype=np.float32)) optimizer.apply_gradients([(grads0, var0)]) g = ops.Graph() with g.as_default(): with session.Session(): var0 = variables.Variable( np.array([1.0, 2.0], dtype=np.float32), name="v0" ) grads0 = constant_op.constant(np.array([0.1, 0.1], dtype=np.float32)) optimizer.apply_gradients([(grads0, var0)]) gg = ops.Graph() with gg.as_default(): with session.Session(): var0 
= variables.Variable(np.array([1.0, 2.0]), name="v0") grads0 = constant_op.constant(np.array([0.1, 0.1])) # If the optimizer saves any state not keyed by graph the following line # fails. optimizer.apply_gradients([(grads0, var0)]) def testSlotsUniqueEager(self): with context.eager_mode(): v1 = resource_variable_ops.ResourceVariable(1.0) v2 = resource_variable_ops.ResourceVariable(1.0) opt = adam.AdamOptimizer(1.0) opt.minimize(lambda: v1 + v2) # There should be two non-slot variables, and two unique slot variables # for v1 and v2 respectively. self.assertEqual(6, len({id(v) for v in opt.variables()}))
AdamOptimizerTest
python
scrapy__scrapy
tests/test_spidermiddleware_process_start.py
{ "start": 1011, "end": 1322 }
class ____: async def process_start(self, start): async for item_or_request in start: yield item_or_request def process_start_requests(self, start_requests, spider): raise NotImplementedError # Spiders and spider middlewares for TestMain._test_wrap
UniversalSpiderMiddleware
python
kamyu104__LeetCode-Solutions
Python/linked-list-random-node.py
{ "start": 147, "end": 839 }
class ____(object): def __init__(self, head): """ :type head: Optional[ListNode] """ self.__head = head # Proof of Reservoir Sampling: # https://discuss.leetcode.com/topic/53753/brief-explanation-for-reservoir-sampling def getRandom(self): """ :rtype: int """ reservoir = -1 curr, n = self.__head, 0 while curr: reservoir = curr.val if randint(1, n+1) == 1 else reservoir curr, n = curr.next, n+1 return reservoir # Time: ctor: O(n) # getRandom: O(1) # Space: O(n) from random import randint # if the length is known with using extra space
Solution
python
tensorflow__tensorflow
tensorflow/compiler/tests/complex_div_test.py
{ "start": 1138, "end": 6289 }
class ____(xla_test.XLATestCase): """Test cases for complex numbers division operators.""" def _testBinary(self, op, a, b, expected, equality_test=None): with self.session() as session: with self.test_scope(): pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a") pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b") output = op(pa, pb) result = session.run(output, {pa: a, pb: b}) if equality_test is None: equality_test = self.assertAllCloseAccordingToType equality_test(np.real(result), np.real(expected), rtol=1e-3) equality_test(np.imag(result), np.imag(expected), rtol=1e-3) def testComplexOps(self): for dtype in self.complex_types: # Test division by 0 scenarios. self._testBinary( gen_math_ops.real_div, np.array([ complex(1, 1), complex(1, np.inf), complex(1, np.nan), complex(np.inf, 1), complex(np.inf, np.inf), complex(np.inf, np.nan), complex(np.nan, 1), complex(np.nan, np.inf), complex(np.nan, np.nan), complex(-np.inf, np.nan), ], dtype=dtype), np.array([ 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0 + 0j, 0.0 + 0j, ], dtype=dtype), expected=np.array([ complex(np.inf, np.inf), complex(np.inf, np.inf), complex(np.inf, np.nan), complex(np.inf, np.inf), complex(np.inf, np.inf), complex(np.inf, np.nan), complex(np.nan, np.inf), complex(np.nan, np.inf), complex(np.nan, np.nan), complex(-np.inf, np.nan), ], dtype=dtype)) # Test division with finite numerator, inf/nan denominator. self._testBinary( gen_math_ops.real_div, np.array([ 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, ], dtype=dtype), np.array( [ complex(1, np.inf), complex(1, np.nan), complex(np.inf, 1), complex(np.inf, np.inf), # C++ and Python diverge here. complex(np.inf, np.nan), # C++ and Python diverge here. complex(np.nan, 1), complex(np.nan, np.inf), # C++ and Python diverge here. complex(np.nan, -np.inf), # C++ and Python diverge here. 
complex(np.nan, np.nan), ], dtype=dtype), expected=np.array( [ (1 + 1j) / complex(1, np.inf), (1 + 1j) / complex(1, np.nan), (1 + 1j) / complex(np.inf, 1), complex(0 + 0j), # C++ and Python diverge here. complex(0 + 0j), # C++ and Python diverge here. (1 + 1j) / complex(np.nan, 1), complex(0 + 0j), # C++ and Python diverge here. complex(0 - 0j), # C++ and Python diverge here. (1 + 1j) / complex(np.nan, np.nan), ], dtype=dtype)) # Test division with inf/nan numerator, infinite denominator. self._testBinary( gen_math_ops.real_div, np.array([ complex(1, np.inf), complex(1, np.nan), complex(np.inf, 1), complex(np.inf, np.inf), complex(np.inf, np.nan), complex(np.nan, 1), complex(np.nan, np.inf), complex(np.nan, np.nan), complex(np.nan, -np.inf), ], dtype=dtype), np.array([ 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, 1 + 1j, -1 - 1j, ], dtype=dtype), expected=np.array( [ complex(np.inf, np.inf), # C++ and Python diverge here. complex(1 / np.nan) / (1 + 1j), complex(np.inf / 1) / (1 + 1j), complex(np.inf, -np.nan), # C++ and Python diverge here. complex(np.inf, -np.inf), # C++ and Python diverge here. complex(np.nan / 1) / (1 + 1j), complex(np.inf, np.inf), # C++ and Python diverge here. complex(np.nan / np.nan) / (1 + 1j), complex(np.inf, np.inf), # C++ and Python diverge here. ], dtype=dtype)) if __name__ == "__main__": googletest.main()
ComplexNumbersDivisionTest
python
scipy__scipy
scipy/fftpack/tests/test_helper.py
{ "start": 428, "end": 993 }
class ____: def test_definition(self): x = [0,1,2,3,4,-4,-3,-2,-1] y = [-4,-3,-2,-1,0,1,2,3,4] assert_array_almost_equal(fftshift(x),y) assert_array_almost_equal(ifftshift(y),x) x = [0,1,2,3,4,-5,-4,-3,-2,-1] y = [-5,-4,-3,-2,-1,0,1,2,3,4] assert_array_almost_equal(fftshift(x),y) assert_array_almost_equal(ifftshift(y),x) def test_inverse(self): for n in [1,4,9,100,211]: x = random.random((n,)) assert_array_almost_equal(ifftshift(fftshift(x)),x)
TestFFTShift
python
apache__airflow
airflow-core/src/airflow/utils/sqlalchemy.py
{ "start": 8455, "end": 14202 }
class ____(PickleType): """ Adds special handling for K8s executor config. If we unpickle a k8s object that was pickled under an earlier k8s library version, then the unpickled object may throw an error when to_dict is called. To be more tolerant of version changes we convert to JSON using Airflow's serializer before pickling. """ cache_ok = True def bind_processor(self, dialect): from airflow.serialization.serialized_objects import BaseSerialization super_process = super().bind_processor(dialect) def process(value): val_copy = copy.copy(value) if isinstance(val_copy, dict) and "pod_override" in val_copy: val_copy["pod_override"] = BaseSerialization.serialize(val_copy["pod_override"]) return super_process(val_copy) return process def result_processor(self, dialect, coltype): from airflow.serialization.serialized_objects import BaseSerialization super_process = super().result_processor(dialect, coltype) def process(value): value = super_process(value) # unpickle if isinstance(value, dict) and "pod_override" in value: pod_override = value["pod_override"] if isinstance(pod_override, dict) and pod_override.get(Encoding.TYPE): # If pod_override was serialized with Airflow's BaseSerialization, deserialize it value["pod_override"] = BaseSerialization.deserialize(pod_override) else: # backcompat path # we no longer pickle raw pods but this code may be reached # when accessing executor configs created in a prior version new_pod = ensure_pod_is_valid_after_unpickling(pod_override) if new_pod: value["pod_override"] = new_pod return value return process def compare_values(self, x, y): """ Compare x and y using self.comparator if available. Else, use __eq__. The TaskInstance.executor_config attribute is a pickled object that may contain kubernetes objects. If the installed library version has changed since the object was originally pickled, due to the underlying ``__eq__`` method on these objects (which converts them to JSON), we may encounter attribute errors. 
In this case we should replace the stored object. From https://github.com/apache/airflow/pull/24356 we use our serializer to store k8s objects, but there could still be raw pickled k8s objects in the database, stored from earlier version, so we still compare them defensively here. """ if self.comparator: return self.comparator(x, y) try: return x == y except AttributeError: return False def nulls_first(col: ColumnElement, session: Session) -> ColumnElement: """ Specify *NULLS FIRST* to the column ordering. This is only done to Postgres, currently the only backend that supports it. Other databases do not need it since NULL values are considered lower than any other values, and appear first when the order is ASC (ascending). """ if get_dialect_name(session) == "postgresql": return nullsfirst(col) return col USE_ROW_LEVEL_LOCKING: bool = conf.getboolean("scheduler", "use_row_level_locking", fallback=True) def with_row_locks( query: Query[Any] | Select[Any], session: Session, *, nowait: bool = False, skip_locked: bool = False, key_share: bool = True, **kwargs, ) -> Query[Any] | Select[Any]: """ Apply with_for_update to the SQLAlchemy query if row level locking is in use. This wrapper is needed so we don't use the syntax on unsupported database engines. In particular, MySQL (prior to 8.0) and MariaDB do not support row locking, where we do not support nor recommend running HA scheduler. If a user ignores this and tries anyway, everything will still work, just slightly slower in some circumstances. See https://jira.mariadb.org/browse/MDEV-13115 :param query: An SQLAlchemy Query object :param session: ORM Session :param nowait: If set to True, will pass NOWAIT to supported database backends. :param skip_locked: If set to True, will pass SKIP LOCKED to supported database backends. :param key_share: If true, will lock with FOR KEY SHARE UPDATE (at least on postgres). 
:param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc) :return: updated query """ try: dialect_name = get_dialect_name(session) except ValueError: return query if not dialect_name: return query # Don't use row level locks if the MySQL dialect (Mariadb & MySQL < 8) does not support it. if not USE_ROW_LEVEL_LOCKING: return query if dialect_name == "mysql" and not getattr( session.bind.dialect if session.bind else None, "supports_for_update_of", False ): return query if nowait: kwargs["nowait"] = True if skip_locked: kwargs["skip_locked"] = True if key_share: kwargs["key_share"] = True return query.with_for_update(**kwargs) @contextlib.contextmanager def lock_rows(query: Select, session: Session) -> Generator[None, None, None]: """ Lock database rows during the context manager block. This is a convenient method for ``with_row_locks`` when we don't need the locked rows. :meta private: """ locked_rows = with_row_locks(query, session) yield del locked_rows
ExecutorConfigType
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/extra/pandas/impl.py
{ "start": 12535, "end": 28505 }
class ____(Generic[Ex]):
    """Data object for describing a column in a DataFrame.

    Arguments:

    * name: the column name, or None to default to the column position. Must
      be hashable, but can otherwise be any value supported as a pandas column
      name.
    * elements: the strategy for generating values in this column, or None
      to infer it from the dtype.
    * dtype: the dtype of the column, or None to infer it from the element
      strategy. At least one of dtype or elements must be provided.
    * fill: A default value for elements of the column. See
      :func:`~hypothesis.extra.numpy.arrays` for a full explanation.
    * unique: If all values in this column should be distinct.
    """

    name: str | int | None = None
    elements: st.SearchStrategy[Ex] | None = None
    dtype: Any = None
    fill: st.SearchStrategy[Ex] | None = None
    unique: bool = False


def columns(
    names_or_number: int | Sequence[str],
    *,
    dtype: Any = None,
    elements: st.SearchStrategy[Ex] | None = None,
    fill: st.SearchStrategy[Ex] | None = None,
    unique: bool = False,
) -> list[column[Ex]]:
    """A convenience function for producing a list of :class:`column` objects
    of the same general shape.

    The names_or_number argument is either a sequence of values, the
    elements of which will be used as the name for individual column
    objects, or a number, in which case that many unnamed columns will
    be created.

    All other arguments are passed through verbatim to create the columns.
    """
    # A number means "that many anonymous columns"; their name stays None so
    # each column later defaults to its positional index.
    if isinstance(names_or_number, (int, float)):
        names: list[int | str | None] = [None] * names_or_number
    else:
        names = list(names_or_number)
    return [
        column(name=n, dtype=dtype, elements=elements, fill=fill, unique=unique)
        for n in names
    ]


@defines_strategy()
def data_frames(
    columns: Sequence[column] | None = None,
    *,
    rows: st.SearchStrategy[dict | Sequence[Any]] | None = None,
    index: st.SearchStrategy[Ex] | None = None,
) -> st.SearchStrategy[pandas.DataFrame]:
    """Provides a strategy for producing a :class:`pandas.DataFrame`.

    Arguments:

    * columns: An iterable of :class:`column` objects describing the shape of
      the generated DataFrame.

    * rows: A strategy for generating a row object. Should generate
      either dicts mapping column names to values or a sequence mapping
      column position to the value in that position (note that unlike the
      :class:`pandas.DataFrame` constructor, single values are not allowed
      here. Passing e.g. an integer is an error, even if there is only one
      column).

      At least one of rows and columns must be provided. If both are
      provided then the generated rows will be validated against the
      columns and an error will be raised if they don't match.

      Caveats on using rows:

      * In general you should prefer using columns to rows, and only use rows
        if the columns interface is insufficiently flexible to describe what
        you need - you will get better performance and example quality that
        way.
      * If you provide rows and not columns, then the shape and dtype of the
        resulting DataFrame may vary. e.g. if you have a mix of int and
        float in the values for one column in your row entries, the column
        will sometimes have an integral dtype and sometimes a float.

    * index: If not None, a strategy for generating indexes for the resulting
      DataFrame. This can generate either :class:`pandas.Index` objects or
      any sequence of values (which will be passed to the Index constructor).

      You will probably find it most convenient to use the
      :func:`~hypothesis.extra.pandas.indexes` or
      :func:`~hypothesis.extra.pandas.range_indexes` function to produce
      values for this argument.

    Usage:

    The expected usage pattern is that you use :class:`column` and
    :func:`columns` to specify a fixed shape of the DataFrame you want as
    follows. For example the following gives a two column data frame:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import column, data_frames
        >>> data_frames([
        ... column('A', dtype=int), column('B', dtype=float)]).example()
                    A              B
        0  2021915903  1.793898e+232
        1  1146643993            inf
        2 -2096165693   1.000000e+07

    If you want the values in different columns to interact in some way you
    can use the rows argument. For example the following gives a two column
    DataFrame where the value in the first column is always at most the value
    in the second:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import column, data_frames
        >>> import hypothesis.strategies as st
        >>> data_frames(
        ...     rows=st.tuples(st.floats(allow_nan=False),
        ...                    st.floats(allow_nan=False)).map(sorted)
        ... ).example()
                       0             1
        0 -3.402823e+38  9.007199e+15
        1 -1.562796e-298  5.000000e-01

    You can also combine the two:

    .. code-block:: pycon

        >>> from hypothesis.extra.pandas import columns, data_frames
        >>> import hypothesis.strategies as st
        >>> data_frames(
        ...     columns=columns(["lo", "hi"], dtype=float),
        ...     rows=st.tuples(st.floats(allow_nan=False),
        ...                    st.floats(allow_nan=False)).map(sorted)
        ... ).example()
                 lo            hi
        0   9.314723e-49  4.353037e+45
        1  -9.999900e-01  1.000000e+07
        2 -2.152861e+134 -1.069317e-73

    (Note that the column dtype must still be specified and will not be
    inferred from the rows. This restriction may be lifted in future).

    Combining rows and columns has the following behaviour:

    * The column names and dtypes will be used.
    * If the column is required to be unique, this will be enforced.
    * Any values missing from the generated rows will be provided using the
      column's fill.
    * Any values in the row not present in the column specification (if
      dicts are passed, if there are keys with no corresponding column name,
      if sequences are passed if there are too many items) will result in
      InvalidArgument being raised.
    """
    if index is None:
        index = range_indexes()
    else:
        check_strategy(index, "index")

    index_strategy = index

    if columns is None:
        if rows is None:
            raise InvalidArgument("At least one of rows and columns must be provided")
        else:

            @st.composite
            def rows_only(draw):
                index = draw(index_strategy)

                def row():
                    result = draw(rows)
                    check_type(abc.Iterable, result, "draw(row)")
                    return result

                if len(index) > 0:
                    return pandas.DataFrame([row() for _ in index], index=index)
                else:
                    # If we haven't drawn any rows we need to draw one row and
                    # then discard it so that we get a consistent shape for the
                    # DataFrame.
                    base = pandas.DataFrame([row()])
                    return base.drop(0)

            return rows_only()

    assert columns is not None
    cols = try_convert(tuple, columns, "columns")

    rewritten_columns = []
    column_names: set[str] = set()

    for i, c in enumerate(cols):
        check_type(column, c, f"columns[{i}]")

        # Work on a copy so the caller's column objects are never mutated.
        c = copy(c)
        if c.name is None:
            label = f"columns[{i}]"
            c.name = i
        else:
            label = c.name
            try:
                hash(c.name)
            except TypeError:
                raise InvalidArgument(
                    f"Column names must be hashable, but columns[{i}].name was "
                    f"{c.name!r} of type {type(c.name).__name__}, which cannot be hashed."
                ) from None

        if c.name in column_names:
            raise InvalidArgument(f"duplicate definition of column name {c.name!r}")

        column_names.add(c.name)

        c.elements, _ = elements_and_dtype(c.elements, c.dtype, label)

        if c.dtype is None and rows is not None:
            raise InvalidArgument(
                "Must specify a dtype for all columns when combining rows with columns."
            )

        c.fill = npst.fill_for(
            fill=c.fill, elements=c.elements, unique=c.unique, name=label
        )

        rewritten_columns.append(c)

    if rows is None:

        @st.composite
        def just_draw_columns(draw):
            index = draw(index_strategy)
            local_index_strategy = st.just(index)

            data = OrderedDict((c.name, None) for c in rewritten_columns)

            # Depending on how the columns are going to be generated we group
            # them differently to get better shrinking. For columns with fill
            # enabled, the elements can be shrunk independently of the size,
            # so we can just shrink by shrinking the index then shrinking the
            # length and are generally much more free to move data around.

            # For columns with no filling the problem is harder, and drawing
            # them like that would result in rows being very far apart from
            # each other in the choice sequence, which gets in the way
            # of shrinking. So what we do is reorder and draw those columns
            # row wise, so that the values of each row are next to each other.
            # This makes life easier for the shrinker when deleting choices.
            columns_without_fill = [c for c in rewritten_columns if c.fill.is_empty]

            if columns_without_fill:
                for c in columns_without_fill:
                    data[c.name] = pandas.Series(
                        np.zeros(shape=len(index), dtype=object),
                        index=index,
                        dtype=c.dtype,
                    )
                seen = {c.name: set() for c in columns_without_fill if c.unique}

                for i in range(len(index)):
                    for c in columns_without_fill:
                        if c.unique:
                            # Retry a bounded number of times for a fresh value,
                            # then reject the whole example.
                            for _ in range(5):
                                value = draw(c.elements)
                                if value not in seen[c.name]:
                                    seen[c.name].add(value)
                                    break
                            else:
                                reject()
                        else:
                            value = draw(c.elements)
                        try:
                            data[c.name].iloc[i] = value
                        except ValueError as err:  # pragma: no cover
                            # This just works in Pandas 1.4 and later, but gives
                            # a confusing error on previous versions.
                            if c.dtype is None and not isinstance(
                                value, (float, int, str, bool, datetime, timedelta)
                            ):
                                raise ValueError(
                                    f"Failed to add {value=} to column "
                                    f"{c.name} with dtype=None. Maybe passing "
                                    "dtype=object would help?"
                                ) from err
                            # Unclear how this could happen, but users find a way...
                            raise

            for c in rewritten_columns:
                if not c.fill.is_empty:
                    data[c.name] = draw(
                        series(
                            index=local_index_strategy,
                            dtype=c.dtype,
                            elements=c.elements,
                            fill=c.fill,
                            unique=c.unique,
                        )
                    )

            return pandas.DataFrame(data, index=index)

        return just_draw_columns()
    else:

        @st.composite
        def assign_rows(draw):
            index = draw(index_strategy)

            result = pandas.DataFrame(
                OrderedDict(
                    (
                        c.name,
                        pandas.Series(
                            np.zeros(dtype=c.dtype, shape=len(index)), dtype=c.dtype
                        ),
                    )
                    for c in rewritten_columns
                ),
                index=index,
            )

            fills = {}

            any_unique = any(c.unique for c in rewritten_columns)
            if any_unique:
                # all_seen[i] is a set for unique columns and None otherwise;
                # trailing Nones are dropped so the zip below stays short.
                all_seen = [set() if c.unique else None for c in rewritten_columns]
                while all_seen[-1] is None:
                    all_seen.pop()

            for row_index in range(len(index)):
                # Up to five attempts to draw a row that satisfies uniqueness.
                for _ in range(5):
                    original_row = draw(rows)
                    row = original_row

                    if isinstance(row, dict):
                        as_list = [None] * len(rewritten_columns)

                        for i, c in enumerate(rewritten_columns):
                            try:
                                as_list[i] = row[c.name]
                            except KeyError:
                                try:
                                    as_list[i] = fills[i]
                                except KeyError:
                                    if c.fill.is_empty:
                                        raise InvalidArgument(
                                            f"Empty fill strategy in {c!r} cannot "
                                            f"complete row {original_row!r}"
                                        ) from None
                                    fills[i] = draw(c.fill)
                                    as_list[i] = fills[i]

                        for k in row:
                            if k not in column_names:
                                raise InvalidArgument(
                                    f"Row {row!r} contains column {k!r} not in "
                                    f"columns {[c.name for c in rewritten_columns]!r})"
                                )

                        row = as_list

                    if any_unique:
                        has_duplicate = False
                        for seen, value in zip(all_seen, row, strict=False):
                            if seen is None:
                                continue
                            if value in seen:
                                has_duplicate = True
                                break
                            seen.add(value)
                        if has_duplicate:
                            continue

                    row = list(try_convert(tuple, row, "draw(rows)"))

                    if len(row) > len(rewritten_columns):
                        raise InvalidArgument(
                            f"Row {original_row!r} contains too many entries. Has "
                            f"{len(row)} but expected at most {len(rewritten_columns)}"
                        )
                    # Pad short rows from each remaining column's fill strategy.
                    while len(row) < len(rewritten_columns):
                        c = rewritten_columns[len(row)]
                        if c.fill.is_empty:
                            raise InvalidArgument(
                                f"Empty fill strategy in {c!r} cannot "
                                f"complete row {original_row!r}"
                            )
                        row.append(draw(c.fill))

                    result.iloc[row_index] = row
                    break
                else:
                    reject()
            return result

        return assign_rows()
column
python
pytorch__pytorch
torch/distributed/elastic/multiprocessing/api.py
{ "start": 3456, "end": 5452 }
class ____(IntFlag): NONE = 0 OUT = 1 ERR = 2 ALL = OUT | ERR @classmethod def from_str(cls, vm: str) -> Union["Std", dict[int, "Std"]]: """ Example: :: from_str("0") -> Std.NONE from_str("1") -> Std.OUT from_str("0:3,1:0,2:1,3:2") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR} Any other input raises an exception """ def to_std(v: str) -> Std: # type: ignore[return] s = Std(int(v)) if s in Std: return s # return None -> should NEVER reach here since we regex check input if re.match(_VALUE_REGEX, vm): # vm is a number (e.g. 0) return to_std(vm) elif re.match(_MAPPING_REGEX, vm): # vm is a mapping (e.g. 0:1,1:2) d: dict[int, Std] = {} for m in vm.split(","): i, v = m.split(":") d[int(i)] = to_std(v) return d else: raise ValueError( f"{vm} does not match: <{_VALUE_REGEX}> or <{_MAPPING_REGEX}>" ) def to_map(val_or_map: Std | dict[int, Std], local_world_size: int) -> dict[int, Std]: """ Certain APIs take redirect settings either as a single value (e.g. apply to all local ranks) or as an explicit user-provided mapping. This method is a convenience method that converts a value or mapping into a mapping. Example: :: to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT} to_map( {0: Std.OUT, 1: Std.OUT}, local_world_size=2 ) # returns: {0: Std.OUT, 1: Std.OUT} """ if isinstance(val_or_map, Std): return dict.fromkeys(range(local_world_size), val_or_map) else: map = {} for i in range(local_world_size): map[i] = val_or_map.get(i, Std.NONE) return map @dataclass
Std
python
kamyu104__LeetCode-Solutions
Python/minimum-flips-in-binary-tree-to-get-result.py
{ "start": 1830, "end": 2799 }
class ____(object): def minimumFlips(self, root, result): """ :type root: Optional[TreeNode] :type result: bool :rtype: int """ INF = float("inf") OP = { 2: lambda x, y: x or y, 3: lambda x, y: x and y, 4: lambda x, y: x^y , 5: lambda x, y: not x if x is not None else not y } def dfs(node): if not node: return {None: 0} # null object pattern if node.left == node.right: return {True: node.val^1, False: node.val^0} left = dfs(node.left) right = dfs(node.right) dp = collections.defaultdict(lambda: INF) for k1, v1 in left.iteritems(): for k2, v2 in right.iteritems(): dp[OP[node.val](k1, k2)] = min(dp[OP[node.val](k1, k2)], v1+v2) return dp return dfs(root)[result]
Solution2
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
{ "start": 68312, "end": 71012 }
class ____(FSDPTest):
    # The messages might change when we move to a different NCCL version.
    # Please update this test if it starts failing.
    MEMORY_REGISTER_RE = (
        "NCCL INFO register comm 0x[0-9a-f]+ buffer 0x[0-9a-f]+ size [0-9]+"
    )

    @classmethod
    def _run(cls, *args, **kwargs):
        # Route NCCL debug logging to a temp file so the tests below can grep
        # it for buffer-registration messages.
        cls.nccl_log_dir = tempfile.TemporaryDirectory()
        os.environ["NCCL_DEBUG"] = "INFO"
        os.environ["NCCL_DEBUG_SUBSYS"] = "INIT,ENV,REG"
        os.environ["NCCL_DEBUG_FILE"] = cls.nccl_log_dir.name + "/nccl_log"
        super()._run(*args, **kwargs)

    @skip_if_lt_x_gpu(2)
    # The NCCL PG refuses to allocate tensors if multicast is unavailable, see
    # https://github.com/pytorch/pytorch/blob/503362d019b3782581492af7767945dbd75ca1c9/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp#L5634
    @requires_multicast_support()
    def test_fully_shard_alloc_from_pg(self):
        torch.manual_seed(42)
        model_args = ModelArgs()
        model = Transformer(model_args)
        for module in model.modules():
            if isinstance(module, TransformerBlock):
                fully_shard(module)
        fully_shard(model)

        # Per-rank seed so input data differs across ranks.
        torch.manual_seed(42 + self.rank)
        inp = torch.randint(0, model_args.vocab_size, (2, 16), device="cuda")

        # First run: PG allocation disabled — the log must NOT contain a
        # buffer-registration message.
        loss = model(inp)
        loss.sum().backward()

        torch.distributed.barrier()
        torch.cuda.synchronize()

        with open(self.nccl_log_dir.name + "/nccl_log") as f:
            self.assertNotRegex(f.read(), self.MEMORY_REGISTER_RE)

        for module in model.modules():
            if isinstance(module, TransformerBlock):
                module.set_allocate_memory_from_process_group_for_comm(True)
        model.set_allocate_memory_from_process_group_for_comm(True)

        # Second run: PG allocation enabled — registration must now appear.
        loss = model(inp)
        loss.sum().backward()

        torch.distributed.barrier()
        torch.cuda.synchronize()

        with open(self.nccl_log_dir.name + "/nccl_log") as f:
            self.assertRegex(f.read(), self.MEMORY_REGISTER_RE)

    @skip_if_lt_x_gpu(2)
    def test_exception_when_used_together_with_comm_hooks(self):
        model = nn.Linear(16, 16)
        model = fully_shard(model)

        # ok
        model.set_allocate_memory_from_process_group_for_comm(True)

        # setting custom hook after is also ok
        # (overrides set_allocate_memory_from_process_group_for_comm)
        mock_all_gather = MagicMock(spec=AllGather)
        model.set_custom_all_gather(mock_all_gather)

        # setting this after custom comm is used is ko
        with self.assertRaises(AssertionError):
            model.set_allocate_memory_from_process_group_for_comm(True)
TestFullyShardAllocFromPG
python
ray-project__ray
release/train_tests/pytorch_lightning/test_lightning.py
{ "start": 356, "end": 3103 }
class ____(pl.LightningModule):
    # ResNet-18 adapted for single-channel FashionMNIST (10 classes).
    def __init__(self):
        super().__init__()
        self.model = resnet18(num_classes=10)
        # Replace the stock 3-channel stem conv with a 1-channel one.
        self.model.conv1 = torch.nn.Conv2d(
            1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
        )
        self.criterion = torch.nn.CrossEntropyLoss()

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        outputs = self.forward(x)
        loss = self.criterion(outputs, y)
        self.log("loss", loss, on_step=True, prog_bar=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.model.parameters(), lr=0.001)


def train_func():
    """Per-worker training loop executed by Ray Train on each worker."""
    # Data
    transform = Compose([ToTensor(), Normalize((0.28604,), (0.32025,))])
    data_dir = os.path.join(tempfile.gettempdir(), "data")
    train_data = FashionMNIST(
        root=data_dir, train=True, download=True, transform=transform
    )
    train_dataloader = DataLoader(train_data, batch_size=128, shuffle=True)

    # Training
    model = ImageClassifier()
    # [1] Configure PyTorch Lightning Trainer.
    trainer = pl.Trainer(
        max_epochs=10,
        devices="auto",
        accelerator="auto",
        strategy=ray.train.lightning.RayDDPStrategy(),
        plugins=[ray.train.lightning.RayLightningEnvironment()],
        callbacks=[ray.train.lightning.RayTrainReportCallback()],
        # [1a] Optionally, disable the default checkpointing behavior
        # in favor of the `RayTrainReportCallback` above.
        enable_checkpointing=False,
    )
    trainer = ray.train.lightning.prepare_trainer(trainer)
    trainer.fit(model, train_dataloaders=train_dataloader)


def test_lightning_train_run():
    """End-to-end release test: 4-GPU distributed Lightning run via Ray Train."""
    # [2] Configure scaling and resource requirements.
    scaling_config = ray.train.ScalingConfig(num_workers=4, use_gpu=True)

    # [3] Launch distributed training job.
    trainer = TorchTrainer(
        train_func,
        scaling_config=scaling_config,
        # [3a] If running in a multi-node cluster, this is where you
        # should configure the run's persistent storage that is accessible
        # across all worker nodes.
        run_config=ray.train.RunConfig(
            storage_path="/mnt/cluster_storage/lightning_run"
        ),
    )
    result: ray.train.Result = trainer.fit()

    # [4] Load the trained model.
    with result.checkpoint.as_directory() as checkpoint_dir:
        model = ImageClassifier.load_from_checkpoint(  # noqa: F841
            os.path.join(
                checkpoint_dir,
                ray.train.lightning.RayTrainReportCallback.CHECKPOINT_NAME,
            ),
        )


if __name__ == "__main__":
    test_lightning_train_run()
ImageClassifier
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 361442, "end": 361536 }
class ____(stats.rv_continuous): def _cdf(self, x, a): return 42 * a + x
_distr2_gen
python
ipython__ipython
tests/test_pretty.py
{ "start": 4269, "end": 4562 }
class ____(object): __module__ = 1 @property def __class__(self): raise ValueError("I am horrible") def __repr__(self): raise BadException() def test_really_bad_repr(): with pytest.raises(BadException): pretty.pretty(ReallyBadRepr())
ReallyBadRepr
python
openai__openai-python
src/openai/resources/conversations/conversations.py
{ "start": 18428, "end": 19151 }
class ____: def __init__(self, conversations: AsyncConversations) -> None: self._conversations = conversations self.create = async_to_streamed_response_wrapper( conversations.create, ) self.retrieve = async_to_streamed_response_wrapper( conversations.retrieve, ) self.update = async_to_streamed_response_wrapper( conversations.update, ) self.delete = async_to_streamed_response_wrapper( conversations.delete, ) @cached_property def items(self) -> AsyncItemsWithStreamingResponse: return AsyncItemsWithStreamingResponse(self._conversations.items)
AsyncConversationsWithStreamingResponse
python
getsentry__sentry
src/sentry/rules/conditions/event_frequency.py
{ "start": 15793, "end": 18909 }
class ____(BaseEventFrequencyCondition):
    # Alert condition: fires when an issue is seen more than {value} times
    # within {interval}.
    id = "sentry.rules.conditions.event_frequency.EventFrequencyCondition"
    label = "The issue is seen more than {value} times in {interval}"

    def query_hook(
        self,
        event: GroupEvent,
        start: datetime,
        end: datetime,
        environment_id: int,
    ) -> int:
        """Return the event count for this event's group over [start, end)."""
        sums: Mapping[int, int] = self.get_snuba_query_result(
            tsdb_function=self.tsdb.get_sums,
            keys=[event.group_id],
            group_id=event.group.id,
            organization_id=event.group.project.organization_id,
            model=get_issue_tsdb_group_model(event.group.issue_category),
            start=start,
            end=end,
            environment_id=environment_id,
            referrer_suffix="alert_event_frequency",
            group_on_time=False,
            project_ids=[event.group.project_id],
        )
        return sums[event.group_id]

    def batch_query_hook(
        self,
        group_ids: set[int],
        start: datetime,
        end: datetime,
        environment_id: int,
        group_on_time: bool = False,
    ) -> dict[int, int | float]:
        """Return per-group event counts for a batch of groups.

        Groups are split by category because error and generic (non-error)
        issues live in different TSDB models.
        """
        batch_sums: dict[int, int | float] = defaultdict(int)
        groups = Group.objects.filter(id__in=group_ids).values(
            "id", "type", "project_id", "project__organization_id"
        )
        error_issue_ids, generic_issue_ids = self.get_error_and_generic_group_ids(groups)
        organization_id = self.get_value_from_groups(groups, "project__organization_id")

        if error_issue_ids and organization_id:
            # Extract project_ids for error groups
            error_project_ids = [g["project_id"] for g in groups if g["id"] in error_issue_ids]
            error_sums = self.get_chunked_result(
                tsdb_function=self.tsdb.get_sums,
                model=get_issue_tsdb_group_model(GroupCategory.ERROR),
                group_ids=error_issue_ids,
                organization_id=organization_id,
                start=start,
                end=end,
                environment_id=environment_id,
                referrer_suffix="batch_alert_event_frequency",
                group_on_time=group_on_time,
                project_ids=error_project_ids,
            )
            batch_sums.update(error_sums)

        if generic_issue_ids and organization_id:
            # NOTE(review): unlike the error branch above, no project_ids are
            # passed here — confirm whether that asymmetry is intentional.
            generic_sums = self.get_chunked_result(
                tsdb_function=self.tsdb.get_sums,
                # this isn't necessarily performance, just any non-error category
                model=get_issue_tsdb_group_model(GroupCategory.PERFORMANCE),
                group_ids=generic_issue_ids,
                organization_id=organization_id,
                start=start,
                end=end,
                environment_id=environment_id,
                referrer_suffix="batch_alert_event_frequency",
                group_on_time=group_on_time,
            )
            batch_sums.update(generic_sums)

        return batch_sums

    def get_preview_aggregate(self) -> tuple[str, str]:
        return "count", "roundedTime"
EventFrequencyCondition
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 646450, "end": 646787 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field("TeamDiscussionComment", graphql_name="node")
TeamDiscussionCommentEdge
python
getsentry__sentry
tests/sentry/integrations/api/endpoints/test_integration_proxy.py
{ "start": 2451, "end": 24677 }
class ____(APITestCase): endpoint = "sentry-api-0-internal-integration-proxy" secret = SENTRY_SUBNET_SECRET def setUp(self) -> None: self.factory = RequestFactory() self.proxy_path = "chat.postMessage" self.endpoint_cls = InternalIntegrationProxyEndpoint() self.endpoint_cls.proxy_path = self.proxy_path self.path = f"{PROXY_BASE_PATH}/" self.integration = self.create_integration( self.organization, external_id="example:1", provider="example" ) self.org_integration = OrganizationIntegration.objects.get( integration_id=self.integration.id ) self.valid_header_kwargs = self.create_request_headers( integration_id=self.org_integration.id, signature_path=self.proxy_path ) self.valid_request = self.factory.get(self.path, **self.valid_header_kwargs) def assert_metric_count( self, *, metric_name: str, count: int, mock_metrics: MagicMock, metric_prefix: str = "hybrid_cloud.integration_proxy", kwargs_to_match: dict[str, Any] | None = None, ): metric_name = f"{metric_prefix}.{metric_name}" logged_metrics = {call.args[0] for call in mock_metrics.call_args_list} metric_in_set = metric_name in logged_metrics # Depending on the count, we assert the metric is in the set or not if count == 0: assert not metric_in_set, f"Metric {metric_name} found in {logged_metrics}" else: assert metric_in_set, f"Metric {metric_name} not found in {logged_metrics}" # Finding matching metric calls with the same name matching_mock_calls = [ call for call in mock_metrics.call_args_list if call.args[0] == metric_name ] assert len(matching_mock_calls) == count if kwargs_to_match is not None: for call in matching_mock_calls: assert call.kwargs == kwargs_to_match def assert_failure_metric_count( self, *, failure_type: IntegrationProxyFailureMetricType, count: int, mock_metrics: MagicMock, tags: Tags | None = None, ): metric_name = "proxy_failure" kwargs: dict[str, Any] = { "sample_rate": 1.0, "tags": {"failure_type": failure_type, **(tags or {})}, } self.assert_metric_count( metric_name=metric_name, 
count=count, mock_metrics=mock_metrics, kwargs_to_match=kwargs, ) def create_request_headers( self, signature_path, integration_id: int | None = None, request_body=b"", base_url="https://example.com/api", ): signature = encode_subnet_signature( secret=self.secret, base_url=base_url, path=signature_path, identifier=str(integration_id), request_body=request_body, ) return SiloHttpHeaders( HTTP_X_SENTRY_SUBNET_BASE_URL=base_url, HTTP_X_SENTRY_SUBNET_SIGNATURE=signature, HTTP_X_SENTRY_SUBNET_ORGANIZATION_INTEGRATION=str(integration_id), HTTP_X_SENTRY_SUBNET_PATH=signature_path, ) @override_settings(SENTRY_SUBNET_SECRET=SENTRY_SUBNET_SECRET, SILO_MODE=SiloMode.CONTROL) @patch.object(ExampleIntegration, "get_client") @patch.object(InternalIntegrationProxyEndpoint, "client", spec=IntegrationProxyClient) @patch.object(metrics, "incr") def test_proxy( self, mock_metrics: MagicMock, mock_client: MagicMock, mock_get_client: MagicMock ) -> None: signature_path = f"/{self.proxy_path}" headers = self.create_request_headers( signature_path=signature_path, integration_id=self.org_integration.id, ) mock_response = Mock(spec=Response) mock_response.content = str({"some": "data"}).encode("utf-8") mock_response.status_code = 400 mock_response.reason = "Bad Request" mock_response.headers = { "Content-Type": "application/json", "X-Arbitrary": "Value", PROXY_SIGNATURE_HEADER: "123", } mock_client.base_url = "https://example.com/api" mock_client.authorize_request = MagicMock(side_effect=lambda req: req) mock_client.request = MagicMock(return_value=mock_response) mock_get_client.return_value = mock_client proxy_response = self.client.get(self.path, **headers) prepared_request = mock_client.request.call_args.kwargs["prepared_request"] assert prepared_request.url == "https://example.com/api/chat.postMessage" assert prepared_request.headers == { "Cookie": "", "Content-Type": "application/octet-stream", } assert proxy_response.content == mock_response.content assert proxy_response.status_code 
== mock_response.status_code assert proxy_response.reason_phrase == mock_response.reason assert proxy_response["Content-Type"] == mock_response.headers["Content-Type"] assert proxy_response["X-Arbitrary"] == mock_response.headers["X-Arbitrary"] assert proxy_response.get(PROXY_SIGNATURE_HEADER) is None self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.INITIALIZE, count=1, mock_metrics=mock_metrics, kwargs_to_match={"sample_rate": 1.0, "tags": None}, ) self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.COMPLETE_RESPONSE_CODE, count=1, mock_metrics=mock_metrics, kwargs_to_match={"sample_rate": 1.0, "tags": {"status": 400}}, ) @override_settings(SENTRY_SUBNET_SECRET=SENTRY_SUBNET_SECRET, SILO_MODE=SiloMode.CONTROL) @patch.object(ExampleIntegration, "get_client") @patch.object(InternalIntegrationProxyEndpoint, "client", spec=IntegrationProxyClient) @patch.object(metrics, "incr") def test_proxy_with_different_base_url( self, mock_metrics: MagicMock, mock_client: MagicMock, mock_get_client: MagicMock ) -> None: signature_path = f"/{self.proxy_path}" headers = self.create_request_headers( signature_path=signature_path, integration_id=self.org_integration.id, base_url="https://foobar.example.com/api", ) mock_response = Mock(spec=Response) mock_response.content = str({"some": "data"}).encode("utf-8") mock_response.status_code = 400 mock_response.reason = "Bad Request" mock_response.headers = { "Content-Type": "application/json", "X-Arbitrary": "Value", PROXY_SIGNATURE_HEADER: "123", } mock_client.base_url = "https://example.com/api" mock_client.authorize_request = MagicMock(side_effect=lambda req: req) mock_client.request = MagicMock(return_value=mock_response) mock_get_client.return_value = mock_client proxy_response = self.client.get(self.path, **headers) prepared_request = mock_client.request.call_args.kwargs["prepared_request"] assert prepared_request.url == "https://foobar.example.com/api/chat.postMessage" assert 
prepared_request.headers == { "Cookie": "", "Content-Type": "application/octet-stream", } assert proxy_response.content == mock_response.content assert proxy_response.status_code == mock_response.status_code assert proxy_response.reason_phrase == mock_response.reason assert proxy_response["Content-Type"] == mock_response.headers["Content-Type"] assert proxy_response["X-Arbitrary"] == mock_response.headers["X-Arbitrary"] assert proxy_response.get(PROXY_SIGNATURE_HEADER) is None self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.INITIALIZE, count=1, mock_metrics=mock_metrics, kwargs_to_match={"sample_rate": 1.0, "tags": None}, ) self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.COMPLETE_RESPONSE_CODE, count=1, mock_metrics=mock_metrics, kwargs_to_match={"sample_rate": 1.0, "tags": {"status": 400}}, ) @override_settings(SENTRY_SUBNET_SECRET=SENTRY_SUBNET_SECRET, SILO_MODE=SiloMode.CONTROL) @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @patch.object(ExampleIntegration, "get_client") @patch.object(InternalIntegrationProxyEndpoint, "client", spec=IntegrationProxyClient) @patch.object(metrics, "incr") def test_proxy_request_with_missing_integration_id( self, mock_metrics: MagicMock, mock_client: MagicMock, mock_get_client: MagicMock, mock_record_event: MagicMock, ) -> None: signature_path = f"/{self.proxy_path}" headers = self.create_request_headers( signature_path=signature_path, integration_id=None, ) mock_response = Mock(spec=Response) mock_response.content = str({"foo": "bar"}).encode("utf-8") mock_response.status_code = 200 mock_response.headers = { "Content-Type": "application/json", "X-Arbitrary": "Value", PROXY_SIGNATURE_HEADER: "123", } mock_client.base_url = "https://example.com/api" mock_client.authorize_request = MagicMock(side_effect=lambda req: req) mock_client.request = MagicMock(return_value=mock_response) mock_get_client.return_value = mock_client proxy_response = 
self.client.get(self.path, **headers) assert proxy_response.status_code == 400 assert mock_client.request.call_count == 0 assert proxy_response.get(PROXY_SIGNATURE_HEADER) is None self.assert_failure_metric_count( failure_type=IntegrationProxyFailureMetricType.INVALID_REQUEST, count=1, mock_metrics=mock_metrics, ) # SLO assertions # SHOULD_PROXY (failure) assert_count_of_metric(mock_record_event, EventLifecycleOutcome.STARTED, 1) assert_count_of_metric(mock_record_event, EventLifecycleOutcome.FAILURE, 1) @override_settings(SENTRY_SUBNET_SECRET=secret, SILO_MODE=SiloMode.CONTROL) def test__validate_sender(self) -> None: # Missing header data header_kwargs = SiloHttpHeaders() request = self.factory.get(self.path, **header_kwargs) assert not self.endpoint_cls._validate_sender(request) # Bad header data header_kwargs = SiloHttpHeaders( HTTP_X_SENTRY_SUBNET_SIGNATURE="data", HTTP_X_SENTRY_SUBNET_ORGANIZATION_INTEGRATION="present", ) request = self.factory.get(self.path, **header_kwargs) assert not self.endpoint_cls._validate_sender(request) # Success assert self.endpoint_cls._validate_sender(self.valid_request) @override_settings(SENTRY_SUBNET_SECRET=secret, SILO_MODE=SiloMode.CONTROL) def test__validate_request(self): request = self.factory.get(self.path) assert not self.endpoint_cls._validate_request(request) @override_settings(SENTRY_SUBNET_SECRET=secret, SILO_MODE=SiloMode.CONTROL) def test__validate_header_data(self): self.org_integration.update(status=ObjectStatus.DISABLED) header_kwargs = SiloHttpHeaders( HTTP_X_SENTRY_SUBNET_ORGANIZATION_INTEGRATION=str(self.org_integration.id), ) request = self.factory.get(self.path, **header_kwargs) assert not self.endpoint_cls._validate_request(request) @override_settings(SENTRY_SUBNET_SECRET=secret, SILO_MODE=SiloMode.CONTROL) def test__validate_organization_integration(self): header_kwargs = SiloHttpHeaders( HTTP_X_SENTRY_SUBNET_ORGANIZATION_INTEGRATION="None", ) request = self.factory.get(self.path, **header_kwargs) assert 
not self.endpoint_cls._validate_request(request) @patch.object(metrics, "incr") @override_settings(SENTRY_SUBNET_SECRET=secret, SILO_MODE=SiloMode.CONTROL) def test__invalid_integration(self, mock_metrics): self.org_integration.update(status=ObjectStatus.ACTIVE) self.integration.update(status=ObjectStatus.DISABLED) header_kwargs = SiloHttpHeaders( HTTP_X_SENTRY_SUBNET_ORGANIZATION_INTEGRATION=str(self.org_integration.id), ) request = self.factory.get(self.path, **header_kwargs) assert not self.endpoint_cls._validate_request(request) self.assert_failure_metric_count( failure_type=IntegrationProxyFailureMetricType.INVALID_INTEGRATION, count=1, mock_metrics=mock_metrics, ) @patch.object(metrics, "incr") @patch.object(Integration, "get_installation") @override_settings(SENTRY_SUBNET_SECRET=secret, SILO_MODE=SiloMode.CONTROL) def test_invalid_client(self, mock_get_installation, mock_metrics): header_kwargs = SiloHttpHeaders( HTTP_X_SENTRY_SUBNET_ORGANIZATION_INTEGRATION=str(self.org_integration.id), ) self.integration.update(status=ObjectStatus.ACTIVE) mock_get_installation().get_client = MagicMock(return_value=ApiClient()) request = self.factory.get(self.path, **header_kwargs) assert not self.endpoint_cls._validate_request(request) self.assert_failure_metric_count( failure_type=IntegrationProxyFailureMetricType.INVALID_CLIENT, count=1, mock_metrics=mock_metrics, ) @patch.object(Integration, "get_installation") @patch.object(metrics, "incr") @override_settings(SENTRY_SUBNET_SECRET=secret, SILO_MODE=SiloMode.CONTROL) def test_successful_response(self, mock_metrics, mock_get_installation): header_kwargs = SiloHttpHeaders( HTTP_X_SENTRY_SUBNET_ORGANIZATION_INTEGRATION=str(self.org_integration.id), ) mock_get_installation().get_client = MagicMock( return_value=IntegrationProxyClient(org_integration_id=self.org_integration.id) ) request = self.factory.get(self.path, **header_kwargs) assert self.endpoint_cls._validate_request(request) # We don't expect there to be any metrics 
recorded for successful requests mock_metrics.assert_not_called() def raise_exception(self, exc_type: type[Exception], *args, **kwargs): raise exc_type(*args) @override_settings(SENTRY_SUBNET_SECRET=SENTRY_SUBNET_SECRET, SILO_MODE=SiloMode.CONTROL) @patch.object(ExampleIntegration, "get_client") @patch.object(InternalIntegrationProxyEndpoint, "client", spec=IntegrationProxyClient) @patch.object(metrics, "incr") def test_handles_identity_not_valid( self, mock_metrics: MagicMock, mock_client: MagicMock, mock_get_client: MagicMock ) -> None: signature_path = f"/{self.proxy_path}" headers = self.create_request_headers( signature_path=signature_path, integration_id=self.org_integration.id ) mock_client.base_url = "https://example.com/api" mock_client.authorize_request = MagicMock(side_effect=lambda req: req) mock_client.request = MagicMock( side_effect=lambda *args, **kwargs: self.raise_exception(exc_type=IdentityNotValid) ) mock_get_client.return_value = mock_client proxy_response = self.client.get(self.path, **headers) assert proxy_response.status_code == 400 assert proxy_response.data is None self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.INITIALIZE, count=1, mock_metrics=mock_metrics, kwargs_to_match={"sample_rate": 1.0, "tags": None}, ) self.assert_failure_metric_count( failure_type=IntegrationProxyFailureMetricType.INVALID_IDENTITY, count=1, mock_metrics=mock_metrics, ) self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.COMPLETE_RESPONSE_CODE, count=0, mock_metrics=mock_metrics, ) @override_settings(SENTRY_SUBNET_SECRET=SENTRY_SUBNET_SECRET, SILO_MODE=SiloMode.CONTROL) @patch.object(ExampleIntegration, "get_client") @patch.object(InternalIntegrationProxyEndpoint, "client", spec=IntegrationProxyClient) @patch.object(metrics, "incr") def test_handles_api_host_errors( self, mock_metrics: MagicMock, mock_client: MagicMock, mock_get_client: MagicMock ) -> None: signature_path = f"/{self.proxy_path}" headers = 
self.create_request_headers( signature_path=signature_path, integration_id=self.org_integration.id ) mock_client.base_url = "https://example.com/api" mock_client.authorize_request = MagicMock(side_effect=lambda req: req) mock_client.request = MagicMock( side_effect=lambda *args, **kwargs: self.raise_exception( ApiHostError, "API request failed" ) ) mock_get_client.return_value = mock_client proxy_response = self.client.get(self.path, **headers) assert proxy_response.status_code == 503 assert proxy_response.data is None self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.INITIALIZE, count=1, mock_metrics=mock_metrics, kwargs_to_match={"sample_rate": 1.0, "tags": None}, ) self.assert_failure_metric_count( failure_type=IntegrationProxyFailureMetricType.HOST_UNREACHABLE_ERROR, count=1, mock_metrics=mock_metrics, ) self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.COMPLETE_RESPONSE_CODE, count=0, mock_metrics=mock_metrics, kwargs_to_match={"tags": None}, ) @override_settings(SENTRY_SUBNET_SECRET=SENTRY_SUBNET_SECRET, SILO_MODE=SiloMode.CONTROL) @patch.object(ExampleIntegration, "get_client") @patch.object(InternalIntegrationProxyEndpoint, "client", spec=IntegrationProxyClient) @patch.object(metrics, "incr") def test_handles_api_timeout_error( self, mock_metrics: MagicMock, mock_client: MagicMock, mock_get_client: MagicMock ) -> None: signature_path = f"/{self.proxy_path}" headers = self.create_request_headers( signature_path=signature_path, integration_id=self.org_integration.id ) mock_client.base_url = "https://example.com/api" mock_client.authorize_request = MagicMock(side_effect=lambda req: req) mock_client.request = MagicMock( side_effect=lambda *args, **kwargs: self.raise_exception( ApiTimeoutError, "API request timed out" ) ) mock_get_client.return_value = mock_client proxy_response = self.client.get(self.path, **headers) assert proxy_response.status_code == 504 assert proxy_response.data is None self.assert_metric_count( 
metric_name=IntegrationProxySuccessMetricType.INITIALIZE, count=1, mock_metrics=mock_metrics, kwargs_to_match={"sample_rate": 1.0, "tags": None}, ) self.assert_failure_metric_count( failure_type=IntegrationProxyFailureMetricType.HOST_TIMEOUT_ERROR, count=1, mock_metrics=mock_metrics, ) self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.COMPLETE_RESPONSE_CODE, count=0, mock_metrics=mock_metrics, ) @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") @override_settings(SENTRY_SUBNET_SECRET=SENTRY_SUBNET_SECRET, SILO_MODE=SiloMode.CONTROL) @patch.object(ExampleIntegration, "get_client") @patch.object(InternalIntegrationProxyEndpoint, "client", spec=IntegrationProxyClient) @patch.object(metrics, "incr") def test_returns_500_for_unexpected_error( self, mock_metrics: MagicMock, mock_client: MagicMock, mock_get_client: MagicMock, mock_record_event: MagicMock, ) -> None: signature_path = f"/{self.proxy_path}" headers = self.create_request_headers( signature_path=signature_path, integration_id=self.org_integration.id ) mock_client.base_url = "https://example.com/api" mock_client.authorize_request = MagicMock(side_effect=lambda req: req) mock_client.request = MagicMock( side_effect=lambda *args, **kwargs: self.raise_exception(exc_type=Exception) ) mock_get_client.return_value = mock_client proxy_response = self.client.get(self.path, **headers) assert proxy_response.status_code == 500 self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.INITIALIZE, count=1, mock_metrics=mock_metrics, kwargs_to_match={"sample_rate": 1.0, "tags": None}, ) self.assert_failure_metric_count( failure_type=IntegrationProxyFailureMetricType.UNKNOWN_ERROR, count=1, mock_metrics=mock_metrics, ) self.assert_metric_count( metric_name=IntegrationProxySuccessMetricType.COMPLETE_RESPONSE_CODE, count=0, mock_metrics=mock_metrics, ) # SLO assertions # SHOULD_PROXY (success) -> PROXY_REQUEST (failure) assert_failure_metric(mock_record_event, 
Exception("Unknown error")) assert_count_of_metric(mock_record_event, EventLifecycleOutcome.STARTED, 2) assert_count_of_metric(mock_record_event, EventLifecycleOutcome.SUCCESS, 1) assert_count_of_metric(mock_record_event, EventLifecycleOutcome.FAILURE, 1)
InternalIntegrationProxyEndpointTest
python
getsentry__sentry
src/sentry/integrations/messaging/linkage.py
{ "start": 21796, "end": 26650 }
class ____(TeamLinkageView, ABC): @property def metrics_operation_key(self) -> str: return "unlink_team_view" def execute( self, request: HttpRequest, integration: RpcIntegration, params: Mapping[str, Any] ) -> HttpResponseBase: from sentry.integrations.mixins import ( SUCCESS_UNLINKED_TEAM_MESSAGE, SUCCESS_UNLINKED_TEAM_TITLE, ) from sentry.integrations.slack.views.unlink_team import INSUFFICIENT_ACCESS user = serialize_generic_user(request.user) if user is None: raise TypeError("Cannot unlink team without a logged-in user") integration_id: int = integration.id channel_id: str = params["channel_id"] channel_name: str = params["channel_name"] slack_id: str = params["slack_id"] response_url: str = params["response_url"] organization_id: str = params["organization_id"] logger_params = { "user_id": user.id, "integration_id": integration_id, "channel_id": channel_id, "channel_name": channel_name, "slack_id": slack_id, "response_url": response_url, "organization_id": organization_id, } om = OrganizationMember.objects.get_for_integration( integration, user, organization_id=int(organization_id) ).first() organization = om.organization if om else None if om is None or organization is None: logger.info("no-organization-found", extra=logger_params) self.capture_metric("failure.get_organization") return self.render_error_page( request, status=404, body_text="HTTP 404: Could not find the organization." 
) external_teams = ExternalActor.objects.filter( organization_id=organization.id, integration_id=integration.id, provider=self.provider.value, external_name=channel_name, external_id=channel_id, ) if len(external_teams) == 0: logger.info("no-team-found", extra=logger_params) self.capture_metric("failure.get_team") return self.render_error_page(request, status=404, body_text="HTTP 404: Team not found") team = external_teams[0].team if team is None: logger.info("no-team-found", extra=logger_params) self.capture_metric("failure.get_team") return self.render_error_page(request, status=404, body_text="HTTP 404: Team not found") logger_params["team_id"] = team.id # Error if you don't have a sufficient role and you're not a team admin # on the team you're trying to unlink. if not self.is_valid_role(om) and not is_team_admin(om, team=team): logger.info("invalid-role", extra=logger_params) self.capture_metric("failure.invalid_role") return self.render_error_page( request, status=403, body_text="HTTP 403: " + INSUFFICIENT_ACCESS ) if request.method == "GET": return render_to_response( "sentry/integrations/slack/unlink-team.html", request=request, context={ "team": team, "channel_name": channel_name, "provider": integration.get_provider(), "react_config": get_client_config(request, self.active_organization), }, ) idp = identity_service.get_provider( provider_ext_id=integration.external_id, provider_type=self.external_provider_enum.value, ) if not idp or not identity_service.get_identity( filter={"provider_id": idp.id, "identity_ext_id": slack_id} ): logger.info("identity-not-found", extra=logger_params) self.capture_metric("failure.identity_not_found") return self.render_error_page( request, status=403, body_text="HTTP 403: User identity does not exist" ) # Someone may have accidentally added multiple teams so unlink them all. 
for external_team in external_teams: external_team.delete() self.capture_metric("success.post") return render_to_response( "sentry/integrations/slack/unlinked-team.html", request=request, context={ "heading_text": SUCCESS_UNLINKED_TEAM_TITLE, "body_text": SUCCESS_UNLINKED_TEAM_MESSAGE.format(team=team.slug), "channel_id": channel_id, "team_id": integration.external_id, "react_config": get_client_config(request, self.active_organization), }, )
UnlinkTeamView
python
django-crispy-forms__django-crispy-forms
tests/forms.py
{ "start": 4952, "end": 5304 }
class ____(BaseModelForm): is_company = forms.CharField(label="company", required=False, widget=forms.CheckboxInput()) password2 = forms.CharField(label="re-enter password", max_length=30, required=True, widget=forms.PasswordInput()) class Meta: model = CrispyTestModel fields = ("email", "password2", "password")
SampleForm8
python
ray-project__ray
doc/source/serve/doc_code/model_composition/response_to_object_ref_example.py
{ "start": 301, "end": 407 }
class ____: def __call__(self) -> str: return "Hi from Serve deployment" @serve.deployment
SayHi
python
keras-team__keras
keras/src/layers/preprocessing/image_preprocessing/random_flip_test.py
{ "start": 239, "end": 779 }
class ____(layers.RandomFlip): def call(self, inputs, training=True): unbatched = len(inputs.shape) == 3 batch_size = 1 if unbatched else self.backend.shape(inputs)[0] mocked_value = self.backend.numpy.full( (batch_size, 1, 1, 1), 0.1, dtype="float32" ) with unittest.mock.patch.object( self.backend.random, "uniform", return_value=mocked_value, ): out = super().call(inputs, training=training) return out
MockedRandomFlip
python
keon__algorithms
algorithms/unionfind/count_islands.py
{ "start": 595, "end": 3876 }
class ____: """ A Union-Find data structure. Consider the following sequence of events: Starting with the elements 1, 2, 3, and 4: {1} {2} {3} {4} Initally they all live in their own sets, which means that `root(1) != root(3)`, however, if we call `unite(1, 3)` we would then have the following: {1,3} {2} {4} Now we have `root(1) == root(3)`, but it is still the case that `root(1) != root(2)`. We may call `unite(2, 4)` and end up with: {1,3} {2,4} Again we have `root(1) != root(2)`. But after `unite(3, 4)` we end up with: {1,2,3,4} which results in `root(1) == root(2)`. """ def __init__(self): self.parents = {} self.size = {} self.count = 0 def add(self, element): """ Add a new set containing the single element """ self.parents[element] = element self.size[element] = 1 self.count += 1 def root(self, element): """ Find the root element which represents the set of a given element. That is, all elements that are in the same set will return the same root element. """ while element != self.parents[element]: self.parents[element] = self.parents[self.parents[element]] element = self.parents[element] return element def unite(self, element1, element2): """ Finds the sets which contains the two elements and merges them into a single set. """ root1, root2 = self.root(element1), self.root(element2) if root1 == root2: return if self.size[root1] > self.size[root2]: root1, root2 = root2, root1 self.parents[root1] = root2 self.size[root2] += self.size[root1] self.count -= 1 def num_islands(positions): """ Given a list of positions to operate, count the number of islands after each addLand operation. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water. Given a 3x3 grid, positions = [[0,0], [0,1], [1,2], [2,1]]. Initially, the 2d grid grid is filled with water. (Assume 0 represents water and 1 represents land). 
0 0 0 0 0 0 0 0 0 Operation #1: addLand(0, 0) turns the water at grid[0][0] into a land. 1 0 0 0 0 0 Number of islands = 1 0 0 0 Operation #2: addLand(0, 1) turns the water at grid[0][1] into a land. 1 1 0 0 0 0 Number of islands = 1 0 0 0 Operation #3: addLand(1, 2) turns the water at grid[1][2] into a land. 1 1 0 0 0 1 Number of islands = 2 0 0 0 Operation #4: addLand(2, 1) turns the water at grid[2][1] into a land. 1 1 0 0 0 1 Number of islands = 3 0 1 0 """ ans = [] islands = Union() for position in map(tuple, positions): islands.add(position) for delta in (0, 1), (0, -1), (1, 0), (-1, 0): adjacent = (position[0] + delta[0], position[1] + delta[1]) if adjacent in islands.parents: islands.unite(position, adjacent) ans += [islands.count] return ans
Union
python
airbytehq__airbyte
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py
{ "start": 3473, "end": 4082 }
class ____(FBMarketingStream): """doc: https://developers.facebook.com/docs/marketing-api/reference/custom-audience""" entity_prefix = "customaudience" # The `rule` field is excluded from the list because it caused the error message "Please reduce the amount of data" for certain connections. # https://github.com/airbytehq/oncall/issues/2765 fields_exceptions = ["rule"] def list_objects(self, params: Mapping[str, Any], account_id: str) -> Iterable: return self._api.get_account(account_id=account_id).get_custom_audiences(params=params, fields=self.fields())
CustomAudiences
python
encode__httpx
httpx/_content.py
{ "start": 863, "end": 1681 }
class ____(SyncByteStream): CHUNK_SIZE = 65_536 def __init__(self, stream: Iterable[bytes]) -> None: self._stream = stream self._is_stream_consumed = False self._is_generator = inspect.isgenerator(stream) def __iter__(self) -> Iterator[bytes]: if self._is_stream_consumed and self._is_generator: raise StreamConsumed() self._is_stream_consumed = True if hasattr(self._stream, "read"): # File-like interfaces should use 'read' directly. chunk = self._stream.read(self.CHUNK_SIZE) while chunk: yield chunk chunk = self._stream.read(self.CHUNK_SIZE) else: # Otherwise iterate. for part in self._stream: yield part
IteratorByteStream
python
spulec__freezegun
tests/test_datetimes.py
{ "start": 18204, "end": 19462 }
class ____(unittest.TestCase): @freeze_time('2013-04-09') def test_method_decorator_works_on_unittest(self) -> None: self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today()) @freeze_time('2013-04-09', as_kwarg='frozen_time') def test_method_decorator_works_on_unittest_kwarg_frozen_time(self, frozen_time: Any) -> None: self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today()) self.assertEqual(datetime.date(2013, 4, 9), frozen_time.time_to_freeze.date()) @freeze_time('2013-04-09', as_kwarg='hello') def test_method_decorator_works_on_unittest_kwarg_hello(self, **kwargs: Any) -> None: self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today()) self.assertEqual(datetime.date(2013, 4, 9), kwargs.get('hello').time_to_freeze.date()) # type: ignore @freeze_time(lambda: datetime.date(year=2013, month=4, day=9), as_kwarg='frozen_time') def test_method_decorator_works_on_unittest_kwarg_frozen_time_with_func(self, frozen_time: Any) -> None: self.assertEqual(datetime.date(2013, 4, 9), datetime.date.today()) self.assertEqual(datetime.date(2013, 4, 9), frozen_time.time_to_freeze.date()) @freeze_time('2013-04-09')
TestUnitTestMethodDecorator
python
doocs__leetcode
solution/0800-0899/0814.Binary Tree Pruning/Solution.py
{ "start": 192, "end": 522 }
class ____: def pruneTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]: if root is None: return root root.left = self.pruneTree(root.left) root.right = self.pruneTree(root.right) if root.val == 0 and root.left == root.right: return None return root
Solution
python
huggingface__transformers
src/transformers/models/paligemma/processing_paligemma.py
{ "start": 1345, "end": 2868 }
class ____(ProcessingKwargs, total=False): text_kwargs: PaliGemmaTextKwargs _defaults = { "text_kwargs": { "padding": False, "return_mm_token_type_ids": False, }, "images_kwargs": { "data_format": "channels_first", }, } # Copied from transformers.models.idefics2.processing_idefics2.is_url def is_url(val) -> bool: return isinstance(val, str) and val.startswith("http") # Copied from transformers.models.idefics2.processing_idefics2.is_image_or_image_url def is_image_or_image_url(elem): return is_url(elem) or is_valid_image(elem) def _is_str_or_image(elem): return isinstance(elem, (str)) or is_image_or_image_url(elem) def build_string_from_input(prompt, bos_token, image_seq_len, image_token, num_images): """ Builds a string from the input prompt and image tokens. For example, for the call: build_string_from_input( prompt="Prefix str" bos_token="<s>", image_seq_len=3, image_token="<im>", ) The output will be: "<im><im><im><s>Initial str" Args: prompt (`list[Union[str, ImageInput]]`): The input prompt. bos_token (`str`): The beginning of sentence token. image_seq_len (`int`): The length of the image sequence. image_token (`str`): The image token. num_images (`int`): Number of images in the prompt. """ return f"{image_token * image_seq_len * num_images}{bos_token}{prompt}\n"
PaliGemmaProcessorKwargs
python
scipy__scipy
scipy/interpolate/_fitpack2.py
{ "start": 27258, "end": 32281 }
class ____(UnivariateSpline): """ 1-D spline with explicit internal knots. .. legacy:: class Specifically, we recommend using `make_lsq_spline` instead. Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t` specifies the internal knots of the spline Parameters ---------- x : (N,) array_like Input dimension of data points -- must be increasing y : (N,) array_like Input dimension of data points t : (M,) array_like interior knots of the spline. Must be in ascending order and:: bbox[0] < t[0] < ... < t[-1] < bbox[-1] w : (N,) array_like, optional weights for spline fitting. Must be positive. If None (default), weights are all 1. bbox : (2,) array_like, optional 2-sequence specifying the boundary of the approximation interval. If None (default), ``bbox = [x[0], x[-1]]``. k : int, optional Degree of the smoothing spline. Must be 1 <= `k` <= 5. Default is `k` = 3, a cubic spline. ext : int or str, optional Controls the extrapolation mode for elements not in the interval defined by the knot sequence. * if ext=0 or 'extrapolate', return the extrapolated value. * if ext=1 or 'zeros', return 0 * if ext=2 or 'raise', raise a ValueError * if ext=3 of 'const', return the boundary value. The default value is 0. check_finite : bool, optional Whether to check that the input arrays contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination or non-sensical results) if the inputs do contain infinities or NaNs. Default is False. Raises ------ ValueError If the interior knots do not satisfy the Schoenberg-Whitney conditions See Also -------- UnivariateSpline : a smooth univariate spline to fit a given set of data points. InterpolatedUnivariateSpline : a interpolating univariate spline for a given set of data points. 
splrep : a function to find the B-spline representation of a 1-D curve splev : a function to evaluate a B-spline or its derivatives sproot : a function to find the roots of a cubic B-spline splint : a function to evaluate the definite integral of a B-spline between two given points spalde : a function to evaluate all derivatives of a B-spline Notes ----- The number of data points must be larger than the spline degree `k`. Knots `t` must satisfy the Schoenberg-Whitney conditions, i.e., there must be a subset of data points ``x[j]`` such that ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``. Examples -------- >>> import numpy as np >>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() >>> x = np.linspace(-3, 3, 50) >>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50) Fit a smoothing spline with a pre-defined internal knots: >>> t = [-1, 0, 1] >>> spl = LSQUnivariateSpline(x, y, t) >>> xs = np.linspace(-3, 3, 1000) >>> plt.plot(x, y, 'ro', ms=5) >>> plt.plot(xs, spl(xs), 'g-', lw=3) >>> plt.show() Check the knot vector: >>> spl.get_knots() array([-3., -1., 0., 1., 3.]) Constructing lsq spline using the knots from another spline: >>> x = np.arange(10) >>> s = UnivariateSpline(x, x, s=0) >>> s.get_knots() array([ 0., 2., 3., 4., 5., 6., 7., 9.]) >>> knt = s.get_knots() >>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot >>> s1.get_knots() array([ 0., 2., 3., 4., 5., 6., 7., 9.]) """ def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3, ext=0, check_finite=False): x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None, ext, check_finite) if not np.all(diff(x) >= 0.0): raise ValueError('x must be increasing') # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier xb = bbox[0] xe = bbox[1] if xb is None: xb = x[0] if xe is None: xe = x[-1] t = concatenate(([xb]*(k+1), t, [xe]*(k+1))) n = len(t) if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0): raise 
ValueError('Interior knots t must satisfy ' 'Schoenberg-Whitney conditions') with FITPACK_LOCK: if not dfitpack.fpchec(x, t, k) == 0: raise ValueError(_fpchec_error_string) data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe) self._data = data[:-3] + (None, None, data[-1]) self._reset_class() # ############### Bivariate spline ####################
LSQUnivariateSpline
python
huggingface__transformers
src/transformers/models/lxmert/modeling_lxmert.py
{ "start": 12828, "end": 15663 }
class ____(nn.Module): def __init__(self, config, ctx_dim=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.head_size = self.num_attention_heads * self.attention_head_size # visual_dim = 2048 if ctx_dim is None: ctx_dim = config.hidden_size self.query = nn.Linear(config.hidden_size, self.head_size) self.key = nn.Linear(ctx_dim, self.head_size) self.value = nn.Linear(ctx_dim, self.head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states, context, attention_mask=None, output_attentions=False): batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(context).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) ) value_layer = ( self.value(context) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) if attention_mask is not None: attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
LxmertAttention
python
ray-project__ray
python/ray/serve/tests/test_config_files/fail_2.py
{ "start": 55, "end": 146 }
class ____: def __init__(self): time.sleep(5) _ = 1 / 0 node = A.bind()
A
python
gevent__gevent
src/gevent/tests/test__threadpool.py
{ "start": 595, "end": 1861 }
class ____(greentest.TestCase): # These generally need more time __timeout__ = greentest.LARGE_TIMEOUT pool = None _all_pools = () ClassUnderTest = ThreadPool def _FUT(self): return self.ClassUnderTest def _makeOne(self, maxsize, create_all_worker_threads=greentest.RUN_LEAKCHECKS): self.pool = pool = self._FUT()(maxsize) self._all_pools += (pool,) if create_all_worker_threads: # Max size to help eliminate false positives self.pool.size = maxsize return pool def cleanup(self): self.pool = None all_pools, self._all_pools = self._all_pools, () for pool in all_pools: kill = getattr(pool, 'kill', None) or getattr(pool, 'shutdown') kill() del kill if greentest.RUN_LEAKCHECKS: # Each worker thread created a greenlet object and switched to it. # It's a custom subclass, but even if it's not, it appears that # the root greenlet for the new thread sticks around until there's a # gc. Simply calling 'getcurrent()' is enough to "leak" a greenlet.greenlet # and a weakref. for _ in range(3): gc.collect()
TestCase
python
encode__django-rest-framework
rest_framework/fields.py
{ "start": 32566, "end": 34113 }
class ____(Field): default_error_messages = { 'invalid': _('A valid number is required.'), 'max_value': _('Ensure this value is less than or equal to {max_value}.'), 'min_value': _('Ensure this value is greater than or equal to {min_value}.'), 'max_string_length': _('String value too large.'), 'overflow': _('Integer value too large to convert to float') } MAX_STRING_LENGTH = 1000 # Guard against malicious string inputs. def __init__(self, **kwargs): self.max_value = kwargs.pop('max_value', None) self.min_value = kwargs.pop('min_value', None) super().__init__(**kwargs) if self.max_value is not None: message = lazy_format(self.error_messages['max_value'], max_value=self.max_value) self.validators.append( MaxValueValidator(self.max_value, message=message)) if self.min_value is not None: message = lazy_format(self.error_messages['min_value'], min_value=self.min_value) self.validators.append( MinValueValidator(self.min_value, message=message)) def to_internal_value(self, data): if isinstance(data, str) and len(data) > self.MAX_STRING_LENGTH: self.fail('max_string_length') try: return float(data) except (TypeError, ValueError): self.fail('invalid') except OverflowError: self.fail('overflow') def to_representation(self, value): return float(value)
FloatField
python
huggingface__transformers
src/transformers/models/layoutlmv2/modeling_layoutlmv2.py
{ "start": 35770, "end": 45148 }
class ____(LayoutLMv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.layoutlmv2 = LayoutLMv2Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.layoutlmv2.embeddings.word_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, bbox: Optional[torch.LongTensor] = None, image: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" input_ids (`torch.LongTensor` of shape `batch_size, sequence_length`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`): Batch of document images. 
token_type_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Example: ```python >>> from transformers import AutoProcessor, LayoutLMv2ForSequenceClassification, set_seed >>> from PIL import Image >>> import torch >>> from datasets import load_dataset >>> set_seed(0) >>> dataset = load_dataset("aharley/rvl_cdip", split="train", streaming=True) >>> data = next(iter(dataset)) >>> image = data["image"].convert("RGB") >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") >>> model = LayoutLMv2ForSequenceClassification.from_pretrained( ... "microsoft/layoutlmv2-base-uncased", num_labels=dataset.info.features["label"].num_classes ... 
) >>> encoding = processor(image, return_tensors="pt") >>> sequence_label = torch.tensor([data["label"]]) >>> outputs = model(**encoding, labels=sequence_label) >>> loss, logits = outputs.loss, outputs.logits >>> predicted_idx = logits.argmax(dim=-1).item() >>> predicted_answer = dataset.info.features["label"].names[4] >>> predicted_idx, predicted_answer # results are not good without further fine-tuning (7, 'advertisement') ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device visual_shape = list(input_shape) visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1] visual_shape = torch.Size(visual_shape) final_shape = list(input_shape) final_shape[1] += visual_shape[1] final_shape = torch.Size(final_shape) visual_bbox = self.layoutlmv2._calc_visual_bbox( self.config.image_feature_pool_shape, bbox, device, final_shape ) visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat( input_shape[0], 1 ) initial_image_embeddings = self.layoutlmv2._calc_img_embeddings( image=image, bbox=visual_bbox, position_ids=visual_position_ids, ) outputs = self.layoutlmv2( input_ids=input_ids, bbox=bbox, image=image, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if input_ids is not None: 
input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] sequence_output, final_image_embeddings = outputs[0][:, :seq_length], outputs[0][:, seq_length:] cls_final_output = sequence_output[:, 0, :] # average-pool the visual embeddings pooled_initial_image_embeddings = initial_image_embeddings.mean(dim=1) pooled_final_image_embeddings = final_image_embeddings.mean(dim=1) # concatenate with cls_final_output sequence_output = torch.cat( [cls_final_output, pooled_initial_image_embeddings, pooled_final_image_embeddings], dim=1 ) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden states) e.g. 
for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda). """ )
LayoutLMv2ForSequenceClassification
python
pytorch__pytorch
test/inductor/test_aot_inductor_utils.py
{ "start": 704, "end": 913 }
class ____(torch.nn.Module): def __init__(self, model): super().__init__() self.model = model def forward(self, *args, **kwargs): return self.model(*args, **kwargs)
WrapperModule
python
bokeh__bokeh
src/bokeh/models/ui/icons.py
{ "start": 3866, "end": 5155 }
class ____(Icon): """ Icons from an external icon provider (https://tabler-icons.io/). .. note:: This icon set is MIT licensed (see https://github.com/tabler/tabler-icons/blob/master/LICENSE). .. note:: External icons are loaded from third-party servers and may not be available immediately (e.g. due to slow internet connection) or not available at all. It isn't possible to create a self-contained bundles with the use of ``inline`` resources. To circumvent this, one use ``SVGIcon``, by copying the SVG contents of an icon from Tabler's web site. """ # explicit __init__ to support Init signatures def __init__(self, icon_name: Init[str] = Intrinsic, **kwargs: Any) -> None: super().__init__(icon_name=icon_name, **kwargs) icon_name = Required(String, help=""" The name of the icon. See https://tabler-icons.io/ for the list of names. """) #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
TablerIcon
python
doocs__leetcode
solution/1000-1099/1078.Occurrences After Bigram/Solution.py
{ "start": 0, "end": 312 }
class ____: def findOcurrences(self, text: str, first: str, second: str) -> List[str]: words = text.split() ans = [] for i in range(len(words) - 2): a, b, c = words[i : i + 3] if a == first and b == second: ans.append(c) return ans
Solution
python
python__mypy
mypy/errors.py
{ "start": 5424, "end": 7957 }
class ____: """Context manager that can be used to keep track of new errors recorded around a given operation. Errors maintain a stack of such watchers. The handler is called starting at the top of the stack, and is propagated down the stack unless filtered out by one of the ErrorWatcher instances. """ # public attribute for the special treatment of `reveal_type` by # `MessageBuilder.reveal_type`: filter_revealed_type: bool def __init__( self, errors: Errors, *, filter_errors: bool | Callable[[str, ErrorInfo], bool] = False, save_filtered_errors: bool = False, filter_deprecated: bool = False, filter_revealed_type: bool = False, ) -> None: self.errors = errors self._has_new_errors = False self._filter = filter_errors self._filter_deprecated = filter_deprecated self.filter_revealed_type = filter_revealed_type self._filtered: list[ErrorInfo] | None = [] if save_filtered_errors else None def __enter__(self) -> Self: self.errors._watchers.append(self) return self def __exit__(self, exc_type: object, exc_val: object, exc_tb: object) -> Literal[False]: last = self.errors._watchers.pop() assert last == self return False def on_error(self, file: str, info: ErrorInfo) -> bool: """Handler called when a new error is recorded. The default implementation just sets the has_new_errors flag Return True to filter out the error, preventing it from being seen by other ErrorWatcher further down the stack and from being recorded by Errors """ if info.code == codes.DEPRECATED: # Deprecated is not a type error, so it is handled on opt-in basis here. 
if not self._filter_deprecated: return False self._has_new_errors = True if isinstance(self._filter, bool): should_filter = self._filter elif callable(self._filter): should_filter = self._filter(file, info) else: raise AssertionError(f"invalid error filter: {type(self._filter)}") if should_filter and self._filtered is not None: self._filtered.append(info) return should_filter def has_new_errors(self) -> bool: return self._has_new_errors def filtered_errors(self) -> list[ErrorInfo]: assert self._filtered is not None return self._filtered
ErrorWatcher
python
PyCQA__pylint
pylint/config/callback_actions.py
{ "start": 6581, "end": 7105 }
class ____(_AccessRunObjectAction): """Generate a pylintrc file.""" def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: str | Sequence[Any] | None, option_string: str | None = "--generate-rcfile", ) -> None: # TODO: 4.x: Deprecate this after the auto-upgrade functionality of # pylint-config is sufficient. self.run.linter._generate_config(skipsections=("Commands",)) sys.exit(0)
_GenerateRCFileAction
python
getsentry__sentry
src/sentry/db/models/fields/text.py
{ "start": 829, "end": 885 }
class ____(TextType, models.EmailField): pass
EmailField
python
getsentry__sentry
src/sentry/core/endpoints/organization_member_requests_join.py
{ "start": 2111, "end": 4220 }
class ____(OrganizationEndpoint): publish_status = { "POST": ApiPublishStatus.PRIVATE, } # Disable authentication and permission requirements. permission_classes = () rate_limits = RateLimitConfig( limit_overrides={ "POST": { RateLimitCategory.IP: RateLimit(limit=5, window=86400), RateLimitCategory.USER: RateLimit(limit=5, window=86400), RateLimitCategory.ORGANIZATION: RateLimit(limit=5, window=86400), }, } ) def post(self, request: Request, organization: Organization) -> Response: if organization.get_option("sentry:join_requests") is False: return Response( {"detail": "Your organization does not allow join requests."}, status=403 ) if is_demo_user(request.user): return Response(status=403) # users can already join organizations with SSO enabled without an invite # so they should join that way and not through a request to the admins provider = auth_service.get_auth_provider(organization_id=organization.id) if provider is not None: return Response(status=403) ip_address = request.META["REMOTE_ADDR"] if ratelimiter.backend.is_limited( f"org-join-request:ip:{ip_address}", limit=5, window=86400, # 5 per day, 60 x 60 x 24 ): return Response({"detail": "Rate limit exceeded."}, status=429) serializer = JoinRequestSerializer(data=request.data) if not serializer.is_valid(): return Response(serializer.errors, status=400) result = serializer.validated_data email = result["email"] member = create_organization_join_request(organization, email, ip_address) if member: async_send_notification(JoinRequestNotification, member, request.user) # legacy analytics join_request_created.send_robust(sender=self, member=member) return Response(status=204)
OrganizationJoinRequestEndpoint
python
langchain-ai__langchain
libs/core/langchain_core/rate_limiters.py
{ "start": 160, "end": 2285 }
class ____(abc.ABC): """Base class for rate limiters. Usage of the base limiter is through the acquire and aacquire methods depending on whether running in a sync or async context. Implementations are free to add a timeout parameter to their initialize method to allow users to specify a timeout for acquiring the necessary tokens when using a blocking call. Current limitations: - Rate limiting information is not surfaced in tracing or callbacks. This means that the total time it takes to invoke a chat model will encompass both the time spent waiting for tokens and the time spent making the request. """ @abc.abstractmethod def acquire(self, *, blocking: bool = True) -> bool: """Attempt to acquire the necessary tokens for the rate limiter. This method blocks until the required tokens are available if `blocking` is set to `True`. If `blocking` is set to `False`, the method will immediately return the result of the attempt to acquire the tokens. Args: blocking: If `True`, the method will block until the tokens are available. If `False`, the method will return immediately with the result of the attempt. Returns: `True` if the tokens were successfully acquired, `False` otherwise. """ @abc.abstractmethod async def aacquire(self, *, blocking: bool = True) -> bool: """Attempt to acquire the necessary tokens for the rate limiter. This method blocks until the required tokens are available if `blocking` is set to `True`. If `blocking` is set to `False`, the method will immediately return the result of the attempt to acquire the tokens. Args: blocking: If `True`, the method will block until the tokens are available. If `False`, the method will return immediately with the result of the attempt. Returns: `True` if the tokens were successfully acquired, `False` otherwise. """
BaseRateLimiter
python
redis__redis-py
redis/cluster.py
{ "start": 121660, "end": 134141 }
class ____(AbstractStrategy): NO_SLOTS_COMMANDS = {"UNWATCH"} IMMEDIATE_EXECUTE_COMMANDS = {"WATCH", "UNWATCH"} UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"} SLOT_REDIRECT_ERRORS = (AskError, MovedError) CONNECTION_ERRORS = ( ConnectionError, OSError, ClusterDownError, SlotNotCoveredError, ) def __init__(self, pipe: ClusterPipeline): super().__init__(pipe) self._explicit_transaction = False self._watching = False self._pipeline_slots: Set[int] = set() self._transaction_connection: Optional[Connection] = None self._executing = False self._retry = copy(self._pipe.retry) self._retry.update_supported_errors( RedisCluster.ERRORS_ALLOW_RETRY + self.SLOT_REDIRECT_ERRORS ) def _get_client_and_connection_for_transaction(self) -> Tuple[Redis, Connection]: """ Find a connection for a pipeline transaction. For running an atomic transaction, watch keys ensure that contents have not been altered as long as the watch commands for those keys were sent over the same connection. So once we start watching a key, we fetch a connection to the node that owns that slot and reuse it. 
""" if not self._pipeline_slots: raise RedisClusterException( "At least a command with a key is needed to identify a node" ) node: ClusterNode = self._nodes_manager.get_node_from_slot( list(self._pipeline_slots)[0], False ) redis_node: Redis = self._pipe.get_redis_connection(node) if self._transaction_connection: if not redis_node.connection_pool.owns_connection( self._transaction_connection ): previous_node = self._nodes_manager.find_connection_owner( self._transaction_connection ) previous_node.connection_pool.release(self._transaction_connection) self._transaction_connection = None if not self._transaction_connection: self._transaction_connection = get_connection(redis_node) return redis_node, self._transaction_connection def execute_command(self, *args, **kwargs): slot_number: Optional[int] = None if args[0] not in ClusterPipeline.NO_SLOTS_COMMANDS: slot_number = self._pipe.determine_slot(*args) if ( self._watching or args[0] in self.IMMEDIATE_EXECUTE_COMMANDS ) and not self._explicit_transaction: if args[0] == "WATCH": self._validate_watch() if slot_number is not None: if self._pipeline_slots and slot_number not in self._pipeline_slots: raise CrossSlotTransactionError( "Cannot watch or send commands on different slots" ) self._pipeline_slots.add(slot_number) elif args[0] not in self.NO_SLOTS_COMMANDS: raise RedisClusterException( f"Cannot identify slot number for command: {args[0]}," "it cannot be triggered in a transaction" ) return self._immediate_execute_command(*args, **kwargs) else: if slot_number is not None: self._pipeline_slots.add(slot_number) return self.pipeline_execute_command(*args, **kwargs) def _validate_watch(self): if self._explicit_transaction: raise RedisError("Cannot issue a WATCH after a MULTI") self._watching = True def _immediate_execute_command(self, *args, **options): return self._retry.call_with_retry( lambda: self._get_connection_and_send_command(*args, **options), self._reinitialize_on_error, ) def 
_get_connection_and_send_command(self, *args, **options): redis_node, connection = self._get_client_and_connection_for_transaction() return self._send_command_parse_response( connection, redis_node, args[0], *args, **options ) def _send_command_parse_response( self, conn, redis_node: Redis, command_name, *args, **options ): """ Send a command and parse the response """ conn.send_command(*args) output = redis_node.parse_response(conn, command_name, **options) if command_name in self.UNWATCH_COMMANDS: self._watching = False return output def _reinitialize_on_error(self, error): if self._watching: if type(error) in self.SLOT_REDIRECT_ERRORS and self._executing: raise WatchError("Slot rebalancing occurred while watching keys") if ( type(error) in self.SLOT_REDIRECT_ERRORS or type(error) in self.CONNECTION_ERRORS ): if self._transaction_connection: self._transaction_connection = None self._pipe.reinitialize_counter += 1 if self._pipe._should_reinitialized(): self._nodes_manager.initialize() self.reinitialize_counter = 0 else: if isinstance(error, AskError): self._nodes_manager.update_moved_exception(error) self._executing = False def _raise_first_error(self, responses, stack): """ Raise the first exception on the stack """ for r, cmd in zip(responses, stack): if isinstance(r, Exception): self.annotate_exception(r, cmd.position + 1, cmd.args) raise r def execute(self, raise_on_error: bool = True) -> List[Any]: stack = self._command_queue if not stack and (not self._watching or not self._pipeline_slots): return [] return self._execute_transaction_with_retries(stack, raise_on_error) def _execute_transaction_with_retries( self, stack: List["PipelineCommand"], raise_on_error: bool ): return self._retry.call_with_retry( lambda: self._execute_transaction(stack, raise_on_error), self._reinitialize_on_error, ) def _execute_transaction( self, stack: List["PipelineCommand"], raise_on_error: bool ): if len(self._pipeline_slots) > 1: raise CrossSlotTransactionError( "All keys 
involved in a cluster transaction must map to the same slot" ) self._executing = True redis_node, connection = self._get_client_and_connection_for_transaction() stack = chain( [PipelineCommand(("MULTI",))], stack, [PipelineCommand(("EXEC",))], ) commands = [c.args for c in stack if EMPTY_RESPONSE not in c.options] packed_commands = connection.pack_commands(commands) connection.send_packed_command(packed_commands) errors = [] # parse off the response for MULTI # NOTE: we need to handle ResponseErrors here and continue # so that we read all the additional command messages from # the socket try: redis_node.parse_response(connection, "MULTI") except ResponseError as e: self.annotate_exception(e, 0, "MULTI") errors.append(e) except self.CONNECTION_ERRORS as cluster_error: self.annotate_exception(cluster_error, 0, "MULTI") raise # and all the other commands for i, command in enumerate(self._command_queue): if EMPTY_RESPONSE in command.options: errors.append((i, command.options[EMPTY_RESPONSE])) else: try: _ = redis_node.parse_response(connection, "_") except self.SLOT_REDIRECT_ERRORS as slot_error: self.annotate_exception(slot_error, i + 1, command.args) errors.append(slot_error) except self.CONNECTION_ERRORS as cluster_error: self.annotate_exception(cluster_error, i + 1, command.args) raise except ResponseError as e: self.annotate_exception(e, i + 1, command.args) errors.append(e) response = None # parse the EXEC. try: response = redis_node.parse_response(connection, "EXEC") except ExecAbortError: if errors: raise errors[0] raise self._executing = False # EXEC clears any watched keys self._watching = False if response is None: raise WatchError("Watched variable changed.") # put any parse errors into the response for i, e in errors: response.insert(i, e) if len(response) != len(self._command_queue): raise InvalidPipelineStack( "Unexpected response length for cluster pipeline EXEC." 
" Command stack was {} but response had length {}".format( [c.args[0] for c in self._command_queue], len(response) ) ) # find any errors in the response and raise if necessary if raise_on_error or len(errors) > 0: self._raise_first_error( response, self._command_queue, ) # We have to run response callbacks manually data = [] for r, cmd in zip(response, self._command_queue): if not isinstance(r, Exception): command_name = cmd.args[0] if command_name in self._pipe.cluster_response_callbacks: r = self._pipe.cluster_response_callbacks[command_name]( r, **cmd.options ) data.append(r) return data def reset(self): self._command_queue = [] # make sure to reset the connection state in the event that we were # watching something if self._transaction_connection: try: if self._watching: # call this manually since our unwatch or # immediate_execute_command methods can call reset() self._transaction_connection.send_command("UNWATCH") self._transaction_connection.read_response() # we can safely return the connection to the pool here since we're # sure we're no longer WATCHing anything node = self._nodes_manager.find_connection_owner( self._transaction_connection ) node.redis_connection.connection_pool.release( self._transaction_connection ) self._transaction_connection = None except self.CONNECTION_ERRORS: # disconnect will also remove any previous WATCHes if self._transaction_connection: self._transaction_connection.disconnect() # clean up the other instance attributes self._watching = False self._explicit_transaction = False self._pipeline_slots = set() self._executing = False def send_cluster_commands( self, stack, raise_on_error=True, allow_redirections=True ): raise NotImplementedError( "send_cluster_commands cannot be executed in transactional context." 
) def multi(self): if self._explicit_transaction: raise RedisError("Cannot issue nested calls to MULTI") if self._command_queue: raise RedisError( "Commands without an initial WATCH have already been issued" ) self._explicit_transaction = True def watch(self, *names): if self._explicit_transaction: raise RedisError("Cannot issue a WATCH after a MULTI") return self.execute_command("WATCH", *names) def unwatch(self): if self._watching: return self.execute_command("UNWATCH") return True def discard(self): self.reset() def delete(self, *names): return self.execute_command("DEL", *names) def unlink(self, *names): return self.execute_command("UNLINK", *names)
TransactionStrategy
python
facebook__pyre-check
client/configuration/unwatched.py
{ "start": 408, "end": 1571 }
class ____: root: str checksum_path: str @staticmethod def from_json(json: Dict[str, object]) -> "UnwatchedFiles": root = json.get("root", None) if root is None: raise exceptions.InvalidConfiguration( "Missing `root` field in UnwatchedFiles" ) if not isinstance(root, str): raise exceptions.InvalidConfiguration( "`root` field in UnwatchedFiles must be a string" ) checksum_path = json.get("checksum_path", None) if checksum_path is None: raise exceptions.InvalidConfiguration( "Missing `checksum_path` field in UnwatchedFiles" ) if not isinstance(checksum_path, str): raise exceptions.InvalidConfiguration( "`checksum_path` field in UnwatchedFiles must be a string" ) return UnwatchedFiles(root=root, checksum_path=checksum_path) def to_json(self) -> Dict[str, str]: return { "root": self.root, "checksum_path": self.checksum_path, } @dataclasses.dataclass(frozen=True)
UnwatchedFiles
python
pennersr__django-allauth
tests/apps/socialaccount/providers/dwolla/tests.py
{ "start": 240, "end": 1001 }
class ____(OAuth2TestsMixin, TestCase): provider_id = DwollaProvider.id def get_mocked_response(self): return MockedResponse( HTTPStatus.OK, """{ "id": "123", "_links":{"account":{"href":"http://localhost"}}, "name":"John Doe" }""", ) def get_login_response_json(self, with_refresh_token=True): rt = "" if with_refresh_token: rt = ',"refresh_token": "testrf"' return ( """{ "uid":"weibo", "access_token":"testac", "_links":{"account":{"href":"http://localhost"}} %s }""" % rt ) def get_expected_to_str(self): return "John Doe"
DwollaTests
python
numpy__numpy
numpy/lib/tests/test_function_base.py
{ "start": 3789, "end": 6582 }
class ____: def test_axes(self): assert_raises(AxisError, np.flip, np.ones(4), axis=1) assert_raises(AxisError, np.flip, np.ones((4, 4)), axis=2) assert_raises(AxisError, np.flip, np.ones((4, 4)), axis=-3) assert_raises(AxisError, np.flip, np.ones((4, 4)), axis=(0, 3)) def test_basic_lr(self): a = get_mat(4) b = a[:, ::-1] assert_equal(np.flip(a, 1), b) a = [[0, 1, 2], [3, 4, 5]] b = [[2, 1, 0], [5, 4, 3]] assert_equal(np.flip(a, 1), b) def test_basic_ud(self): a = get_mat(4) b = a[::-1, :] assert_equal(np.flip(a, 0), b) a = [[0, 1, 2], [3, 4, 5]] b = [[3, 4, 5], [0, 1, 2]] assert_equal(np.flip(a, 0), b) def test_3d_swap_axis0(self): a = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) b = np.array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) assert_equal(np.flip(a, 0), b) def test_3d_swap_axis1(self): a = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) b = np.array([[[2, 3], [0, 1]], [[6, 7], [4, 5]]]) assert_equal(np.flip(a, 1), b) def test_3d_swap_axis2(self): a = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) b = np.array([[[1, 0], [3, 2]], [[5, 4], [7, 6]]]) assert_equal(np.flip(a, 2), b) def test_4d(self): a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5) for i in range(a.ndim): assert_equal(np.flip(a, i), np.flipud(a.swapaxes(0, i)).swapaxes(i, 0)) def test_default_axis(self): a = np.array([[1, 2, 3], [4, 5, 6]]) b = np.array([[6, 5, 4], [3, 2, 1]]) assert_equal(np.flip(a), b) def test_multiple_axes(self): a = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) assert_equal(np.flip(a, axis=()), a) b = np.array([[[5, 4], [7, 6]], [[1, 0], [3, 2]]]) assert_equal(np.flip(a, axis=(0, 2)), b) c = np.array([[[3, 2], [1, 0]], [[7, 6], [5, 4]]]) assert_equal(np.flip(a, axis=(1, 2)), c)
TestFlip
python
coleifer__peewee
playhouse/postgres_ext.py
{ "start": 6790, "end": 7094 }
class ____(Node): def __init__(self, field, value): self.field = field self.value = value def __sql__(self, ctx): return (ctx .sql(Value(self.value, unpack=False)) .literal('::') .sql(self.field.ddl_datatype(ctx)))
ArrayValue
python
readthedocs__readthedocs.org
readthedocs/profiles/views.py
{ "start": 4440, "end": 5614 }
class ____(DetailView): model = User template_name = "profiles/public/profile_detail.html" lookup_field = "username" def get_object(self): """ Get the user object. If organizations are enabled, show the profile to users in the same organization only. Otherwise, all users can see the profile of others. """ user = super().get_object() if not settings.RTD_ALLOW_ORGANIZATIONS: return user request_user = self.request.user if not request_user.is_authenticated: raise Http404() # Always allow users to see their own profile. if request_user == user: return user # Don't allow members to see another user profile if they don't share the same team. for org in Organization.objects.for_user(request_user): if user in AdminPermission.members(obj=org, user=request_user): return user raise Http404() def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context["profile"] = self.get_object().profile return context
ProfileDetail
python
HypothesisWorks__hypothesis
hypothesis-python/tests/django/toystore/models.py
{ "start": 818, "end": 971 }
class ____(models.Model): # See https://github.com/HypothesisWorks/hypothesis/issues/2369 model = models.CharField(max_length=100, unique=True)
Car
python
getsentry__sentry-python
tests/integrations/langgraph/test_langgraph.py
{ "start": 2837, "end": 25618 }
class ____: def __init__(self, name="test_pregel"): self.name = name self.graph_name = name def invoke(self, state, config=None): return {"messages": [MockMessage("Pregel response")]} async def ainvoke(self, state, config=None): return {"messages": [MockMessage("Async Pregel response")]} def test_langgraph_integration_init(): """Test LanggraphIntegration initialization with different parameters.""" integration = LanggraphIntegration() assert integration.include_prompts is True assert integration.identifier == "langgraph" assert integration.origin == "auto.ai.langgraph" integration = LanggraphIntegration(include_prompts=False) assert integration.include_prompts is False assert integration.identifier == "langgraph" assert integration.origin == "auto.ai.langgraph" @pytest.mark.parametrize( "send_default_pii, include_prompts", [ (True, True), (True, False), (False, True), (False, False), ], ) def test_state_graph_compile( sentry_init, capture_events, send_default_pii, include_prompts ): """Test StateGraph.compile() wrapper creates proper create_agent span.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) events = capture_events() graph = MockStateGraph() def original_compile(self, *args, **kwargs): return MockCompiledGraph(self.name) with patch("sentry_sdk.integrations.langgraph.StateGraph"): with start_transaction(): wrapped_compile = _wrap_state_graph_compile(original_compile) compiled_graph = wrapped_compile( graph, model="test-model", checkpointer=None ) assert compiled_graph is not None assert compiled_graph.name == "test_graph" tx = events[0] assert tx["type"] == "transaction" agent_spans = [span for span in tx["spans"] if span["op"] == OP.GEN_AI_CREATE_AGENT] assert len(agent_spans) == 1 agent_span = agent_spans[0] assert agent_span["description"] == "create_agent test_graph" assert agent_span["origin"] == "auto.ai.langgraph" assert 
agent_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "create_agent" assert agent_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" assert agent_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "test-model" assert SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS in agent_span["data"] tools_data = agent_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] assert tools_data == ["search_tool", "calculator"] assert len(tools_data) == 2 assert "search_tool" in tools_data assert "calculator" in tools_data @pytest.mark.parametrize( "send_default_pii, include_prompts", [ (True, True), (True, False), (False, True), (False, False), ], ) def test_pregel_invoke(sentry_init, capture_events, send_default_pii, include_prompts): """Test Pregel.invoke() wrapper creates proper invoke_agent span.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) events = capture_events() test_state = { "messages": [ MockMessage("Hello, can you help me?", name="user"), MockMessage("Of course! How can I assist you?", name="assistant"), ] } pregel = MockPregelInstance("test_graph") expected_assistant_response = "I'll help you with that task!" 
expected_tool_calls = [ { "id": "call_test_123", "type": "function", "function": {"name": "search_tool", "arguments": '{"query": "help"}'}, } ] def original_invoke(self, *args, **kwargs): input_messages = args[0].get("messages", []) new_messages = input_messages + [ MockMessage( content=expected_assistant_response, name="assistant", tool_calls=expected_tool_calls, ) ] return {"messages": new_messages} with start_transaction(): wrapped_invoke = _wrap_pregel_invoke(original_invoke) result = wrapped_invoke(pregel, test_state) assert result is not None tx = events[0] assert tx["type"] == "transaction" invoke_spans = [ span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] assert invoke_span["description"] == "invoke_agent test_graph" assert invoke_span["origin"] == "auto.ai.langgraph" assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "test_graph" assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" if send_default_pii and include_prompts: assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] request_messages = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] if isinstance(request_messages, str): import json request_messages = json.loads(request_messages) assert len(request_messages) == 2 assert request_messages[0]["content"] == "Hello, can you help me?" assert request_messages[1]["content"] == "Of course! How can I assist you?" 
response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == expected_assistant_response assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): import json tool_calls_data = json.loads(tool_calls_data) assert len(tool_calls_data) == 1 assert tool_calls_data[0]["id"] == "call_test_123" assert tool_calls_data[0]["function"]["name"] == "search_tool" else: assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) @pytest.mark.parametrize( "send_default_pii, include_prompts", [ (True, True), (True, False), (False, True), (False, False), ], ) def test_pregel_ainvoke(sentry_init, capture_events, send_default_pii, include_prompts): """Test Pregel.ainvoke() async wrapper creates proper invoke_agent span.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) events = capture_events() test_state = {"messages": [MockMessage("What's the weather like?", name="user")]} pregel = MockPregelInstance("async_graph") expected_assistant_response = "It's sunny and 72°F today!" 
expected_tool_calls = [ { "id": "call_weather_456", "type": "function", "function": {"name": "get_weather", "arguments": '{"location": "current"}'}, } ] async def original_ainvoke(self, *args, **kwargs): input_messages = args[0].get("messages", []) new_messages = input_messages + [ MockMessage( content=expected_assistant_response, name="assistant", tool_calls=expected_tool_calls, ) ] return {"messages": new_messages} async def run_test(): with start_transaction(): wrapped_ainvoke = _wrap_pregel_ainvoke(original_ainvoke) result = await wrapped_ainvoke(pregel, test_state) return result result = asyncio.run(run_test()) assert result is not None tx = events[0] assert tx["type"] == "transaction" invoke_spans = [ span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] assert invoke_span["description"] == "invoke_agent async_graph" assert invoke_span["origin"] == "auto.ai.langgraph" assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "async_graph" assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "async_graph" if send_default_pii and include_prompts: assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == expected_assistant_response assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): import json tool_calls_data = json.loads(tool_calls_data) assert len(tool_calls_data) == 1 assert tool_calls_data[0]["id"] == "call_weather_456" assert tool_calls_data[0]["function"]["name"] == "get_weather" else: assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) assert SPANDATA.GEN_AI_RESPONSE_TEXT not in 
invoke_span.get("data", {}) assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) def test_pregel_invoke_error(sentry_init, capture_events): """Test error handling during graph execution.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) events = capture_events() test_state = {"messages": [MockMessage("This will fail")]} pregel = MockPregelInstance("error_graph") def original_invoke(self, *args, **kwargs): raise Exception("Graph execution failed") with start_transaction(), pytest.raises(Exception, match="Graph execution failed"): wrapped_invoke = _wrap_pregel_invoke(original_invoke) wrapped_invoke(pregel, test_state) tx = events[0] invoke_spans = [ span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] assert invoke_span.get("status") == "internal_error" assert invoke_span.get("tags", {}).get("status") == "internal_error" def test_pregel_ainvoke_error(sentry_init, capture_events): """Test error handling during async graph execution.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) events = capture_events() test_state = {"messages": [MockMessage("This will fail async")]} pregel = MockPregelInstance("async_error_graph") async def original_ainvoke(self, *args, **kwargs): raise Exception("Async graph execution failed") async def run_error_test(): with start_transaction(), pytest.raises( Exception, match="Async graph execution failed" ): wrapped_ainvoke = _wrap_pregel_ainvoke(original_ainvoke) await wrapped_ainvoke(pregel, test_state) asyncio.run(run_error_test()) tx = events[0] invoke_spans = [ span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] assert invoke_span.get("status") == "internal_error" assert invoke_span.get("tags", {}).get("status") 
== "internal_error" def test_span_origin(sentry_init, capture_events): """Test that span origins are correctly set.""" sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) events = capture_events() graph = MockStateGraph() def original_compile(self, *args, **kwargs): return MockCompiledGraph(self.name) with start_transaction(): from sentry_sdk.integrations.langgraph import _wrap_state_graph_compile wrapped_compile = _wrap_state_graph_compile(original_compile) wrapped_compile(graph) tx = events[0] assert tx["contexts"]["trace"]["origin"] == "manual" for span in tx["spans"]: assert span["origin"] == "auto.ai.langgraph" @pytest.mark.parametrize("graph_name", ["my_graph", None, ""]) def test_pregel_invoke_with_different_graph_names( sentry_init, capture_events, graph_name ): """Test Pregel.invoke() with different graph name scenarios.""" sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, send_default_pii=True, ) events = capture_events() pregel = MockPregelInstance(graph_name) if graph_name else MockPregelInstance() if not graph_name: delattr(pregel, "name") delattr(pregel, "graph_name") def original_invoke(self, *args, **kwargs): return {"result": "test"} with start_transaction(): wrapped_invoke = _wrap_pregel_invoke(original_invoke) wrapped_invoke(pregel, {"messages": []}) tx = events[0] invoke_spans = [ span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] if graph_name and graph_name.strip(): assert invoke_span["description"] == "invoke_agent my_graph" assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == graph_name assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == graph_name else: assert invoke_span["description"] == "invoke_agent" assert SPANDATA.GEN_AI_PIPELINE_NAME not in invoke_span.get("data", {}) assert SPANDATA.GEN_AI_AGENT_NAME not in invoke_span.get("data", {}) def test_complex_message_parsing(): """Test message 
parsing with complex message structures.""" messages = [ MockMessage(content="User query", name="user"), MockMessage( content="Assistant response with tools", name="assistant", tool_calls=[ { "id": "call_1", "type": "function", "function": {"name": "search", "arguments": "{}"}, }, { "id": "call_2", "type": "function", "function": {"name": "calculate", "arguments": '{"x": 5}'}, }, ], ), MockMessage( content="Function call response", name="function", function_call={"name": "search", "arguments": '{"query": "test"}'}, ), ] state = {"messages": messages} result = _parse_langgraph_messages(state) assert result is not None assert len(result) == 3 assert result[0]["content"] == "User query" assert result[0]["name"] == "user" assert "tool_calls" not in result[0] assert "function_call" not in result[0] assert result[1]["content"] == "Assistant response with tools" assert result[1]["name"] == "assistant" assert len(result[1]["tool_calls"]) == 2 assert result[2]["content"] == "Function call response" assert result[2]["name"] == "function" assert result[2]["function_call"]["name"] == "search" def test_extraction_functions_complex_scenario(sentry_init, capture_events): """Test extraction functions with complex scenarios including multiple messages and edge cases.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) events = capture_events() pregel = MockPregelInstance("complex_graph") test_state = {"messages": [MockMessage("Complex request", name="user")]} def original_invoke(self, *args, **kwargs): input_messages = args[0].get("messages", []) new_messages = input_messages + [ MockMessage( content="I'll help with multiple tasks", name="assistant", tool_calls=[ { "id": "call_multi_1", "type": "function", "function": { "name": "search", "arguments": '{"query": "complex"}', }, }, { "id": "call_multi_2", "type": "function", "function": { "name": "calculate", "arguments": '{"expr": "2+2"}', }, }, ], ), 
MockMessage("", name="assistant"), MockMessage("Final response", name="ai", type="ai"), ] return {"messages": new_messages} with start_transaction(): wrapped_invoke = _wrap_pregel_invoke(original_invoke) result = wrapped_invoke(pregel, test_state) assert result is not None tx = events[0] invoke_spans = [ span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == "Final response" assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] import json tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): tool_calls_data = json.loads(tool_calls_data) assert len(tool_calls_data) == 2 assert tool_calls_data[0]["id"] == "call_multi_1" assert tool_calls_data[0]["function"]["name"] == "search" assert tool_calls_data[1]["id"] == "call_multi_2" assert tool_calls_data[1]["function"]["name"] == "calculate" def test_langgraph_message_role_mapping(sentry_init, capture_events): """Test that Langgraph integration properly maps message roles like 'ai' to 'assistant'""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) events = capture_events() # Mock a langgraph message with mixed roles class MockMessage: def __init__(self, content, message_type="human"): self.content = content self.type = message_type # Create mock state with messages having different roles state_data = { "messages": [ MockMessage("System prompt", "system"), MockMessage("Hello", "human"), MockMessage("Hi there!", "ai"), # Should be mapped to "assistant" MockMessage("How can I help?", "assistant"), # Should stay "assistant" ] } compiled_graph = MockCompiledGraph("test_graph") pregel = MockPregelInstance(compiled_graph) with start_transaction(name="langgraph tx"): 
# Use the wrapped invoke function directly from sentry_sdk.integrations.langgraph import _wrap_pregel_invoke wrapped_invoke = _wrap_pregel_invoke( lambda self, state_data: {"result": "success"} ) wrapped_invoke(pregel, state_data) (event,) = events span = event["spans"][0] # Verify that the span was created correctly assert span["op"] == "gen_ai.invoke_agent" # If messages were captured, verify role mapping if SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"]: import json stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) # Find messages with specific content to verify role mapping ai_message = next( (msg for msg in stored_messages if msg.get("content") == "Hi there!"), None ) assistant_message = next( (msg for msg in stored_messages if msg.get("content") == "How can I help?"), None, ) if ai_message: # "ai" should have been mapped to "assistant" assert ai_message["role"] == "assistant" if assistant_message: # "assistant" should stay "assistant" assert assistant_message["role"] == "assistant" # Verify no "ai" roles remain roles = [msg["role"] for msg in stored_messages if "role" in msg] assert "ai" not in roles def test_langgraph_message_truncation(sentry_init, capture_events): """Test that large messages are truncated properly in Langgraph integration.""" import json sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) events = capture_events() large_content = ( "This is a very long message that will exceed our size limits. 
" * 1000 ) test_state = { "messages": [ MockMessage("small message 1", name="user"), MockMessage(large_content, name="assistant"), MockMessage(large_content, name="user"), MockMessage("small message 4", name="assistant"), MockMessage("small message 5", name="user"), ] } pregel = MockPregelInstance("test_graph") def original_invoke(self, *args, **kwargs): return {"messages": args[0].get("messages", [])} with start_transaction(): wrapped_invoke = _wrap_pregel_invoke(original_invoke) result = wrapped_invoke(pregel, test_state) assert result is not None assert len(events) > 0 tx = events[0] assert tx["type"] == "transaction" invoke_spans = [ span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) > 0 invoke_span = invoke_spans[0] assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] messages_data = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) assert isinstance(parsed_messages, list) assert len(parsed_messages) == 2 assert "small message 4" in str(parsed_messages[0]) assert "small message 5" in str(parsed_messages[1]) assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5
MockPregelInstance
python
walkccc__LeetCode
solutions/1430. Check If a String Is a Valid Sequence from Root to Leaves Path in a Binary Tree/1430.py
{ "start": 0, "end": 463 }
class ____: def isValidSequence(self, root: TreeNode | None, arr: list[int]) -> bool: def isValidSequence(root: TreeNode | None, i: int) -> bool: if not root: return False if i == len(arr) - 1: return root.val == arr[i] and not root.left and not root.right return root.val == arr[i] and ( isValidSequence(root.left, i + 1) or isValidSequence(root.right, i + 1)) return isValidSequence(root, 0)
Solution
python
sympy__sympy
sympy/physics/quantum/piab.py
{ "start": 1016, "end": 1668 }
class ____(Ket): """Particle in a box eigenket.""" @classmethod def _eval_hilbert_space(cls, args): return L2(Interval(S.NegativeInfinity, S.Infinity)) @classmethod def dual_class(self): return PIABBra def _represent_default_basis(self, **options): return self._represent_XOp(None, **options) def _represent_XOp(self, basis, **options): x = Symbol('x') n = Symbol('n') subs_info = options.get('subs', {}) return sqrt(2/L)*sin(n*pi*x/L).subs(subs_info) def _eval_innerproduct_PIABBra(self, bra): return KroneckerDelta(bra.label[0], self.label[0])
PIABKet
python
mlflow__mlflow
mlflow/types/utils.py
{ "start": 3543, "end": 27759 }
class ____(MlflowException): def __init__(self, message): super().__init__(message=message, error_code=INVALID_PARAMETER_VALUE) def _infer_datatype(data: Any) -> DataType | Array | Object | AnyType | None: """ Infer the datatype of input data. Data type and inferred schema type mapping: - dict -> Object - list -> Array - numpy.ndarray -> Array - scalar -> DataType - None, empty dictionary/list -> AnyType .. Note:: Empty numpy arrays are inferred as None to keep the backward compatibility, as numpy arrays are used by some traditional ML flavors. e.g. numpy.array([]) -> None, numpy.array([[], []]) -> None While empty lists are inferred as AnyType instead of None after the support of AnyType. e.g. [] -> AnyType, [[], []] -> Array(Any) """ if isinstance(data, pydantic.BaseModel): raise InvalidDataForSignatureInferenceError( message="MLflow does not support inferring model signature from input example " "with Pydantic objects. To use Pydantic objects, define your PythonModel's " "`predict` method with a Pydantic type hint, and model signature will be automatically " "inferred when logging the model. e.g. " "`def predict(self, model_input: list[PydanticType])`. Check " "https://mlflow.org/docs/latest/model/python_model.html#type-hint-usage-in-pythonmodel " "for more details." ) if _is_none_or_nan(data) or (isinstance(data, (list, dict)) and not data): return AnyType() if isinstance(data, dict): properties = [] for k, v in data.items(): dtype = _infer_datatype(v) if dtype is None: raise MlflowException("Dictionary value must not be an empty numpy array.") properties.append( Property(name=k, dtype=dtype, required=not isinstance(dtype, AnyType)) ) return Object(properties=properties) if isinstance(data, (list, np.ndarray)): return _infer_array_datatype(data) return _infer_scalar_datatype(data) def _infer_array_datatype(data: list[Any] | np.ndarray) -> Array | None: """Infer schema from an array. 
This tries to infer type if there is at least one non-null item in the list, assuming the list has a homogeneous type. However, if the list is empty or all items are null, returns None as a sign of undetermined. E.g. ["a", "b"] => Array(string) ["a", None] => Array(string) [["a", "b"], []] => Array(Array(string)) [["a", "b"], None] => Array(Array(string)) [] => None [None] => Array(Any) Args: data: data to infer from. Returns: Array(dtype) or None if undetermined """ result = None for item in data: dtype = _infer_datatype(item) # Skip item with undetermined type if dtype is None: continue if result is None: result = Array(dtype) elif isinstance(result.dtype, (Array, Object, Map, AnyType)): try: result = Array(result.dtype._merge(dtype)) except MlflowException as e: raise MlflowException.invalid_parameter_value(MULTIPLE_TYPES_ERROR_MSG) from e elif isinstance(result.dtype, DataType): if not isinstance(dtype, AnyType) and dtype != result.dtype: raise MlflowException.invalid_parameter_value(MULTIPLE_TYPES_ERROR_MSG) else: raise MlflowException.invalid_parameter_value( f"{dtype} is not a valid type for an item of a list or numpy array." 
) return result # datetime is not included here SCALAR_TO_DATATYPE_MAPPING = { bool: DataType.boolean, np.bool_: DataType.boolean, int: DataType.long, np.int64: DataType.long, np.int32: DataType.integer, float: DataType.double, np.float64: DataType.double, np.float32: DataType.float, str: DataType.string, np.str_: DataType.string, object: DataType.string, bytes: DataType.binary, np.bytes_: DataType.binary, bytearray: DataType.binary, } def _infer_scalar_datatype(data) -> DataType: if data_type := SCALAR_TO_DATATYPE_MAPPING.get(type(data)): return data_type if DataType.check_type(DataType.datetime, data): return DataType.datetime if HAS_PYSPARK: for data_type in DataType.all_types(): if isinstance(data, type(data_type.to_spark())): return data_type raise MlflowException.invalid_parameter_value( f"Data {data} is not one of the supported DataType" ) def _infer_schema(data: Any) -> Schema: """ Infer an MLflow schema from a dataset. Data inputted as a numpy array or a dictionary is represented by :py:class:`TensorSpec`. All other inputted data types are specified by :py:class:`ColSpec`. A `TensorSpec` captures the data shape (default variable axis is 0), the data type (numpy.dtype) and an optional name for each individual tensor of the dataset. A `ColSpec` captures the data type (defined in :py:class:`DataType`) and an optional name for each individual column of the dataset. This method will raise an exception if the user data contains incompatible types or is not passed in one of the supported formats (containers). The input should be one of these: - pandas.DataFrame - pandas.Series - numpy.ndarray - dictionary of (name -> numpy.ndarray) - pyspark.sql.DataFrame - scipy.sparse.csr_matrix/csc_matrix - DataType - List[DataType] - Dict[str, Union[DataType, List, Dict]] - List[Dict[str, Union[DataType, List, Dict]]] The last two formats are used to represent complex data structures. 
For example, Input Data: [ { 'text': 'some sentence', 'ids': ['id1'], 'dict': {'key': 'value'} }, { 'text': 'some sentence', 'ids': ['id1', 'id2'], 'dict': {'key': 'value', 'key2': 'value2'} }, ] The corresponding pandas DataFrame representation should look like this: output ids dict 0 some sentence [id1, id2] {'key': 'value'} 1 some sentence [id1, id2] {'key': 'value', 'key2': 'value2'} The inferred schema should look like this: Schema([ ColSpec(type=DataType.string, name='output'), ColSpec(type=Array(dtype=DataType.string), name='ids'), ColSpec( type=Object([ Property(name='key', dtype=DataType.string), Property(name='key2', dtype=DataType.string, required=False) ]), name='dict')] ), ]) The element types should be mappable to one of :py:class:`mlflow.models.signature.DataType` for dataframes and to one of numpy types for tensors. Args: data: Dataset to infer from. Returns: Schema """ from scipy.sparse import csc_matrix, csr_matrix # To keep backward compatibility with < 2.9.0, an empty list is inferred as string. 
# ref: https://github.com/mlflow/mlflow/pull/10125#discussion_r1372751487 if isinstance(data, list) and data == []: return Schema([ColSpec(DataType.string)]) if isinstance(data, list) and all(isinstance(value, dict) for value in data): col_data_mapping = defaultdict(list) for item in data: for k, v in item.items(): col_data_mapping[k].append(v) requiredness = {} for col in col_data_mapping: # if col exists in item but its value is None, then it is not required requiredness[col] = all(item.get(col) is not None for item in data) schema = Schema( [ ColSpec(_infer_colspec_type(values).dtype, name=name, required=requiredness[name]) for name, values in col_data_mapping.items() ] ) elif isinstance(data, dict): # dictionary of (name -> numpy.ndarray) if all(isinstance(values, np.ndarray) for values in data.values()): schema = Schema( [ TensorSpec( type=clean_tensor_type(ndarray.dtype), shape=_get_tensor_shape(ndarray), name=name, ) for name, ndarray in data.items() ] ) # Dict[str, Union[DataType, List, Dict]] else: if any(not isinstance(key, str) for key in data): raise MlflowException("The dictionary keys are not all strings.") schema = Schema( [ ColSpec( _infer_colspec_type(value), name=name, required=_infer_required(value), ) for name, value in data.items() ] ) # pandas.Series elif isinstance(data, pd.Series): name = getattr(data, "name", None) schema = Schema( [ ColSpec( type=_infer_pandas_column(data), name=name, required=_infer_required(data), ) ] ) # pandas.DataFrame elif isinstance(data, pd.DataFrame): schema = Schema( [ ColSpec( type=_infer_pandas_column(data[col]), name=col, required=_infer_required(data[col]), ) for col in data.columns ] ) # numpy.ndarray elif isinstance(data, np.ndarray): schema = Schema( [TensorSpec(type=clean_tensor_type(data.dtype), shape=_get_tensor_shape(data))] ) # scipy.sparse.csr_matrix/csc_matrix elif isinstance(data, (csc_matrix, csr_matrix)): schema = Schema( [TensorSpec(type=clean_tensor_type(data.data.dtype), 
shape=_get_tensor_shape(data))] ) # pyspark.sql.DataFrame elif _is_spark_df(data): schema = Schema( [ ColSpec( type=_infer_spark_type(field.dataType, data, field.name), name=field.name, # Avoid setting required field for spark dataframe # as the default value for spark df nullable is True # which counterparts to default required=True in ColSpec ) for field in data.schema.fields ] ) elif isinstance(data, list): # Assume list as a single column # List[DataType] # e.g. ['some sentence', 'some sentence'] -> Schema([ColSpec(type=DataType.string)]) # The corresponding pandas DataFrame representation should be pd.DataFrame(data) # We set required=True as unnamed optional inputs is not allowed schema = Schema([ColSpec(_infer_colspec_type(data).dtype)]) else: # DataType # e.g. "some sentence" -> Schema([ColSpec(type=DataType.string)]) try: # We set required=True as unnamed optional inputs is not allowed schema = Schema([ColSpec(_infer_colspec_type(data))]) except MlflowException as e: raise MlflowException.invalid_parameter_value( "Failed to infer schema. Expected one of the following types:\n" "- pandas.DataFrame\n" "- pandas.Series\n" "- numpy.ndarray\n" "- dictionary of (name -> numpy.ndarray)\n" "- pyspark.sql.DataFrame\n" "- scipy.sparse.csr_matrix\n" "- scipy.sparse.csc_matrix\n" "- DataType\n" "- List[DataType]\n" "- Dict[str, Union[DataType, List, Dict]]\n" "- List[Dict[str, Union[DataType, List, Dict]]]\n" f"but got '{data}'.\n" f"Error: {e}", ) if not schema.is_tensor_spec() and any( t in (DataType.integer, DataType.long) for t in schema.input_types() ): warnings.warn( "Hint: Inferred schema contains integer column(s). Integer columns in " "Python cannot represent missing values. If your input data contains " "missing values at inference time, it will be encoded as floats and will " "cause a schema enforcement error. 
The best way to avoid this problem is " "to infer the model schema based on a realistic data sample (training " "dataset) that includes missing values. Alternatively, you can declare " "integer columns as doubles (float64) whenever these columns may have " "missing values. See `Handling Integers With Missing Values " "<https://www.mlflow.org/docs/latest/models.html#" "handling-integers-with-missing-values>`_ for more details." ) return schema def _infer_numpy_dtype(dtype) -> DataType: supported_types = np.dtype # noinspection PyBroadException try: from pandas.core.dtypes.base import ExtensionDtype supported_types = (np.dtype, ExtensionDtype) except ImportError: # This version of pandas does not support extension types pass if not isinstance(dtype, supported_types): raise TypeError(f"Expected numpy.dtype or pandas.ExtensionDtype, got '{type(dtype)}'.") if dtype.kind == "b": return DataType.boolean elif dtype.kind in {"i", "u"}: if dtype.itemsize < 4 or (dtype.kind == "i" and dtype.itemsize == 4): return DataType.integer elif dtype.itemsize < 8 or (dtype.kind == "i" and dtype.itemsize == 8): return DataType.long elif dtype.kind == "f": if dtype.itemsize <= 4: return DataType.float elif dtype.itemsize <= 8: return DataType.double elif dtype.kind == "U": return DataType.string elif dtype.kind == "S": return DataType.binary elif dtype.kind == "O": raise Exception( "Can not infer object without looking at the values, call _map_numpy_array instead." ) elif dtype.kind == "M": return DataType.datetime raise MlflowException(f"Unsupported numpy data type '{dtype}', kind '{dtype.kind}'") def _is_none_or_nan(x): if isinstance(x, float): return np.isnan(x) # NB: We can't use pd.isna() because the input can be a series. 
return x is None or x is pd.NA or x is pd.NaT def _infer_required(col) -> bool: if isinstance(col, (list, pd.Series)): return not any(_is_none_or_nan(x) for x in col) return not _is_none_or_nan(col) def _infer_pandas_column(col: pd.Series) -> DataType: if not isinstance(col, pd.Series): raise TypeError(f"Expected pandas.Series, got '{type(col)}'.") if len(col.values.shape) > 1: raise MlflowException(f"Expected 1d array, got array with shape {col.shape}") if col.dtype.kind == "O": col = col.infer_objects() if col.dtype.kind == "O": try: # We convert pandas Series into list and infer the schema. # The real schema for internal field should be the Array's dtype arr_type = _infer_colspec_type(col.to_list()) return arr_type.dtype except Exception as e: # For backwards compatibility, we fall back to string # if the provided array is of string type if pd.api.types.is_string_dtype(col): return DataType.string raise MlflowException(f"Failed to infer schema for pandas.Series {col}. Error: {e}") else: # NB: The following works for numpy types as well as pandas extension types. return _infer_numpy_dtype(col.dtype) def _infer_spark_type(x, data=None, col_name=None) -> DataType: import pyspark.sql.types from pyspark.ml.linalg import VectorUDT from pyspark.sql.functions import col, collect_list if isinstance(x, pyspark.sql.types.NumericType): if isinstance(x, pyspark.sql.types.IntegralType): if isinstance(x, pyspark.sql.types.LongType): return DataType.long else: return DataType.integer elif isinstance(x, pyspark.sql.types.FloatType): return DataType.float elif isinstance(x, pyspark.sql.types.DoubleType): return DataType.double elif isinstance(x, pyspark.sql.types.BooleanType): return DataType.boolean elif isinstance(x, pyspark.sql.types.StringType): return DataType.string elif isinstance(x, pyspark.sql.types.BinaryType): return DataType.binary # NB: Spark differentiates date and timestamps, so we coerce both to TimestampType. 
elif isinstance(x, (pyspark.sql.types.DateType, pyspark.sql.types.TimestampType)): return DataType.datetime elif isinstance(x, pyspark.sql.types.ArrayType): return Array(_infer_spark_type(x.elementType)) elif isinstance(x, pyspark.sql.types.StructType): return Object( properties=[ Property( name=f.name, dtype=_infer_spark_type(f.dataType), required=not f.nullable, ) for f in x.fields ] ) elif isinstance(x, pyspark.sql.types.MapType): if data is None or col_name is None: raise MlflowException("Cannot infer schema for MapType without data and column name.") # Map MapType to StructType # Note that MapType assumes all values are of same type, # if they're not then spark picks the first item's type # and tries to convert rest to that type. # e.g. # >>> spark.createDataFrame([{"col": {"a": 1, "b": "b"}}]).show() # +-------------------+ # | col| # +-------------------+ # |{a -> 1, b -> null}| # +-------------------+ if isinstance(x.valueType, pyspark.sql.types.MapType): raise MlflowException( "Please construct spark DataFrame with schema using StructType " "for dictionary/map fields, MLflow schema inference only supports " "scalar, array and struct types." ) merged_keys = ( data.selectExpr(f"map_keys({col_name}) as keys") .agg(collect_list(col("keys")).alias("merged_keys")) .head() .merged_keys ) keys = {key for sublist in merged_keys for key in sublist} return Object( properties=[ Property( name=k, dtype=_infer_spark_type(x.valueType), ) for k in keys ] ) elif isinstance(x, VectorUDT): return SparkMLVector() else: raise MlflowException.invalid_parameter_value( f"Unsupported Spark Type '{type(x)}' for MLflow schema." 
) def _is_spark_df(x) -> bool: try: import pyspark.sql.dataframe if isinstance(x, pyspark.sql.dataframe.DataFrame): return True except ImportError: return False # For spark 4.0 try: import pyspark.sql.connect.dataframe return isinstance(x, pyspark.sql.connect.dataframe.DataFrame) except ImportError: return False def _validate_input_dictionary_contains_only_strings_and_lists_of_strings(data) -> None: # isinstance(True, int) is True invalid_keys = [ key for key in data.keys() if not isinstance(key, (str, int)) or isinstance(key, bool) ] if invalid_keys: raise MlflowException( f"The dictionary keys are not all strings or indexes. Invalid keys: {invalid_keys}" ) if any(isinstance(value, np.ndarray) for value in data.values()) and not all( isinstance(value, np.ndarray) for value in data.values() ): raise MlflowException("The dictionary values are not all numpy.ndarray.") invalid_values = [ key for key, value in data.items() if (isinstance(value, list) and not all(isinstance(item, (str, bytes)) for item in value)) or (not isinstance(value, (np.ndarray, list, str, bytes))) ] if invalid_values: raise MlflowException.invalid_parameter_value( "Invalid values in dictionary. If passing a dictionary containing strings, all " "values must be either strings or lists of strings. If passing a dictionary containing " "numeric values, the data must be enclosed in a numpy.ndarray. 
The following keys " f"in the input dictionary are invalid: {invalid_values}", ) def _is_list_str(type_hint: Any) -> bool: return type_hint in [ List[str], # noqa: UP006 list[str], ] def _is_list_dict_str(type_hint: Any) -> bool: return type_hint in [ List[Dict[str, str]], # noqa: UP006 list[Dict[str, str]], # noqa: UP006 List[dict[str, str]], # noqa: UP006 list[dict[str, str]], ] def _get_array_depth(l: Any) -> int: if isinstance(l, np.ndarray): return l.ndim if isinstance(l, list): return max(_get_array_depth(item) for item in l) + 1 if l else 1 return 0 def _infer_type_and_shape(value): if isinstance(value, (list, np.ndarray)): ndim = _get_array_depth(value) if ndim != 1: raise MlflowException.invalid_parameter_value( f"Expected parameters to be 1D array or scalar, got {ndim}D array", ) if all(DataType.check_type(DataType.datetime, v) for v in value): return DataType.datetime, (-1,) value_type = _infer_numpy_dtype(np.array(value).dtype) return value_type, (-1,) elif DataType.check_type(DataType.datetime, value): return DataType.datetime, None elif np.isscalar(value): try: value_type = _infer_numpy_dtype(np.array(value).dtype) return value_type, None except (Exception, MlflowException) as e: raise MlflowException.invalid_parameter_value( f"Failed to infer schema for parameter {value}: {e!r}" ) elif isinstance(value, dict): # reuse _infer_schema to infer schema for dict, wrapping it in a dictionary is # necessary to make sure value is inferred as Object schema = _infer_schema({"value": value}) object_type = schema.inputs[0].type return object_type, None raise MlflowException.invalid_parameter_value( f"Expected parameters to be 1D array or scalar, got {type(value).__name__}", ) def _infer_param_schema(parameters: dict[str, Any]): if not isinstance(parameters, dict): raise MlflowException.invalid_parameter_value( f"Expected parameters to be dict, got {type(parameters).__name__}", ) param_specs = [] invalid_params = [] for name, value in parameters.items(): try: 
value_type, shape = _infer_type_and_shape(value) param_specs.append( ParamSpec(name=name, dtype=value_type, default=deepcopy(value), shape=shape) ) except Exception as e: invalid_params.append((name, value, e)) if invalid_params: raise MlflowException.invalid_parameter_value( f"Failed to infer schema for parameters: {invalid_params}", ) return ParamSchema(param_specs)
InvalidDataForSignatureInferenceError
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 255324, "end": 256155 }
class ____(_PrintableStructure): _fields_ = [ ('mask', c_uint * 8), ] NVML_WORKLOAD_POWER_MAX_PROFILES = 255 NVML_POWER_PROFILE_MAX_P = 0 NVML_POWER_PROFILE_MAX_Q = 1 NVML_POWER_PROFILE_COMPUTE = 2 NVML_POWER_PROFILE_MEMORY_BOUND = 3 NVML_POWER_PROFILE_NETWORK = 4 NVML_POWER_PROFILE_BALANCED = 5 NVML_POWER_PROFILE_LLM_INFERENCE = 6 NVML_POWER_PROFILE_LLM_TRAINING = 7 NVML_POWER_PROFILE_RBM = 8 NVML_POWER_PROFILE_DCPCIE = 9 NVML_POWER_PROFILE_HMMA_SPARSE = 10 NVML_POWER_PROFILE_HMMA_DENSE = 11 NVML_POWER_PROFILE_SYNC_BALANCED = 12 NVML_POWER_PROFILE_HPC = 13 NVML_POWER_PROFILE_MIG = 14 NVML_POWER_PROFILE_MAX = 15 nvmlWorkloadPowerProfileInfo_v1 = 0x100002c
c_nvmlMask255_t
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/translate.py
{ "start": 4107, "end": 4880 }
class ____(BaseGoogleLink): """ Helper class for constructing Translation results for the text batch translate. Provides link to output results. """ name = "Text Translate Batch" key = "translate_text_batch" format_str = TRANSLATION_TRANSLATE_TEXT_BATCH @staticmethod def extract_output_uri_prefix(output_config): return output_config["gcs_destination"]["output_uri_prefix"].rpartition("gs://")[-1] @classmethod def persist(cls, context: Context, **value): output_config = value.get("output_config") super().persist( context=context, project_id=value.get("project_id"), output_uri_prefix=cls.extract_output_uri_prefix(output_config), )
TranslateTextBatchLink