language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/evolla/modeling_evolla.py | {
"start": 59158,
"end": 62811
} | class ____(EvollaPreTrainedModel, GenerationMixin):
def __init__(self, config):
super().__init__(config)
self.model = EvollaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, self.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
return self.model.set_input_embeddings(value)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None, # text input ids
attention_mask: Optional[torch.Tensor] = None, # text attention mask
inputs_embeds: Optional[torch.FloatTensor] = None, # text input embeddings
labels: Optional[torch.LongTensor] = None,
protein_input_ids: Optional[torch.LongTensor] = None,
protein_attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
):
r"""
protein_input_ids (torch.LongTensor):
The input IDs for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.LongTensor`.
protein_attention_mask (torch.Tensor):
The attention mask for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.Tensor`.
Example:
```python
>>> from transformers import EvollaProcessor, EvollaForProteinText2Text
>>> model = EvollaForProteinText2Text.from_pretrained("westlake/Evolla-10B-hf")
>>> processor = EvollaProcessor.from_pretrained("westlake/Evolla-10B-hf")
>>> protein_information = {
"aa_seq": "your amino acid sequence",
"foldseek": "your foldseek sequence",
}
>>> question = "What is the function of this protein?"
>>> message = [
{"role": "system", "content": "You are an AI expert that can answer any questions about protein."},
{"role": "user", "content": question},
]
>>> inputs = processor(proteins=[protein_information], messages_list=[message], return_tensors="pt", padding="longest")
>>> outputs = model.generate(**inputs)
>>> print(processor.batch_decode(outputs, skip_special_tokens=True))
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
protein_input_ids=protein_input_ids,
protein_attention_mask=protein_attention_mask,
use_cache=use_cache,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size, **kwargs)
lm_outputs = CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
return lm_outputs
__all__ = ["EvollaForProteinText2Text", "EvollaModel", "EvollaPreTrainedModel"]
| EvollaForProteinText2Text |
python | EpistasisLab__tpot | tpot/builtin_modules/zero_count.py | {
"start": 1603,
"end": 2779
} | class ____(TransformerMixin, BaseEstimator ):
"""Adds the count of zeros and count of non-zeros per sample as features."""
def fit(self, X, y=None):
"""Dummy function to fit in with the sklearn API."""
return self
def transform(self, X, y=None):
"""Transform data by adding two virtual features.
Parameters
----------
X: numpy ndarray, {n_samples, n_components}
New data, where n_samples is the number of samples and n_components
is the number of components.
y: None
Unused
Returns
-------
X_transformed: array-like, shape (n_samples, n_features)
The transformed feature set
"""
X = check_array(X)
n_features = X.shape[1]
X_transformed = np.copy(X)
non_zero_vector = np.count_nonzero(X_transformed, axis=1)
non_zero = np.reshape(non_zero_vector, (-1, 1))
zero_col = np.reshape(n_features - non_zero_vector, (-1, 1))
X_transformed = np.hstack((non_zero, X_transformed))
X_transformed = np.hstack((zero_col, X_transformed))
return X_transformed
| ZeroCount |
python | fastapi__sqlmodel | docs_src/tutorial/indexes/tutorial002_py310.py | {
"start": 71,
"end": 1596
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.age <= 35)
results = session.exec(statement)
for hero in results:
print(hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI036.py | {
"start": 3021,
"end": 3185
} | class ____:
def __exit__(self, *args: Any) -> None: ... # PYI036: Bad star-args annotation
async def __aexit__(self) -> None: ... # PYI036: Missing args
| BadOne |
python | xlwings__xlwings | xlwings/base_classes.py | {
"start": 500,
"end": 3068
} | class ____:
@property
def xl(self):
raise NotImplementedError()
@xl.setter
def xl(self, value):
raise NotImplementedError()
@property
def api(self):
raise NotImplementedError()
@property
def selection(self):
raise NotImplementedError()
def activate(self, steal_focus=False):
raise NotImplementedError()
@property
def visible(self):
raise NotImplementedError()
@visible.setter
def visible(self, visible):
raise NotImplementedError()
def quit(self):
raise NotImplementedError()
def kill(self):
raise NotImplementedError()
@property
def screen_updating(self):
raise NotImplementedError()
@screen_updating.setter
def screen_updating(self, value):
raise NotImplementedError()
@property
def display_alerts(self):
raise NotImplementedError()
@display_alerts.setter
def display_alerts(self, value):
raise NotImplementedError()
@property
def enable_events(self):
raise NotImplementedError()
@enable_events.setter
def enable_events(self, value):
raise NotImplementedError()
@property
def interactive(self):
raise NotImplementedError()
@interactive.setter
def interactive(self, value):
raise NotImplementedError()
@property
def startup_path(self):
raise NotImplementedError()
@property
def calculation(self):
raise NotImplementedError()
@calculation.setter
def calculation(self, value):
raise NotImplementedError()
def calculate(self):
raise NotImplementedError()
@property
def version(self):
raise NotImplementedError()
@property
def books(self):
raise NotImplementedError()
@property
def hwnd(self):
raise NotImplementedError()
@property
def path(self):
raise NotImplementedError()
@property
def pid(self):
raise NotImplementedError()
def run(self, macro, args):
raise NotImplementedError()
@property
def status_bar(self):
raise NotImplementedError()
@status_bar.setter
def status_bar(self, value):
raise NotImplementedError()
@property
def cut_copy_mode(self):
raise NotImplementedError()
@cut_copy_mode.setter
def cut_copy_mode(self, value):
raise NotImplementedError()
def alert(self, prompt, title, buttons, mode, callback):
raise NotImplementedError()
| App |
python | sphinx-doc__sphinx | sphinx/ext/inheritance_diagram.py | {
"start": 3728,
"end": 12530
} | class ____:
"""Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
graphviz dot graph from them.
"""
def __init__(
self,
class_names: list[str],
currmodule: str,
show_builtins: bool = False,
private_bases: bool = False,
parts: int = 0,
aliases: dict[str, str] | None = None,
top_classes: Set[str] = frozenset(),
include_subclasses: bool = False,
) -> None:
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
classes: Collection[type[Any]] = self._import_classes(class_names, currmodule)
if include_subclasses:
classes_set = {*classes}
for cls in tuple(classes_set):
classes_set.update(_subclasses(cls))
classes = classes_set
self.class_info = self._class_info(
classes, show_builtins, private_bases, parts, aliases, top_classes
)
if not self.class_info:
msg = 'No classes found for inheritance diagram'
raise InheritanceException(msg)
def _import_classes(
self, class_names: list[str], currmodule: str
) -> Sequence[type[Any]]:
"""Import a list of classes."""
classes: list[type[Any]] = []
for name in class_names:
classes.extend(import_classes(name, currmodule))
return classes
def _class_info(
self,
classes: Collection[type[Any]],
show_builtins: bool,
private_bases: bool,
parts: int,
aliases: dict[str, str] | None,
top_classes: Set[str],
) -> list[tuple[str, str, Sequence[str], str | None]]:
"""Return name and bases for all classes that are ancestors of
*classes*.
*parts* gives the number of dotted name parts to include in the
displayed node names, from right to left. If given as a negative, the
number of parts to drop from the left. A value of 0 displays the full
dotted name. E.g. ``sphinx.ext.inheritance_diagram.InheritanceGraph``
with ``parts=2`` or ``parts=-2`` gets displayed as
``inheritance_diagram.InheritanceGraph``, and as
``ext.inheritance_diagram.InheritanceGraph`` with ``parts=3`` or
``parts=-1``.
*top_classes* gives the name(s) of the top most ancestor class to
traverse to. Multiple names can be specified separated by comma.
"""
all_classes = {}
def recurse(cls: type[Any]) -> None:
if not show_builtins and cls in PY_BUILTINS:
return
if not private_bases and cls.__name__.startswith('_'):
return
nodename = self.class_name(cls, parts, aliases)
fullname = self.class_name(cls, 0, aliases)
# Use first line of docstring as tooltip, if available
tooltip = None
try:
if cls.__doc__:
doc = cls.__doc__.strip().split('\n')[0]
if doc:
tooltip = '"%s"' % doc.replace('"', '\\"')
except Exception: # might raise AttributeError for strange classes
pass
baselist: list[str] = []
all_classes[cls] = (nodename, fullname, baselist, tooltip)
if fullname in top_classes:
return
for base in cls.__bases__:
if not show_builtins and base in PY_BUILTINS:
continue
if not private_bases and base.__name__.startswith('_'):
continue
baselist.append(self.class_name(base, parts, aliases))
if base not in all_classes:
recurse(base)
for cls in classes:
recurse(cls)
return [
(cls_name, fullname, tuple(bases), tooltip)
for (cls_name, fullname, bases, tooltip) in all_classes.values()
]
def class_name(
self, cls: type[Any], parts: int = 0, aliases: dict[str, str] | None = None
) -> str:
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = cls.__module__
if module in {'__builtin__', 'builtins'}:
fullname = cls.__name__
else:
fullname = f'{module}.{cls.__qualname__}'
if parts == 0:
result = fullname
else:
name_parts = fullname.split('.')
result = '.'.join(name_parts[-parts:])
if aliases is not None and result in aliases:
return aliases[result]
return result
def get_all_class_names(self) -> list[str]:
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _, _) in self.class_info]
# These are the default attrs for graphviz
default_graph_attrs: dict[str, float | int | str] = {
'rankdir': 'LR',
'size': '"8.0, 12.0"',
'bgcolor': 'transparent',
}
default_node_attrs: dict[str, float | int | str] = {
'shape': 'box',
'fontsize': 10,
'height': 0.25,
'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans"',
'style': '"setlinewidth(0.5),filled"',
'fillcolor': 'white',
}
default_edge_attrs: dict[str, float | int | str] = {
'arrowsize': 0.5,
'style': '"setlinewidth(0.5)"',
}
def _format_node_attrs(self, attrs: dict[str, float | int | str]) -> str:
return ','.join(f'{k}={v}' for k, v in sorted(attrs.items()))
def _format_graph_attrs(self, attrs: dict[str, float | int | str]) -> str:
return ''.join(f'{k}={v};\n' for k, v in sorted(attrs.items()))
def generate_dot(
self,
name: str,
urls: dict[str, str] | None = None,
env: BuildEnvironment | None = None,
graph_attrs: dict[str, float | int | str] | None = None,
node_attrs: dict[str, float | int | str] | None = None,
edge_attrs: dict[str, float | int | str] | None = None,
) -> str:
config = env.config if env is not None else None
return self._generate_dot(
name, urls, config, graph_attrs, node_attrs, edge_attrs
)
def _generate_dot(
self,
name: str,
urls: dict[str, str] | None = None,
config: Config | None = None,
graph_attrs: dict[str, float | int | str] | None = None,
node_attrs: dict[str, float | int | str] | None = None,
edge_attrs: dict[str, float | int | str] | None = None,
) -> str:
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
if urls is None:
urls = {}
g_attrs = self.default_graph_attrs.copy()
n_attrs = self.default_node_attrs.copy()
e_attrs = self.default_edge_attrs.copy()
if graph_attrs is not None:
g_attrs.update(graph_attrs)
if node_attrs is not None:
n_attrs.update(node_attrs)
if edge_attrs is not None:
e_attrs.update(edge_attrs)
if config:
g_attrs.update(config.inheritance_graph_attrs)
n_attrs.update(config.inheritance_node_attrs)
e_attrs.update(config.inheritance_edge_attrs)
res: list[str] = [
f'digraph {name} {{\n',
self._format_graph_attrs(g_attrs),
]
for cls_name, fullname, bases, tooltip in sorted(self.class_info):
# Write the node
this_node_attrs = n_attrs.copy()
if fullname in urls:
this_node_attrs['URL'] = f'"{urls[fullname]}"'
this_node_attrs['target'] = '"_top"'
if tooltip:
this_node_attrs['tooltip'] = tooltip
res.append(
f' "{cls_name}" [{self._format_node_attrs(this_node_attrs)}];\n'
)
# Write the edges
res.extend(
f' "{base_name}" -> "{cls_name}" [{self._format_node_attrs(e_attrs)}];\n'
for base_name in bases
)
res.append('}\n')
return ''.join(res)
| InheritanceGraph |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/policy/checkpoint_manager.py | {
"start": 456,
"end": 3958
} | class ____:
@staticmethod
def get_checkpoints(behavior_name: str) -> List[Dict[str, Any]]:
checkpoint_list = GlobalTrainingStatus.get_parameter_state(
behavior_name, StatusType.CHECKPOINTS
)
if not checkpoint_list:
checkpoint_list = []
GlobalTrainingStatus.set_parameter_state(
behavior_name, StatusType.CHECKPOINTS, checkpoint_list
)
return checkpoint_list
@staticmethod
def remove_checkpoint(checkpoint: Dict[str, Any]) -> None:
"""
Removes a checkpoint stored in checkpoint_list.
If checkpoint cannot be found, no action is done.
:param checkpoint: A checkpoint stored in checkpoint_list
"""
file_paths: List[str] = [checkpoint["file_path"]]
file_paths.extend(checkpoint["auxillary_file_paths"])
for file_path in file_paths:
if os.path.exists(file_path):
os.remove(file_path)
logger.debug(f"Removed checkpoint model {file_path}.")
else:
logger.debug(f"Checkpoint at {file_path} could not be found.")
return
@classmethod
def _cleanup_extra_checkpoints(
cls, checkpoints: List[Dict], keep_checkpoints: int
) -> List[Dict]:
"""
Ensures that the number of checkpoints stored are within the number
of checkpoints the user defines. If the limit is hit, checkpoints are
removed to create room for the next checkpoint to be inserted.
:param behavior_name: The behavior name whose checkpoints we will mange.
:param keep_checkpoints: Number of checkpoints to record (user-defined).
"""
while len(checkpoints) > keep_checkpoints:
if keep_checkpoints <= 0 or len(checkpoints) == 0:
break
ModelCheckpointManager.remove_checkpoint(checkpoints.pop(0))
return checkpoints
@classmethod
def add_checkpoint(
cls, behavior_name: str, new_checkpoint: ModelCheckpoint, keep_checkpoints: int
) -> None:
"""
Make room for new checkpoint if needed and insert new checkpoint information.
:param behavior_name: Behavior name for the checkpoint.
:param new_checkpoint: The new checkpoint to be recorded.
:param keep_checkpoints: Number of checkpoints to record (user-defined).
"""
new_checkpoint_dict = attr.asdict(new_checkpoint)
checkpoints = cls.get_checkpoints(behavior_name)
checkpoints.append(new_checkpoint_dict)
cls._cleanup_extra_checkpoints(checkpoints, keep_checkpoints)
GlobalTrainingStatus.set_parameter_state(
behavior_name, StatusType.CHECKPOINTS, checkpoints
)
@classmethod
def track_final_checkpoint(
cls, behavior_name: str, final_checkpoint: ModelCheckpoint
) -> None:
"""
Ensures number of checkpoints stored is within the max number of checkpoints
defined by the user and finally stores the information about the final
model (or intermediate model if training is interrupted).
:param behavior_name: Behavior name of the model.
:param final_checkpoint: Checkpoint information for the final model.
"""
final_model_dict = attr.asdict(final_checkpoint)
GlobalTrainingStatus.set_parameter_state(
behavior_name, StatusType.FINAL_CHECKPOINT, final_model_dict
)
| ModelCheckpointManager |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py | {
"start": 3139,
"end": 9401
} | class ____:
def evaluate_lazy_field(self, field):
if callable(field):
return field()
else:
return field
def evaluate_lazy_payload(self, payload):
def _evaluate(field):
if isinstance(field, dict):
return self.evaluate_lazy_payload(field)
return self.evaluate_lazy_field(field)
return {key: _evaluate(value) for key, value in payload.items()}
def test_complex_evaluator(evaluator: ComplexEvaluator):
_test_sink(evaluator.evaluate_lazy_payload(_test_source()))
# Test tito collapse depth.
def obscure_tito(x):
...
def into_dict_then_tito_collapse(x):
d = {"a": x}
return obscure_tito(d)
def tito_collapse_then_into_dict(x):
y = obscure_tito(x)
return {"a": y}
def issue_with_into_dict_then_tito_collapse():
x = _test_source()
y = into_dict_then_tito_collapse(x)
_test_sink(y["b"]) # This is an issue.
def no_issue_with_tito_collapse_then_into_dict():
x = _test_source()
y = tito_collapse_then_into_dict(x)
_test_sink(y["b"]) # Not an issue.
def perfect_tito(x):
return x
def into_dict_then_perfect_tito(x):
d = {"a": x}
return perfect_tito(d)
def perfect_tito_then_into_dict(x):
y = perfect_tito(x)
return {"a": y}
def no_issue_with_into_dict_then_perfect_tito():
x = _test_source()
y = into_dict_then_perfect_tito(x)
_test_sink(y["b"]) # Not an issue.
def no_issue_with_perfect_tito_then_into_dict():
x = _test_source()
y = perfect_tito_then_into_dict(x)
_test_sink(y["b"]) # Not an issue.
def issue_approximate_return_access_paths():
x = Object()
x.a = _test_source()
y = approximate_return_access_paths(x)
_test_sink(y["a"]) # This is an issue.
def issue_approximate_return_access_paths_common_prefix():
x = Object()
x.y.a = _test_source()
y = approximate_return_access_paths(x)
_test_sink(y["a"]) # This is an issue.
def non_issue_approximate_return_access_paths_common_prefix():
x = Object()
x.a = _test_source()
y = approximate_return_access_paths(x)
# This is not an issue, but triggers a false positive, which is expected behavior.
_test_sink(y["a"])
def perfect_tito_with_tree_manipulation(x):
d = {"a": x}
return d["a"]
def tito_collapse_one_append_a_b_c(x):
return {"a": {"b": {"c": x}}}
def tito_collapse_one(x):
y = tito_collapse_one_append_a_b_c(x)
return y["a"]["b"]["c"]
def tito_collapse_two_append_a_b(x):
return {"a": {"b": x}}
def tito_collapse_two(x):
y = tito_collapse_two_append_a_b(x)
return y["a"]["b"]
def tito_collapse_three_append_a(x):
return {"a": x}
def tito_collapse_three(x):
y = tito_collapse_three_append_a(x)
return y["a"]
def into_dict_then_collapse_two(x):
d = {"a": x}
return tito_collapse_two(d)
def collapse_two_then_into_dict(x):
y = tito_collapse_two(x)
return {"a": y}
def perfect_tito_then_into_deep_dict(x):
y = perfect_tito(x)
return {"a": {"b": {"c": {"d": {"e": y}}}}}
def collapse_two_then_into_deep_dict(x):
y = tito_collapse_two(x)
return {"a": {"b": {"c": {"d": {"e": y}}}}}
def combine_collapse_one(arg):
x = {"a": arg}
y = tito_collapse_one(x)
z = {"a": y}
t = tito_collapse_one(z)
return t
def combine_collapse_two(arg):
x = {"a": arg}
y = tito_collapse_two(x)
z = {"a": y}
t = tito_collapse_two(z)
return t
def combine_collapse_three(arg):
x = {"a": arg}
y = tito_collapse_three(x)
z = {"a": y}
t = tito_collapse_three(z)
return t
def combine_collapse_two_and_one(arg):
x = {"a": arg}
y = tito_collapse_two(x)
z = {"a": y}
t = tito_collapse_one(z)
return t
def combine_collapse_one_and_two(arg):
x = {"a": arg}
y = tito_collapse_one(x)
z = {"a": y}
t = tito_collapse_two(z)
return t
def loop_perfect_tito(x):
for _ in range(100):
x = {"a": x}
x = perfect_tito(x)
return x
def loop_tito_collapse_one(x):
for _ in range(100):
x = {"a": x}
x = tito_collapse_one(x)
return x
def loop_tito_collapse_two(x):
for _ in range(100):
x = {"a": x}
x = tito_collapse_two(x)
return x
def join_tito_collapse_test_1(x):
result = Object()
if 1 > 2:
result.a = tito_collapse_two(x)
else:
result.a.b = tito_collapse_one(x)
return result
def join_tito_collapse_test_2(x):
result = Object()
if 1 > 2:
result.a = tito_collapse_two(x)
else:
result.a.b = tito_collapse_three(x)
return result
def tito_collapse_one_with_input_path(x):
return tito_collapse_one(x["a"]["b"])
def tito_collapse_one_with_input_path_with_hop(x):
return tito_collapse_one_with_input_path(x)
def no_issue_tito_collapse_two_with_input_path():
x = {"a": {"b": {"c": _test_source(), "d": 0}}}
y = tito_collapse_one_with_input_path(x)
_test_sink(y["d"])
def join_tito_collapse_test_3(x):
if 1 > 2:
return tito_collapse_one(x)
else:
return {"foo": tito_collapse_two(x)}
def issue_join_tito_collapse_test_3():
x = {"a": _test_source()}
y = join_tito_collapse_test_3(x)
_test_sink(y["foo"]["a"]) # This is an issue.
def user_declared_tito_no_collapse(arg):
return
def no_issue_user_declared_tito_no_collapse():
x = {"a": _test_source()}
y = user_declared_tito_no_collapse(x)
_test_sink(y["b"])
def user_declared_tito_collapse_one(arg):
return
def no_issue_user_declared_tito_collapse_one():
x = {"a": _test_source()}
y = user_declared_tito_collapse_one(x)
_test_sink(y["b"])
def issue_user_declared_tito_collapse_one():
x = {"a": {"b": _test_source()}}
y = user_declared_tito_collapse_one(x)
_test_sink(y["a"]["c"])
# Test false positives with the backward analysis.
def no_tito_init_then_overwrite(x):
d = {"a": x}
d["a"] = 0
return d # TODO(T146774878): Wrongly infers tito
def no_tito_overwrite_then_init(d):
d["a"] = 0
return d["a"] # Properly infers no tito
def tito_with_sink(d):
x = d["a"]
_test_sink(d["a"])
return x
# Test tito to self.
| ComplexEvaluator |
python | huggingface__transformers | src/transformers/models/ibert/modeling_ibert.py | {
"start": 31627,
"end": 32497
} | class ____(nn.Module):
"""I-BERT Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
@auto_docstring(
custom_intro="""
I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
"""
)
| IBertLMHead |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 75603,
"end": 76737
} | class ____(Response):
"""
Response of models.make_public endpoint.
:param updated: Number of models updated
:type updated: int
"""
_service = "models"
_action = "make_public"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of models updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePublicResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| MakePublicResponse |
python | scipy__scipy | scipy/spatial/tests/test_kdtree.py | {
"start": 4171,
"end": 4516
} | class ____(ConsistencyTests):
def setup_method(self):
self.n = 100
self.m = 4
np.random.seed(1234)
self.data = np.random.randn(self.n, self.m)
self.kdtree = self.kdtree_type(self.data, leafsize=2)
self.x = np.random.randn(self.m)
self.d = 0.2
self.k = 10
@KDTreeTest
| _Test_random |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 7273,
"end": 7761
} | class ____(InvalidRequestError):
"""
If the server supporting PKCE does not support the requested
transformation, the authorization endpoint MUST return the
authorization error response with "error" value set to
"invalid_request". The "error_description" or the response of
"error_uri" SHOULD explain the nature of error, e.g., transform
algorithm not supported.
"""
description = 'Transform algorithm not supported.'
| UnsupportedCodeChallengeMethodError |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/transfers/s3_to_ftp.py | {
"start": 1171,
"end": 3014
} | class ____(BaseOperator):
"""
This operator enables the transferring of files from S3 to a FTP server.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ToFTPOperator`
:param s3_bucket: The targeted s3 bucket. This is the S3 bucket from
where the file is downloaded.
:param s3_key: The targeted s3 key. This is the specified file path for
downloading the file from S3.
:param ftp_path: The ftp remote path. This is the specified file path for
uploading file to the FTP server.
:param aws_conn_id: reference to a specific AWS connection
:param ftp_conn_id: The ftp connection id. The name or identifier for
establishing a connection to the FTP server.
"""
template_fields: Sequence[str] = ("s3_bucket", "s3_key", "ftp_path")
def __init__(
self,
*,
s3_bucket,
s3_key,
ftp_path,
aws_conn_id="aws_default",
ftp_conn_id="ftp_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.ftp_path = ftp_path
self.aws_conn_id = aws_conn_id
self.ftp_conn_id = ftp_conn_id
def execute(self, context: Context):
s3_hook = S3Hook(self.aws_conn_id)
ftp_hook = FTPHook(ftp_conn_id=self.ftp_conn_id)
s3_obj = s3_hook.get_key(self.s3_key, self.s3_bucket)
with NamedTemporaryFile() as local_tmp_file:
self.log.info("Downloading file from %s", self.s3_key)
s3_obj.download_fileobj(local_tmp_file)
local_tmp_file.seek(0)
ftp_hook.store_file(self.ftp_path, local_tmp_file.name)
self.log.info("File stored in %s", {self.ftp_path})
| S3ToFTPOperator |
python | spack__spack | lib/spack/spack/test/cmd/repo.py | {
"start": 9708,
"end": 30506
} | class ____(spack.repo.RepoDescriptor):
def __init__(self, to_construct: Dict[str, Union[spack.repo.Repo, Exception]]):
self.to_construct = to_construct
self.initialized = False
def initialize(self, fetch=True, git=None) -> None:
self.initialized = True
def get_commit(self, git: Optional[Executable] = None):
pass
def update(self, git: Optional[Executable] = None, remote: Optional[str] = "origin") -> None:
pass
def construct(self, cache, overrides=None):
assert self.initialized, "MockDescriptor must be initialized before construction"
return self.to_construct
def make_repo_config(repo_config: Optional[dict] = None) -> spack.config.Configuration:
"""Create a Configuration instance with writable scope and optional repo configuration."""
scope = spack.config.InternalConfigScope("test", {"repos": repo_config or {}})
scope.writable = True
config = spack.config.Configuration()
config.push_scope(scope)
return config
def test_add_repo_name_already_exists(tmp_path: pathlib.Path):
"""Test _add_repo raises error when name already exists in config."""
# Set up existing config with the same name
config = make_repo_config({"test_name": "/some/path"})
# Should raise error when name already exists
with pytest.raises(SpackError, match="A repository with the name 'test_name' already exists"):
spack.cmd.repo._add_repo(
str(tmp_path), name="test_name", scope=None, paths=[], destination=None, config=config
)
def test_add_repo_destination_with_local_path(tmp_path: pathlib.Path):
"""Test _add_repo raises error when args are added that do not apply to local paths."""
# Should raise error when destination is provided with local path
with pytest.raises(
SpackError, match="The 'destination' argument is only valid for git repositories"
):
spack.cmd.repo._add_repo(
str(tmp_path),
name="test_name",
scope=None,
paths=[],
destination="/some/destination",
config=make_repo_config(),
)
with pytest.raises(SpackError, match="The --paths flag is only valid for git repositories"):
spack.cmd.repo._add_repo(
str(tmp_path),
name="test_name",
scope=None,
paths=["path1", "path2"],
destination=None,
config=make_repo_config(),
)
def test_add_repo_computed_key_already_exists(tmp_path: pathlib.Path, monkeypatch):
"""Test _add_repo raises error when computed key already exists in config."""
def mock_parse_config_descriptor(name, entry, lock):
return MockDescriptor({str(tmp_path): MockRepo("test_repo")})
monkeypatch.setattr(spack.repo, "parse_config_descriptor", mock_parse_config_descriptor)
# Should raise error when computed key already exists
with pytest.raises(SpackError, match="A repository with the name 'test_repo' already exists"):
spack.cmd.repo._add_repo(
str(tmp_path),
name=None, # Will use namespace as key
scope=None,
paths=[],
destination=None,
config=make_repo_config({"test_repo": "/some/path"}),
)
def test_add_repo_git_url_with_paths(monkeypatch):
"""Test _add_repo correctly handles git URL with multiple paths."""
config = make_repo_config({"test_repo": "/some/path"})
def mock_parse_config_descriptor(name, entry, lock):
# Verify the entry has the expected git structure
assert "git" in entry
assert entry["git"] == "https://example.com/repo.git"
assert entry["paths"] == ["path1", "path2"]
return MockDescriptor({"/some/path": MockRepo("git_repo")})
monkeypatch.setattr(spack.repo, "parse_config_descriptor", mock_parse_config_descriptor)
# Should succeed with git URL and multiple paths
key = spack.cmd.repo._add_repo(
"https://example.com/repo.git",
name="git_test",
scope=None,
paths=["path1", "path2"],
destination=None,
config=config,
)
assert key == "git_test"
repos = config.get("repos", scope=None)
assert "git_test" in repos
assert repos["git_test"]["git"] == "https://example.com/repo.git"
assert repos["git_test"]["paths"] == ["path1", "path2"]
def test_add_repo_git_url_with_destination(monkeypatch):
    """A git URL with --destination must be recorded as a structured git entry."""
    url = "https://example.com/repo.git"
    dest = "/custom/destination"
    config = make_repo_config({"test_repo": "/some/path"})

    def fake_descriptor(name, entry, lock):
        # _add_repo should pass both the URL and the clone destination through.
        assert entry.get("git") == url
        assert entry.get("destination") == dest
        return MockDescriptor({"/some/path": MockRepo("git_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    key = spack.cmd.repo._add_repo(
        url, name="git_test", scope=None, paths=[], destination=dest, config=config
    )
    assert key == "git_test"

    # Both fields must be present in the stored configuration entry.
    repos = config.get("repos", scope=None)
    assert "git_test" in repos
    assert repos["git_test"]["git"] == url
    assert repos["git_test"]["destination"] == dest
def test_add_repo_ssh_git_url_detection(monkeypatch):
    """SSH-style short URLs (user@host:path) must be recognized as git repositories."""
    ssh_url = "git@github.com:user/repo.git"
    config = make_repo_config({"test_repo": "/some/path"})

    def fake_descriptor(name, entry, lock):
        # The colon in user@host:path is not preceded by a forward slash,
        # so the argument should have been classified as a git URL.
        assert entry.get("git") == ssh_url
        return MockDescriptor({"/some/path": MockRepo("git_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    key = spack.cmd.repo._add_repo(
        ssh_url, name="ssh_git_test", scope=None, paths=[], destination=None, config=config
    )
    assert key == "ssh_git_test"

    repos = config.get("repos", scope=None)
    assert "ssh_git_test" in repos
    assert repos["ssh_git_test"]["git"] == ssh_url
def test_add_repo_no_usable_repositories_error(monkeypatch):
    """If every discovered repository fails to construct, _add_repo must error out."""

    def fake_descriptor(name, entry, lock):
        # Every discovered path maps to a construction failure.
        failures = {"/path1": Exception("Invalid repo"), "/path2": Exception("Another error")}
        return MockDescriptor(failures)

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    message = "No package repository could be constructed from /invalid/path"
    with pytest.raises(SpackError, match=message):
        spack.cmd.repo._add_repo(
            "/invalid/path",
            name="test_repo",
            scope=None,
            paths=[],
            destination=None,
            config=make_repo_config(),
        )
def test_add_repo_multiple_repos_no_name_error(monkeypatch):
    """Without --name, _add_repo cannot pick a single key when several repos are found."""

    def fake_descriptor(name, entry, lock):
        # Two distinct package repositories come out of a single descriptor.
        return MockDescriptor({"/path1": MockRepo("repo1"), "/path2": MockRepo("repo2")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    message = "Multiple package repositories found, please specify a name"
    with pytest.raises(SpackError, match=message):
        spack.cmd.repo._add_repo(
            "/path/with/multiple/repos",
            name=None,
            scope=None,
            paths=[],
            destination=None,
            config=make_repo_config(),
        )
def test_add_repo_git_url_basic_success(monkeypatch):
    """Adding a plain https git URL stores a dict-shaped entry under the given name."""
    url = "https://github.com/example/repo.git"
    config = make_repo_config()

    def fake_descriptor(name, entry, lock):
        # Git sources are represented as dictionaries, not bare path strings.
        assert isinstance(entry, dict)
        assert entry["git"] == url
        return MockDescriptor({"/git/path": MockRepo("git_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    key = spack.cmd.repo._add_repo(
        url, name="test_git_repo", scope=None, paths=[], destination=None, config=config
    )
    assert key == "test_git_repo"

    repos_config = config.get("repos", scope=None)
    assert "test_git_repo" in repos_config
    assert "git" in repos_config["test_git_repo"]
def test_add_repo_git_url_with_custom_destination(monkeypatch):
    """An SSH git URL plus --destination must round-trip through _add_repo."""
    config = make_repo_config()

    def fake_descriptor(name, entry, lock):
        # Both the URL and the custom clone location should appear in the entry.
        assert isinstance(entry, dict)
        assert "git" in entry
        assert "destination" in entry
        assert entry["destination"] == "/custom/destination"
        return MockDescriptor({"/git/path": MockRepo("git_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    result = spack.cmd.repo._add_repo(
        "git@github.com:example/repo.git",
        name="test_git_repo",
        scope=None,
        paths=[],
        destination="/custom/destination",
        config=config,
    )
    assert result == "test_git_repo"
def test_add_repo_git_url_with_single_repo_path_new(monkeypatch):
    """A single --paths value for a git URL must be forwarded as a one-element list."""
    config = make_repo_config()

    def fake_descriptor(name, entry, lock):
        # The lone subdirectory should be wrapped in a list under 'paths'.
        assert isinstance(entry, dict)
        assert "git" in entry
        assert "paths" in entry
        assert entry["paths"] == ["subdirectory/repo"]
        return MockDescriptor({"/git/path": MockRepo("git_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    result = spack.cmd.repo._add_repo(
        "https://github.com/example/repo.git",
        name="test_git_repo",
        scope=None,
        paths=["subdirectory/repo"],
        destination=None,
        config=config,
    )
    assert result == "test_git_repo"
def test_add_repo_local_path_success(monkeypatch, tmp_path: pathlib.Path):
    """A local directory is stored as a plain path string under the chosen name."""
    config = make_repo_config()
    local_path = str(tmp_path)

    def fake_descriptor(name, entry, lock):
        # Local repositories are configured as bare strings, not dicts.
        assert isinstance(entry, str)
        return MockDescriptor({local_path: MockRepo("test_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    key = spack.cmd.repo._add_repo(
        local_path, name="test_local_repo", scope=None, paths=[], destination=None, config=config
    )
    assert key == "test_local_repo"

    # The stored entry should be the literal directory path.
    repos_config = config.get("repos")
    assert "test_local_repo" in repos_config
    assert repos_config["test_local_repo"] == local_path
def test_add_repo_auto_name_from_namespace(monkeypatch, tmp_path: pathlib.Path):
    """When --name is omitted the repository's namespace becomes the config key."""
    config = make_repo_config()

    def fake_descriptor(name, entry, lock):
        return MockDescriptor({str(tmp_path): MockRepo("auto_name_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    key = spack.cmd.repo._add_repo(
        str(tmp_path), name=None, scope=None, paths=[], destination=None, config=config
    )
    # The namespace reported by the constructed repo is used as the key.
    assert key == "auto_name_repo"

    repos_config = config.get("repos", scope=None)
    assert "auto_name_repo" in repos_config
    assert repos_config["auto_name_repo"] == str(tmp_path)
def test_add_repo_partial_repo_construction_warning(monkeypatch, capsys):
    """One broken repo out of several yields a warning, not a hard failure."""

    def fake_descriptor(name, entry, lock):
        # Mix one good repository with one that fails to construct.
        return MockDescriptor(
            {
                "/good/path": MockRepo("good_repo"),
                "/bad/path": Exception("Failed to construct repo"),
            }
        )

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    key = spack.cmd.repo._add_repo(
        "/mixed/path",
        name="test_mixed_repo",
        scope=None,
        paths=[],
        destination=None,
        config=make_repo_config(),
    )
    # The good repository is enough for the command to succeed...
    assert key == "test_mixed_repo"
    # ...but the broken one must have produced a warning on stderr.
    assert "Skipping package repository" in capsys.readouterr().err
@pytest.mark.parametrize(
    "test_url,expected_type",
    [
        ("ssh://git@github.com/user/repo.git", "git"),  # ssh URL
        ("git://github.com/user/repo.git", "git"),  # git protocol
        ("user@host:repo.git", "git"),  # SSH short form
        ("file:///local/path", "git"),  # file URL
        ("/local/path", "local"),  # local path
        ("./relative/path", "local"),  # relative path
        ("C:\\Windows\\Path", "local"),  # Windows path
    ],
)
def test_add_repo_git_url_detection_edge_cases(monkeypatch, test_url, expected_type):
    """Edge cases of the git-vs-local classification of the positional argument."""
    config = make_repo_config()

    def fake_descriptor(name, entry, lock):
        return MockDescriptor({"/path": MockRepo("test_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    spack.cmd.repo._add_repo(
        test_url, name=None, scope=None, paths=[], destination=None, config=config
    )

    stored = config.get("repos").get("test_repo")
    if expected_type == "git":
        # Git URLs are wrapped into a dict entry.
        assert stored == {"git": test_url}
    else:
        # Local paths are stored as bare strings.
        assert isinstance(stored, str)
def test_repo_set_git_config(mutable_config):
    """'spack repo set' updates git repo options in the requested scope only."""
    git_url = "https://github.com/example/test-repo.git"
    # Seed the site scope with a minimal git repository entry.
    spack.config.set("repos", {"test-repo": {"git": git_url}}, scope="site")

    # Modify destination and paths through the command, targeting the user scope.
    repo("set", "--scope=user", "--destination", "/custom/path", "test-repo")
    repo("set", "--scope=user", "--path", "subdir1", "--path", "subdir2", "test-repo")

    # The user scope now carries the modified entry...
    user_repos = spack.config.get("repos", scope="user")
    assert user_repos["test-repo"]["paths"] == ["subdir1", "subdir2"]
    assert user_repos["test-repo"]["destination"] == "/custom/path"

    # ...while the site scope stays untouched.
    site_repos = spack.config.get("repos", scope="site")
    assert "destination" not in site_repos["test-repo"]
def test_repo_set_nonexistent_repo(mutable_config):
    """'spack repo set' on an unknown namespace must raise a SpackError."""
    expected = "No repository with namespace 'nonexistent'"
    with pytest.raises(SpackError, match=expected):
        repo("set", "--destination", "/some/path", "nonexistent")
def test_repo_set_does_not_work_on_local_path(mutable_config):
    """'spack repo set' only applies to git-backed repositories, not local paths."""
    spack.config.set("repos", {"local-repo": "/local/path"}, scope="site")
    with pytest.raises(SpackError, match="is not a git repository"):
        repo("set", "--destination", "/some/path", "local-repo")
def test_add_repo_prepends_instead_of_appends(monkeypatch, tmp_path: pathlib.Path):
    """A freshly added repository must come first in the config (highest priority)."""
    existing_path = str(tmp_path / "existing_repo")
    new_path = str(tmp_path / "new_repo")
    config = make_repo_config({"existing_repo": existing_path})

    def fake_descriptor(name, entry, lock):
        return MockDescriptor({new_path: MockRepo("new_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)

    key = spack.cmd.repo._add_repo(
        path_or_repo=new_path,
        name="new_repo",
        scope=None,
        paths=[],
        destination=None,
        config=config,
    )
    assert key == "new_repo"

    repos_config = config.get("repos", scope=None)
    # Ordering matters: the new repo must precede the pre-existing one.
    assert list(repos_config.keys()) == ["new_repo", "existing_repo"]
    assert repos_config["new_repo"] == new_path
    assert repos_config["existing_repo"] == existing_path
def test_repo_list_format_flags(
    mutable_config: spack.config.Configuration, tmp_path: pathlib.Path
):
    """Test the --config-names and --namespaces flags for repo list command"""
    # Fake a git monorepo with two package repositories; the .git directory is
    # enough for it to be treated as an already-cloned repository.
    (tmp_path / "monorepo" / ".git").mkdir(parents=True)
    repo("create", str(tmp_path / "monorepo"), "repo_one")
    repo("create", str(tmp_path / "monorepo"), "repo_two")
    mutable_config.set(
        "repos",
        {
            # git repo that provides two package repositories
            "monorepo": {
                "git": "https://example.com/monorepo.git",
                "destination": str(tmp_path / "monorepo"),
                "paths": ["spack_repo/repo_one", "spack_repo/repo_two"],
            },
            # git repo that is not yet cloned
            "uninitialized": {
                "git": "https://example.com/uninitialized.git",
                "destination": str(tmp_path / "uninitialized"),
            },
            # invalid local repository
            "misconfigured": str(tmp_path / "misconfigured"),
        },
        scope="site",
    )
    # Test default table format, which shows one line per package repository.
    # NOTE(review): the markers appear to encode status ([+] usable, " - " not
    # yet cloned, [-] broken) — confirm against the list command implementation.
    table_output = repo("list", output=str)
    assert "[+] repo_one" in table_output
    assert "[+] repo_two" in table_output
    assert " - uninitialized" in table_output
    assert "[-] misconfigured" in table_output
    # Test --namespaces flag: only constructible package repos are listed,
    # one namespace per line.
    namespaces_output = repo("list", "--namespaces", output=str)
    assert namespaces_output.strip().split("\n") == ["repo_one", "repo_two"]
    # Test --names flag: every configured entry is listed by its config key,
    # regardless of whether it can be constructed.
    config_names_output = repo("list", "--names", output=str)
    config_names_lines = config_names_output.strip().split("\n")
    assert config_names_lines == ["monorepo", "uninitialized", "misconfigured"]
@pytest.mark.parametrize(
    "repo_name,flags",
    [
        ("new_repo", []),
        ("new_repo", ["--branch", "develop"]),
        ("new_repo", ["--branch", "develop", "--remote", "upstream"]),
        ("new_repo", ["--tag", "v1.0"]),
        ("new_repo", ["--commit", "abc123"]),
    ],
)
def test_repo_update_successful_flags(monkeypatch, mutable_config, repo_name, flags):
    """'spack repo update' writes branch/tag/commit flags into the repo config."""

    def fake_descriptor(name, entry, lock):
        return MockDescriptor({"/path": MockRepo("new_repo")})

    monkeypatch.setattr(spack.repo, "parse_config_descriptor", fake_descriptor)
    monkeypatch.setattr(spack.repo, "RemoteRepoDescriptor", MockDescriptor)

    # Register a git repository for the command to update.
    current = spack.config.get("repos")
    current[repo_name] = {"git": "https://github.com/example/repo.git"}
    spack.config.set("repos", current)

    repo("update", repo_name, *flags)

    # Whatever ref was requested must now be recorded in the configuration.
    updated = spack.config.get("repos")
    if "--branch" in flags:
        assert updated[repo_name]["branch"] == "develop"
    if "--tag" in flags:
        assert updated[repo_name]["tag"] == "v1.0"
    if "--commit" in flags:
        assert updated[repo_name]["commit"] == "abc123"
@pytest.mark.parametrize(
    "flags",
    [
        ["--branch", "develop"],
        ["--branch", "develop", "new_repo_1", "new_repo_2"],
        ["--branch", "develop", "unknown_repo"],
    ],
)
def test_repo_update_invalid_flags(monkeypatch, mutable_config, flags):
    """Invalid argument combinations for 'spack repo update' must raise a SpackError."""
    with pytest.raises(SpackError):
        repo("update", *flags)
| MockDescriptor |
python | kamyu104__LeetCode-Solutions | Python/best-team-with-no-conflicts.py | {
"start": 4986,
"end": 5615
} | class ____(object):
def bestTeamScore(self, scores, ages):
"""
:type scores: List[int]
:type ages: List[int]
:rtype: int
"""
players = sorted(zip(scores, ages))
dp = [0]*len(players)
result = 0
for i in xrange(len(players)):
dp[i] = players[i][0]
for j in xrange(i):
if players[j][1] <= players[i][1]:
dp[i] = max(dp[i], dp[j] + players[i][0])
result = max(result, dp[i])
return result
# Time: O(n^2)
# Space: O(n)
# longest_increasing_subsequence like dp solution
| Solution5 |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/shuffle_test.py | {
"start": 20367,
"end": 23646
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_shuffle_dataset(
self,
range_limit=10,
num_repeats=5,
buffer_size=5,
seed=None,
reshuffle_each_iteration=None,
symbolic_checkpoint=None,
):
dataset = (
dataset_ops.Dataset.range(range_limit)
.shuffle(
buffer_size,
seed=seed,
reshuffle_each_iteration=reshuffle_each_iteration,
)
.repeat(num_repeats)
)
if symbolic_checkpoint:
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
symbolic_checkpoint=[True, False],
reshuffle_each_iteration=[True, False],
buffer_size=[1, 3, 5, 8, 10, dataset_ops.UNKNOWN],
),
)
)
def test(
self,
verify_fn,
symbolic_checkpoint,
reshuffle_each_iteration,
buffer_size,
):
seed = 55
range_limit = 5
num_repeats = 2
num_outputs = range_limit * num_repeats
# pylint: disable=g-long-lambda
verify_fn(
self,
lambda: self._build_shuffle_dataset(
range_limit=range_limit,
num_repeats=num_repeats,
buffer_size=buffer_size,
seed=seed,
reshuffle_each_iteration=reshuffle_each_iteration,
symbolic_checkpoint=symbolic_checkpoint,
),
num_outputs,
)
@combinations.generate(
combinations.combine(
tf_api_version=1,
mode=["graph"],
reshuffle_each_iteration=[True, False],
buffer_size=[1, 3, 5, 8, 10]))
def testMultipleIterators(self, reshuffle_each_iteration, buffer_size):
range_limit = 5
num_repeats = 2
num_outputs = range_limit * num_repeats
def ds_fn():
# pylint: disable=cell-var-from-loop
return self._build_shuffle_dataset(
range_limit=range_limit,
num_repeats=num_repeats,
buffer_size=buffer_size,
seed=None, # Iterator seeds are generated non-deterministically.
reshuffle_each_iteration=reshuffle_each_iteration)
# pylint: enable=cell-var-from-loop
with ops.Graph().as_default() as g:
ds = ds_fn()
iterators = [ds.make_one_shot_iterator(), ds.make_one_shot_iterator()]
get_next_ops = [it.get_next() for it in iterators]
saveables = [
contrib_iterator_ops.make_saveable_from_iterator(it)
for it in iterators
]
for saveable in saveables:
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
saver = saver_lib.Saver(allow_empty=True)
with self.session(graph=g) as sess:
self._save(sess, saver)
expected = [self.evaluate(get_next_ops) for _ in range(num_outputs)]
self._restore(saver, sess)
actual = [self.evaluate(get_next_ops) for _ in range(num_outputs)]
self.match(expected, actual)
| ShuffleCheckpointTest |
python | django__django | tests/model_forms/models.py | {
"start": 8400,
"end": 8446
} | class ____(Book, BookXtra):
pass
| DerivedBook |
python | ethereum__web3.py | tests/integration/go_ethereum/test_goethereum_http.py | {
"start": 5177,
"end": 6725
} | class ____(GoEthereumAsyncEthModuleTest):
@pytest.mark.asyncio
async def test_async_http_provider_disconnects_gracefully(self, async_w3) -> None:
w3_1 = async_w3
w3_2 = AsyncWeb3(AsyncHTTPProvider(async_w3.provider.endpoint_uri))
assert w3_1 != w3_2
await w3_1.eth.get_block("latest")
await w3_2.eth.get_block("latest")
w3_1_session_cache = w3_1.provider._request_session_manager.session_cache
w3_2_session_cache = w3_2.provider._request_session_manager.session_cache
for _, session in w3_1_session_cache.items():
assert not session.closed
for _, session in w3_2_session_cache.items():
assert not session.closed
assert w3_1_session_cache != w3_2_session_cache
await w3_1.provider.disconnect()
await w3_2.provider.disconnect()
assert len(w3_1_session_cache) == 0
assert len(w3_2_session_cache) == 0
@pytest.mark.asyncio
async def test_async_http_provider_reuses_cached_session(self, async_w3) -> None:
await async_w3.eth.get_block("latest")
session_cache = async_w3.provider._request_session_manager.session_cache
assert len(session_cache) == 1
session = list(session_cache._data.values())[0]
await async_w3.eth.get_block("latest")
assert len(session_cache) == 1
assert session == list(session_cache._data.values())[0]
await async_w3.provider.disconnect()
assert len(session_cache) == 0
| TestGoEthereumAsyncEthModuleTest |
python | huggingface__transformers | src/transformers/models/llama/modeling_llama.py | {
"start": 21681,
"end": 21965
} | class ____(GenericForTokenClassification, LlamaPreTrainedModel): ...
__all__ = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
"LlamaForQuestionAnswering",
"LlamaForTokenClassification",
]
| LlamaForTokenClassification |
python | PyCQA__pylint | tests/functional/s/self/self_cls_assignment.py | {
"start": 920,
"end": 1239
} | class ____:
"""Test class for nonlocal assignment of self"""
def function(self, param):
"""This function uses nonlocal to reassign self"""
def _set_param(param):
nonlocal self
self = param # [self-cls-assignment]
_set_param(param)
return self
| TestNonLocal |
python | huggingface__transformers | src/transformers/models/beit/modeling_beit.py | {
"start": 28277,
"end": 29448
} | class ____(PreTrainedModel):
config: BeitConfig
base_model_prefix = "beit"
input_modalities = ("image",)
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["BeitLayer"]
_keys_to_ignore_on_load_unexpected = [r".*relative_position_index.*"]
_supports_sdpa = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, BeitEmbeddings):
init.zeros_(module.cls_token)
if module.mask_token is not None:
init.zeros_(module.mask_token)
if module.position_embeddings is not None:
init.zeros_(module.position_embeddings)
elif isinstance(module, BeitRelativePositionBias):
init.zeros_(module.relative_position_bias_table)
elif isinstance(module, BeitLayer):
if module.lambda_1 is not None:
init.constant_(module.lambda_1, self.config.layer_scale_init_value)
init.constant_(module.lambda_2, self.config.layer_scale_init_value)
@auto_docstring
| BeitPreTrainedModel |
python | mlflow__mlflow | mlflow/langchain/langchain_tracer.py | {
"start": 1646,
"end": 24086
} | class ____(BaseCallbackHandler, metaclass=ExceptionSafeAbstractClass):
"""
Callback for auto-logging traces.
We need to inherit ExceptionSafeAbstractClass to avoid invalid new
input arguments added to original function call.
Args:
prediction_context: Optional prediction context object to be set for the
thread-local context. Occasionally this has to be passed manually because
the callback may be invoked asynchronously and Langchain doesn't correctly
propagate the thread-local context.
"""
def __init__(
self,
prediction_context: Optional["Context"] = None,
):
# NB: The tracer can handle multiple traces in parallel under multi-threading scenarios.
# DO NOT use instance variables to manage the state of single trace.
super().__init__()
# run_id: (LiveSpan, OTel token)
self._run_span_mapping: dict[str, SpanWithToken] = {}
self._prediction_context = prediction_context
def _get_span_by_run_id(self, run_id: UUID) -> LiveSpan | None:
if span_with_token := self._run_span_mapping.get(str(run_id), None):
return span_with_token.span
raise MlflowException(f"Span for run_id {run_id!s} not found.")
def _serialize_invocation_params(
self, attributes: dict[str, Any] | None
) -> dict[str, Any] | None:
"""
Serialize the 'invocation_params' in the attributes dictionary.
If 'invocation_params' contains a key 'response_format' whose value is a subclass
of pydantic.BaseModel, replace it with its JSON schema.
"""
if not attributes:
return attributes
invocation_params = attributes.get("invocation_params")
if not isinstance(invocation_params, dict):
return attributes
response_format = invocation_params.get("response_format")
if isinstance(response_format, type) and issubclass(response_format, pydantic.BaseModel):
try:
invocation_params["response_format"] = response_format.model_json_schema()
except Exception as e:
_logger.error(
"Failed to generate JSON schema for response_format: %s", e, exc_info=True
)
return attributes
def _start_span(
self,
span_name: str,
parent_run_id: UUID | None,
span_type: str,
run_id: UUID,
inputs: str | dict[str, Any] | None = None,
attributes: dict[str, Any] | None = None,
) -> LiveSpan:
"""Start MLflow Span (or Trace if it is root component)"""
serialized_attributes = self._serialize_invocation_params(attributes)
dependencies_schemas = (
self._prediction_context.dependencies_schemas if self._prediction_context else None
)
with maybe_set_prediction_context(
self._prediction_context
): # When parent_run_id is None, this is root component so start trace
span = start_span_no_context(
name=span_name,
span_type=span_type,
parent_span=self._get_parent_span(parent_run_id),
inputs=inputs,
attributes=serialized_attributes,
tags=dependencies_schemas,
)
# Debugging purpose
if span.trace_id == NO_OP_SPAN_TRACE_ID:
_logger.debug("No Op span was created, the trace will not be recorded.")
# Attach the span to the current context to mark it "active"
token = set_span_in_context(span) if _should_attach_span_to_context.get() else None
self._run_span_mapping[str(run_id)] = SpanWithToken(span, token)
return span
def _get_parent_span(self, parent_run_id) -> LiveSpan | None:
"""
Get parent span to create a new span under.
Ideally, we can simply rely on the active span in current context. However, LangChain
execution heavily uses threads and asyncio, and sometimes ContextVar is not correctly
propagated, resulting in missing parent span.
To address this, we check two sources of parent span:
1. An active span in current MLflow tracing context (get_current_active_span)
2. If parent_run_id is given by LangChain, get the corresponding span from the mapping
The complex case is when BOTH are present but different. In this case, we need to
resolve the correct parent span by traversing the span tree.
"""
parent_mlflow_span = mlflow.get_current_active_span()
parent_lc_span = self._get_span_by_run_id(parent_run_id) if parent_run_id else None
if parent_mlflow_span and parent_lc_span:
if parent_mlflow_span.span_id == parent_lc_span.span_id:
return parent_mlflow_span
else:
return self._resolve_parent_span(parent_mlflow_span, parent_lc_span)
elif parent_mlflow_span:
return parent_mlflow_span
elif parent_lc_span:
return parent_lc_span
def _resolve_parent_span(self, parent_mlflow_span, parent_lc_span):
"""
Resolve the correct parent span when both MLflow and LangChain provide different
parent spans.
For example, the following two examples are mostly same but slightly different: where the
mlflow.start_span() is used.
For example, the following two examples are mostly same but slightly different: where the
mlflow.start_span() is used.
```python
llm = ChatOpenAI()
@tool
def custom_tool_node(inputs):
response = ChatOpenAI().invoke(...)
return response.content
graph = create_react_agent(llm, [custom_tool_node])
with mlflow.start_span("parent"):
graph.invoke({"prompt": "Hello"})
```
The correct span structure for this case is [parent] -> [tool] -> [ChatOpenAI]
```python
@tool
def custom_tool_node(inputs):
with mlflow.start_span("parent"):
response = ChatOpenAI().invoke(...)
return response.content
graph = create_react_agent(llm, [custom_tool_node])
graph.invoke({"prompt": "Hello"})
```
The correct span structure for this case is [tool] -> [parent] -> [ChatOpenAI]
When we try to create a new span for ChatOpenAI, we need to determine which span is the
parent span, "parent" or "tool". Unfortunately, there is no way to decide this from
metadata provided in the span itself, so we need to traverse the span tree and check
if one is parent of the other.
"""
trace_manager = InMemoryTraceManager.get_instance()
span = parent_mlflow_span
while span.parent_id:
if span.parent_id == parent_lc_span.span_id:
# MLflow parent span is under the LangChain
# langchain_span
# └── mlflow_span
# └── current span
return parent_mlflow_span
span = trace_manager.get_span_from_id(span.trace_id, span.parent_id)
# MLflow span is parent of LangChain span
# mlflow_span
# └── langchain_span
# └── current span
#
# or two spans are not related at all, then fallback to LangChain one.
return parent_lc_span
def _end_span(
self,
run_id: UUID,
span: LiveSpan,
outputs=None,
attributes=None,
status=SpanStatus(SpanStatusCode.OK),
):
"""Close MLflow Span (or Trace if it is root component)"""
try:
with maybe_set_prediction_context(self._prediction_context):
span.end(
outputs=outputs,
attributes=attributes,
status=status,
)
finally:
# Span should be detached from the context even when the client.end_span fails
st = self._run_span_mapping.pop(str(run_id), None)
if _should_attach_span_to_context.get():
if st.token is None:
raise MlflowException(
f"Token for span {st.span} is not found. "
"Cannot detach the span from context."
)
detach_span_from_context(st.token)
def flush(self):
"""Flush the state of the tracer."""
# Ideally, all spans should be popped and ended. However, LangChain sometimes
# does not trigger the end event properly and some spans may be left open.
# To avoid leaking tracing context, we remove all spans from the mapping.
for st in self._run_span_mapping.values():
if st.token:
_logger.debug(f"Found leaked span {st.span}. Force ending it.")
detach_span_from_context(st.token)
self._run_span_mapping = {}
def _assign_span_name(self, serialized: dict[str, Any], default_name="unknown") -> str:
return serialized.get("name", serialized.get("id", [default_name])[-1])
def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
):
"""Run when a chat model starts running."""
if metadata:
kwargs.update({"metadata": metadata})
kwargs[SpanAttributeKey.MESSAGE_FORMAT] = "langchain"
span = self._start_span(
span_name=name or self._assign_span_name(serialized, "chat model"),
parent_run_id=parent_run_id,
span_type=SpanType.CHAT_MODEL,
run_id=run_id,
inputs=messages,
attributes=kwargs,
)
if tools := self._extract_tool_definitions(kwargs):
set_span_chat_tools(span, tools)
def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
*,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
) -> None:
"""Run when LLM (non-chat models) starts running."""
if metadata:
kwargs.update({"metadata": metadata})
kwargs[SpanAttributeKey.MESSAGE_FORMAT] = "langchain"
span = self._start_span(
span_name=name or self._assign_span_name(serialized, "llm"),
parent_run_id=parent_run_id,
span_type=SpanType.LLM,
run_id=run_id,
inputs=prompts,
attributes=kwargs,
)
if tools := self._extract_tool_definitions(kwargs):
set_span_chat_tools(span, tools)
def _extract_tool_definitions(self, kwargs: dict[str, Any]) -> list[ChatTool]:
raw_tools = kwargs.get("invocation_params", {}).get("tools", [])
tools = []
for raw_tool in raw_tools:
# First, try to parse the raw tool dictionary as OpenAI-style tool
try:
tool = ChatTool.model_validate(raw_tool)
tools.append(tool)
except pydantic.ValidationError:
# If not OpenAI style, just try to extract the name and descriptions.
if name := raw_tool.get("name"):
tool = ChatTool(
type="function",
function=FunctionToolDefinition(
name=name, description=raw_tool.get("description")
),
)
tools.append(tool)
else:
_logger.warning(f"Failed to parse tool definition for tracing: {raw_tool}.")
return tools
def on_llm_new_token(
self,
token: str,
*,
chunk: GenerationChunk | ChatGenerationChunk | None = None,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
):
"""Run on new LLM token. Only available when streaming is enabled."""
llm_span = self._get_span_by_run_id(run_id)
event_kwargs = {"token": token}
if chunk:
event_kwargs["chunk"] = dumps(chunk)
llm_span.add_event(
SpanEvent(
name="new_token",
attributes=event_kwargs,
)
)
def on_retry(
self,
retry_state: RetryCallState,
*,
run_id: UUID,
**kwargs: Any,
):
"""Run on a retry event."""
span = self._get_span_by_run_id(run_id)
retry_d: dict[str, Any] = {
"slept": retry_state.idle_for,
"attempt": retry_state.attempt_number,
}
if retry_state.outcome is None:
retry_d["outcome"] = "N/A"
elif retry_state.outcome.failed:
retry_d["outcome"] = "failed"
exception = retry_state.outcome.exception()
retry_d["exception"] = str(exception)
retry_d["exception_type"] = exception.__class__.__name__
else:
retry_d["outcome"] = "success"
retry_d["result"] = str(retry_state.outcome.result())
span.add_event(
SpanEvent(
name="retry",
attributes=retry_d,
)
)
def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any):
"""End the span for an LLM run."""
llm_span = self._get_span_by_run_id(run_id)
# response.generations is a nested list of messages
generations = [g for gen_list in response.generations for g in gen_list]
# Record the token usage attribute
try:
if usage := parse_token_usage(generations):
llm_span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage)
except Exception as e:
_logger.debug(f"Failed to log token usage for LangChain: {e}", exc_info=True)
self._end_span(run_id, llm_span, outputs=response)
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
**kwargs: Any,
):
"""Handle an error for an LLM run."""
llm_span = self._get_span_by_run_id(run_id)
llm_span.add_event(SpanEvent.from_exception(error))
self._end_span(run_id, llm_span, status=SpanStatus(SpanStatusCode.ERROR, str(error)))
def on_chain_start(
self,
serialized: dict[str, Any],
inputs: dict[str, Any] | Any,
*,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
run_type: str | None = None,
name: str | None = None,
**kwargs: Any,
):
"""Start span for a chain run."""
if metadata:
kwargs.update({"metadata": metadata})
self._start_span(
span_name=name or self._assign_span_name(serialized, "chain"),
parent_run_id=parent_run_id,
span_type=SpanType.CHAIN,
run_id=run_id,
inputs=inputs,
attributes=kwargs,
)
# NB: We need to guard this with active trace existence because sometimes LangGraph
# execute the callback within an isolated thread where the active trace is not set.
if (
metadata is not None
and (thread_id := metadata.get("thread_id"))
and mlflow.get_current_active_span() is not None
):
mlflow.update_current_trace(metadata={TraceMetadataKey.TRACE_SESSION: thread_id})
def on_chain_end(
self,
outputs: dict[str, Any],
*,
run_id: UUID,
inputs: dict[str, Any] | Any | None = None,
**kwargs: Any,
):
"""Run when chain ends running."""
chain_span = self._get_span_by_run_id(run_id)
if inputs:
chain_span.set_inputs(inputs)
self._end_span(run_id, chain_span, outputs=outputs)
def on_chain_error(
self,
error: BaseException,
*,
inputs: dict[str, Any] | Any | None = None,
run_id: UUID,
**kwargs: Any,
):
"""Run when chain errors."""
chain_span = self._get_span_by_run_id(run_id)
if inputs:
chain_span.set_inputs(inputs)
chain_span.add_event(SpanEvent.from_exception(error))
self._end_span(run_id, chain_span, status=SpanStatus(SpanStatusCode.ERROR, str(error)))
def on_tool_start(
self,
serialized: dict[str, Any],
input_str: str,
*,
run_id: UUID,
tags: list[str] | None = None,
parent_run_id: UUID | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
# We don't use inputs here because LangChain override the original inputs
# with None for some cases. In order to avoid losing the original inputs,
# we try to parse the input_str instead.
# https://github.com/langchain-ai/langchain/blob/2813e8640703b8066d8dd6c739829bb4f4aa634e/libs/core/langchain_core/tools/base.py#L636-L640
inputs: dict[str, Any] | None = None,
**kwargs: Any,
):
"""Start span for a tool run."""
if metadata:
kwargs.update({"metadata": metadata})
# For function calling, input_str can be a stringified dictionary
# like "{'key': 'value'}". We try parsing it for better rendering,
# but conservatively fallback to original if it fails.
try:
inputs = ast.literal_eval(input_str)
except Exception:
inputs = input_str
self._start_span(
span_name=name or self._assign_span_name(serialized, "tool"),
parent_run_id=parent_run_id,
span_type=SpanType.TOOL,
run_id=run_id,
inputs=inputs,
attributes=kwargs,
)
def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any):
"""Run when tool ends running."""
tool_span = self._get_span_by_run_id(run_id)
self._end_span(run_id, tool_span, outputs=output)
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
**kwargs: Any,
):
"""Run when tool errors."""
tool_span = self._get_span_by_run_id(run_id)
tool_span.add_event(SpanEvent.from_exception(error))
self._end_span(run_id, tool_span, status=SpanStatus(SpanStatusCode.ERROR, str(error)))
def on_retriever_start(
self,
serialized: dict[str, Any],
query: str,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
name: str | None = None,
**kwargs: Any,
):
"""Run when Retriever starts running."""
if metadata:
kwargs.update({"metadata": metadata})
self._start_span(
span_name=name or self._assign_span_name(serialized, "retriever"),
parent_run_id=parent_run_id,
span_type=SpanType.RETRIEVER,
run_id=run_id,
inputs=query,
attributes=kwargs,
)
def on_retriever_end(self, documents: Sequence[Document], *, run_id: UUID, **kwargs: Any):
"""Run when Retriever ends running."""
retriever_span = self._get_span_by_run_id(run_id)
try:
# attempt to convert documents to MlflowDocument
documents = [MlflowDocument.from_langchain_document(doc) for doc in documents]
except Exception as e:
_logger.debug(
f"Failed to convert LangChain Document to MLflow Document: {e}",
exc_info=True,
)
self._end_span(
run_id,
retriever_span,
outputs=documents,
)
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
**kwargs: Any,
):
"""Run when Retriever errors."""
retriever_span = self._get_span_by_run_id(run_id)
retriever_span.add_event(SpanEvent.from_exception(error))
self._end_span(run_id, retriever_span, status=SpanStatus(SpanStatusCode.ERROR, str(error)))
def on_agent_action(
self,
action: AgentAction,
*,
run_id: UUID,
**kwargs: Any,
) -> Any:
"""
Run on agent action.
NB: Agent action doesn't create a new LangChain Run, so instead of creating a new span,
an action will be recorded as an event of the existing span created by a parent chain.
"""
span = self._get_span_by_run_id(run_id)
span.add_event(
SpanEvent(
name="agent_action",
attributes={
"tool": action.tool,
"tool_input": dumps(action.tool_input),
"log": action.log,
},
)
)
def on_agent_finish(
self,
finish: AgentFinish,
*,
run_id: UUID,
**kwargs: Any,
) -> Any:
"""Run on agent end."""
span = self._get_span_by_run_id(run_id)
span.add_event(
SpanEvent(
name="agent_finish",
attributes={"return_values": dumps(finish.return_values), "log": finish.log},
)
)
def on_text(
self,
text: str,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
**kwargs: Any,
) -> Any:
"""Run on arbitrary text."""
try:
span = self._get_span_by_run_id(run_id)
except MlflowException:
_logger.warning("Span not found for text event. Skipping text event logging.")
else:
span.add_event(
SpanEvent(
"text",
attributes={"text": text},
)
)
| MlflowLangchainTracer |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/event_ids.py | {
"start": 324,
"end": 1775
} | class ____(ComputedBase):
"""Look at both debug_id and info_id if info_id is queried"""
event_id_columns: list[str] = ["info_id", "debug_id"]
@classmethod
def visit_eq(cls, value: UUID) -> Condition:
return Condition(
Function(
"or",
_make_conditions_from_column_names(cls.event_id_columns, Op.EQ, to_uuid(value)),
),
Op.EQ,
1,
)
@classmethod
def visit_neq(cls, value: UUID) -> Condition:
return Condition(
Function(
"and",
_make_conditions_from_column_names(cls.event_id_columns, Op.NEQ, to_uuid(value)),
),
Op.EQ,
1,
)
@classmethod
def visit_in(cls, value: list[UUID]) -> Condition:
return Condition(
Function(
"or",
_make_conditions_from_column_names(
cls.event_id_columns, Op.IN, [str(v) for v in value]
),
),
Op.EQ,
1,
)
@classmethod
def visit_not_in(cls, value: list[UUID]) -> Condition:
return Condition(
Function(
"and",
_make_conditions_from_column_names(
cls.event_id_columns, Op.NOT_IN, [str(v) for v in value]
),
),
Op.EQ,
1,
)
| InfoIdScalar |
python | kamyu104__LeetCode-Solutions | Python/lowest-common-ancestor-of-a-binary-tree-iv.py | {
"start": 132,
"end": 1217
} | class ____(object):
def lowestCommonAncestor(self, root, nodes):
"""
:type root: TreeNode
:type nodes: List[TreeNode]
"""
def iter_dfs(root, lookup):
result = [0]
stk = [(1, (root, result))]
while stk:
step, args = stk.pop()
if step == 1:
node, ret = args
if not node or node in lookup:
ret[0] = node
continue
ret1, ret2 = [None], [None]
stk.append((2, (node, ret1, ret2, ret)))
stk.append((1, (node.right, ret2)))
stk.append((1, (node.left, ret1)))
elif step == 2:
node, ret1, ret2, ret = args
if ret1[0] and ret2[0]:
ret[0] = node
else:
ret[0] = ret1[0] or ret2[0]
return result[0]
return iter_dfs(root, set(nodes))
# Time: O(n)
# Space: O(h)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/maximal-square.py | {
"start": 31,
"end": 1093
} | class ____(object):
# @param {character[][]} matrix
# @return {integer}
def maximalSquare(self, matrix):
if not matrix:
return 0
m, n = len(matrix), len(matrix[0])
size = [[0 for j in xrange(n)] for i in xrange(2)]
max_size = 0
for j in xrange(n):
if matrix[0][j] == '1':
size[0][j] = 1
max_size = max(max_size, size[0][j])
for i in xrange(1, m):
if matrix[i][0] == '1':
size[i % 2][0] = 1
else:
size[i % 2][0] = 0
for j in xrange(1, n):
if matrix[i][j] == '1':
size[i % 2][j] = min(size[i % 2][j - 1], \
size[(i - 1) % 2][j], \
size[(i - 1) % 2][j - 1]) + 1
max_size = max(max_size, size[i % 2][j])
else:
size[i % 2][j] = 0
return max_size * max_size
# Time: O(n^2)
# Space: O(n^2)
# DP.
| Solution |
python | pyca__cryptography | tests/hazmat/asn1/test_api.py | {
"start": 404,
"end": 3549
} | class ____:
def test_repr_printable_string(self) -> None:
my_string = "MyString"
assert (
repr(asn1.PrintableString(my_string))
== f"PrintableString({my_string!r})"
)
def test_printable_string_as_str(self) -> None:
my_string = "MyString"
assert asn1.PrintableString(my_string).as_str() == my_string
def test_invalid_printable_string(self) -> None:
with pytest.raises(ValueError, match="invalid PrintableString: café"):
asn1.PrintableString("café")
def test_utc_time_as_datetime(self) -> None:
dt = datetime.datetime(
2000, 1, 1, 10, 10, 10, tzinfo=datetime.timezone.utc
)
assert asn1.UtcTime(dt).as_datetime() == dt
def test_repr_utc_time(self) -> None:
dt = datetime.datetime(
2000, 1, 1, 10, 10, 10, tzinfo=datetime.timezone.utc
)
assert repr(asn1.UtcTime(dt)) == f"UtcTime({dt!r})"
def test_invalid_utc_time(self) -> None:
with pytest.raises(
ValueError,
match="cannot initialize with naive datetime object",
):
# We don't allow naive datetime objects
asn1.UtcTime(datetime.datetime(2000, 1, 1, 10, 10, 10))
with pytest.raises(ValueError, match="invalid UtcTime"):
# UtcTime does not support dates before 1950
asn1.UtcTime(
datetime.datetime(
1940, 1, 1, 10, 10, 10, tzinfo=datetime.timezone.utc
)
)
with pytest.raises(ValueError, match="invalid UtcTime"):
# UtcTime does not support dates after 2050
asn1.UtcTime(
datetime.datetime(
2090, 1, 1, 10, 10, 10, tzinfo=datetime.timezone.utc
)
)
with pytest.raises(
ValueError,
match="invalid UtcTime: fractional seconds are not supported",
):
# UtcTime does not support fractional seconds
asn1.UtcTime(
datetime.datetime(
2020,
1,
1,
10,
10,
10,
500000,
tzinfo=datetime.timezone.utc,
)
)
def test_generalized_time_as_datetime(self) -> None:
dt = datetime.datetime(
2000, 1, 1, 10, 10, 10, 300000, tzinfo=datetime.timezone.utc
)
assert asn1.GeneralizedTime(dt).as_datetime() == dt
def test_repr_generalized_time(self) -> None:
dt = datetime.datetime(
2000, 1, 1, 10, 10, 10, 300000, tzinfo=datetime.timezone.utc
)
assert repr(asn1.GeneralizedTime(dt)) == f"GeneralizedTime({dt!r})"
def test_invalid_generalized_time(self) -> None:
with pytest.raises(
ValueError,
match="cannot initialize with naive datetime object",
):
# We don't allow naive datetime objects
asn1.GeneralizedTime(datetime.datetime(2000, 1, 1, 10, 10, 10))
| TestTypesAPI |
python | sympy__sympy | sympy/printing/jscode.py | {
"start": 1130,
"end": 11981
} | class ____(CodePrinter):
""""A Printer to convert Python expressions to strings of JavaScript code
"""
printmethod = '_javascript'
language = 'JavaScript'
_default_settings: dict[str, Any] = dict(CodePrinter._default_settings, **{
'precision': 17,
'user_functions': {},
'contract': True,
})
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// {}".format(text)
def _declare_number_const(self, name, value):
return "var {} = {};".format(name, value.evalf(self._settings['precision']))
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for (var %(varble)s=%(start)s; %(varble)s<%(end)s; %(varble)s++){"
for i in indices:
# Javascript arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'varble': self._print(i.label),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
def _print_Pow(self, expr):
PREC = precedence(expr)
if equal_valued(expr.exp, -1):
return '1/%s' % (self.parenthesize(expr.base, PREC))
elif equal_valued(expr.exp, 0.5):
return 'Math.sqrt(%s)' % self._print(expr.base)
elif expr.exp == S.One/3:
return 'Math.cbrt(%s)' % self._print(expr.base)
else:
return 'Math.pow(%s, %s)' % (self._print(expr.base),
self._print(expr.exp))
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return '%d/%d' % (p, q)
def _print_Mod(self, expr):
num, den = expr.args
PREC = precedence(expr)
snum, sden = [self.parenthesize(arg, PREC) for arg in expr.args]
# % is remainder (same sign as numerator), not modulo (same sign as
# denominator), in js. Hence, % only works as modulo if both numbers
# have the same sign
if (num.is_nonnegative and den.is_nonnegative or
num.is_nonpositive and den.is_nonpositive):
return f"{snum} % {sden}"
return f"(({snum} % {sden}) + {sden}) % {sden}"
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Indexed(self, expr):
# calculate index for 1d array
dims = expr.shape
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += expr.indices[i]*offset
offset *= dims[i]
return "%s[%s]" % (self._print(expr.base.label), self._print(elem))
def _print_Exp1(self, expr):
return "Math.E"
def _print_Pi(self, expr):
return 'Math.PI'
def _print_Infinity(self, expr):
return 'Number.POSITIVE_INFINITY'
def _print_NegativeInfinity(self, expr):
return 'Number.NEGATIVE_INFINITY'
def _print_Piecewise(self, expr):
from sympy.codegen.ast import Assignment
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else {")
else:
lines.append("else if (%s) {" % self._print(c))
code0 = self._print(e)
lines.append(code0)
lines.append("}")
return "\n".join(lines)
else:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c), self._print(e))
for e, c in expr.args[:-1]]
last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])
def _print_MatrixElement(self, expr):
return "{}[{}]".format(self.parenthesize(expr.parent,
PRECEDENCE["Atom"], strict=True),
expr.j + expr.i*expr.parent.shape[1])
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, str):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
decrease = [ int(any(map(line.startswith, dec_token)))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line in ('', '\n'):
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def jscode(expr, assign_to=None, **settings):
"""Converts an expr to a string of javascript code
Parameters
==========
expr : Expr
A SymPy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, js_function_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import jscode, symbols, Rational, sin, ceiling, Abs
>>> x, tau = symbols("x, tau")
>>> jscode((2*tau)**Rational(7, 2))
'8*Math.sqrt(2)*Math.pow(tau, 7/2)'
>>> jscode(sin(x), assign_to="s")
's = Math.sin(x);'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
js_function_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")]
... }
>>> jscode(Abs(x) + ceiling(x), user_functions=custom_functions)
'fabs(x) + CEIL(x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(jscode(expr, tau))
if (x > 0) {
tau = x + 1;
}
else {
tau = x;
}
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> jscode(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(jscode(mat, A))
A[0] = Math.pow(x, 2);
if (x > 0) {
A[1] = x + 1;
}
else {
A[1] = x;
}
A[2] = Math.sin(x);
"""
return JavascriptCodePrinter(settings).doprint(expr, assign_to)
def print_jscode(expr, **settings):
"""Prints the Javascript representation of the given expression.
See jscode for the meaning of the optional arguments.
"""
print(jscode(expr, **settings))
| JavascriptCodePrinter |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 86822,
"end": 89091
} | class ____(torch.nn.Module):
def forward(
self,
primals_1: "Sym(s97)", # PlainAOTInput(idx=0)
primals_2: "Sym(s98)", # PlainAOTInput(idx=1)
primals_3: "f32[s97, s98]", # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=2), attr='a')
primals_4: "f32[s97, s98]", # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=2), attr='b')
primals_5: "Sym(s97)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=2), idx=0)
primals_6: "Sym(s98)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=2), idx=1)
primals_7: "Sym(s98)", # SubclassStrideAOTInput(base=PlainAOTInput(idx=2), idx=0)
):
mul: "f32[s97, s98]" = torch.ops.aten.mul.Tensor(primals_3, primals_1); primals_3 = None
mul_3: "f32[s97, s98]" = torch.ops.aten.mul.Tensor(primals_4, primals_1); primals_4 = None
mul_8: "f32[s97, s98]" = torch.ops.aten.mul.Tensor(mul, primals_2); mul = None
mul_11: "f32[s97, s98]" = torch.ops.aten.mul.Tensor(mul_3, primals_2); mul_3 = None
mul_16: "f32[s97, s98]" = torch.ops.aten.mul.Tensor(mul_8, primals_1); mul_8 = None
mul_19: "f32[s97, s98]" = torch.ops.aten.mul.Tensor(mul_11, primals_1); mul_11 = None
mul_24: "f32[s97, s98]" = torch.ops.aten.mul.Tensor(mul_16, primals_2); mul_16 = None
mul_27: "f32[s97, s98]" = torch.ops.aten.mul.Tensor(mul_19, primals_2); mul_19 = None
return (
mul_24, # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=0), attr='a')
mul_27, # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=0), attr='b')
primals_5, # SubclassSizeAOTOutput(base=PlainAOTOutput(idx=0), idx=0)
primals_7, # SubclassSizeAOTOutput(base=PlainAOTOutput(idx=0), idx=1)
primals_7, # SubclassStrideAOTOutput(base=PlainAOTOutput(idx=0), idx=0)
primals_1, # SavedForBackwardsAOTOutput(idx=0)
primals_2, # SavedForBackwardsAOTOutput(idx=1)
primals_5, # SavedForBackwardsAOTOutput(idx=2)
primals_7, # SavedForBackwardsAOTOutput(idx=3)
)
""", # noqa: B950
)
self.assertExpectedInline(
normalize_gm(bw[0].print_readable(print_output=False, expanded_def=True)),
"""\
| GraphModule |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-postorder-traversal.py | {
"start": 1212,
"end": 1768
} | class ____(object):
def postorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
result, stack = [], [(root, False)]
while stack:
root, is_visited = stack.pop()
if root is None:
continue
if is_visited:
result.append(root.val)
else:
stack.append((root, True))
stack.append((root.right, False))
stack.append((root.left, False))
return result
| Solution2 |
python | scipy__scipy | benchmarks/benchmarks/sparse.py | {
"start": 6436,
"end": 7239
} | class ____(Benchmark):
param_names = ['sparse_type', 'num_matrices']
params = [
['spmatrix', 'sparray'],
[1000, 5000, 10000, 15000, 20000],
]
def setup(self, sparse_type, num_matrices):
coo = sparse.coo_array if sparse_type == "sparray" else sparse.coo_matrix
self.matrices = []
for i in range(num_matrices):
rows = np.random.randint(1, 4)
columns = np.random.randint(1, 4)
mat = np.random.randint(0, 10, (rows, columns))
if i == 0:
self.matrices.append(coo(mat)) # make 1st requested sparse_type
else:
self.matrices.append(mat)
def time_block_diag(self, sparse_type, num_matrices):
sparse.block_diag(self.matrices)
| BlockDiagDenseConstruction |
python | pytorch__pytorch | test/distributed/test_c10d_gloo.py | {
"start": 6423,
"end": 6830
} | class ____(TestCase):
@retry_on_connect_failures
def test_tcp_init(self):
rendezvous_iterator = dist.rendezvous("tcp://127.0.0.1:0", rank=0, world_size=1)
store, rank, world_size = next(rendezvous_iterator)
self.assertEqual(rank, 0)
self.assertEqual(world_size, 1)
# port number should get assigned
self.assertNotEqual(store.port, "0")
| RendezvousTCPTest |
python | doocs__leetcode | solution/0100-0199/0139.Word Break/Solution2.py | {
"start": 0,
"end": 374
} | class ____:
def __init__(self):
self.children: List[Trie | None] = [None] * 26
self.isEnd = False
def insert(self, w: str):
node = self
for c in w:
idx = ord(c) - ord('a')
if not node.children[idx]:
node.children[idx] = Trie()
node = node.children[idx]
node.isEnd = True
| Trie |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 16595,
"end": 16956
} | class ____:
"""Test base bank provider"""
def test_bank_not_implemented_error(self, faker):
"""Test that bank() raises AttributeError when no banks attribute exists"""
provider = BankProvider(faker)
assert not hasattr(provider, "banks")
with pytest.raises(AttributeError):
provider.bank()
| TestBaseBankProvider |
python | aio-libs__aiohttp | aiohttp/connector.py | {
"start": 28770,
"end": 58395
} | class ____(BaseConnector):
"""TCP connector.
verify_ssl - Set to True to check ssl certifications.
fingerprint - Pass the binary sha256
digest of the expected certificate in DER format to verify
that the certificate the server presents matches. See also
https://en.wikipedia.org/wiki/HTTP_Public_Key_Pinning
resolver - Enable DNS lookups and use this
resolver
use_dns_cache - Use memory cache for DNS lookups.
ttl_dns_cache - Max seconds having cached a DNS entry, None forever.
family - socket address family
local_addr - local tuple of (host, port) to bind socket to
keepalive_timeout - (optional) Keep-alive timeout.
force_close - Set to True to force close and do reconnect
after each request (and between redirects).
limit - The total number of simultaneous connections.
limit_per_host - Number of simultaneous connections to one host.
enable_cleanup_closed - Enables clean-up closed ssl transports.
Disabled by default.
happy_eyeballs_delay - This is the “Connection Attempt Delay”
as defined in RFC 8305. To disable
the happy eyeballs algorithm, set to None.
interleave - “First Address Family Count” as defined in RFC 8305
loop - Optional event loop.
socket_factory - A SocketFactoryType function that, if supplied,
will be used to create sockets given an
AddrInfoType.
ssl_shutdown_timeout - DEPRECATED. Will be removed in aiohttp 4.0.
Grace period for SSL shutdown handshake on TLS
connections. Default is 0 seconds (immediate abort).
This parameter allowed for a clean SSL shutdown by
notifying the remote peer of connection closure,
while avoiding excessive delays during connector cleanup.
Note: Only takes effect on Python 3.11+.
"""
allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET | frozenset({"tcp"})
def __init__(
self,
*,
use_dns_cache: bool = True,
ttl_dns_cache: int | None = 10,
family: socket.AddressFamily = socket.AddressFamily.AF_UNSPEC,
ssl: bool | Fingerprint | SSLContext = True,
local_addr: tuple[str, int] | None = None,
resolver: AbstractResolver | None = None,
keepalive_timeout: None | float | _SENTINEL = sentinel,
force_close: bool = False,
limit: int = 100,
limit_per_host: int = 0,
enable_cleanup_closed: bool = False,
timeout_ceil_threshold: float = 5,
happy_eyeballs_delay: float | None = 0.25,
interleave: int | None = None,
socket_factory: SocketFactoryType | None = None,
ssl_shutdown_timeout: _SENTINEL | None | float = sentinel,
):
super().__init__(
keepalive_timeout=keepalive_timeout,
force_close=force_close,
limit=limit,
limit_per_host=limit_per_host,
enable_cleanup_closed=enable_cleanup_closed,
timeout_ceil_threshold=timeout_ceil_threshold,
)
if not isinstance(ssl, SSL_ALLOWED_TYPES):
raise TypeError(
"ssl should be SSLContext, Fingerprint, or bool, "
f"got {ssl!r} instead."
)
self._ssl = ssl
self._resolver: AbstractResolver
if resolver is None:
self._resolver = DefaultResolver()
self._resolver_owner = True
else:
self._resolver = resolver
self._resolver_owner = False
self._use_dns_cache = use_dns_cache
self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache)
self._throttle_dns_futures: dict[tuple[str, int], set[asyncio.Future[None]]] = (
{}
)
self._family = family
self._local_addr_infos = aiohappyeyeballs.addr_to_addr_infos(local_addr)
self._happy_eyeballs_delay = happy_eyeballs_delay
self._interleave = interleave
self._resolve_host_tasks: set[asyncio.Task[list[ResolveResult]]] = set()
self._socket_factory = socket_factory
self._ssl_shutdown_timeout: float | None
# Handle ssl_shutdown_timeout with warning for Python < 3.11
if ssl_shutdown_timeout is sentinel:
self._ssl_shutdown_timeout = 0
else:
# Deprecation warning for ssl_shutdown_timeout parameter
warnings.warn(
"The ssl_shutdown_timeout parameter is deprecated and will be removed in aiohttp 4.0",
DeprecationWarning,
stacklevel=2,
)
if (
sys.version_info < (3, 11)
and ssl_shutdown_timeout is not None
and ssl_shutdown_timeout != 0
):
warnings.warn(
f"ssl_shutdown_timeout={ssl_shutdown_timeout} is ignored on Python < 3.11; "
"only ssl_shutdown_timeout=0 is supported. The timeout will be ignored.",
RuntimeWarning,
stacklevel=2,
)
self._ssl_shutdown_timeout = ssl_shutdown_timeout
async def close(self, *, abort_ssl: bool = False) -> None:
"""Close all opened transports.
:param abort_ssl: If True, SSL connections will be aborted immediately
without performing the shutdown handshake. If False (default),
the behavior is determined by ssl_shutdown_timeout:
- If ssl_shutdown_timeout=0: connections are aborted
- If ssl_shutdown_timeout>0: graceful shutdown is performed
"""
if self._resolver_owner:
await self._resolver.close()
# Use abort_ssl param if explicitly set, otherwise use ssl_shutdown_timeout default
await super().close(abort_ssl=abort_ssl or self._ssl_shutdown_timeout == 0)
def _close_immediately(self, *, abort_ssl: bool = False) -> list[Awaitable[object]]:
for fut in chain.from_iterable(self._throttle_dns_futures.values()):
fut.cancel()
waiters = super()._close_immediately(abort_ssl=abort_ssl)
for t in self._resolve_host_tasks:
t.cancel()
waiters.append(t)
return waiters
@property
def family(self) -> int:
"""Socket family like AF_INET."""
return self._family
@property
def use_dns_cache(self) -> bool:
"""True if local DNS caching is enabled."""
return self._use_dns_cache
def clear_dns_cache(self, host: str | None = None, port: int | None = None) -> None:
"""Remove specified host/port or clear all dns local cache."""
if host is not None and port is not None:
self._cached_hosts.remove((host, port))
elif host is not None or port is not None:
raise ValueError("either both host and port or none of them are allowed")
else:
self._cached_hosts.clear()
async def _resolve_host(
self, host: str, port: int, traces: Sequence["Trace"] | None = None
) -> list[ResolveResult]:
"""Resolve host and return list of addresses."""
if is_ip_address(host):
return [
{
"hostname": host,
"host": host,
"port": port,
"family": self._family,
"proto": 0,
"flags": 0,
}
]
if not self._use_dns_cache:
if traces:
for trace in traces:
await trace.send_dns_resolvehost_start(host)
res = await self._resolver.resolve(host, port, family=self._family)
if traces:
for trace in traces:
await trace.send_dns_resolvehost_end(host)
return res
key = (host, port)
if key in self._cached_hosts and not self._cached_hosts.expired(key):
# get result early, before any await (#4014)
result = self._cached_hosts.next_addrs(key)
if traces:
for trace in traces:
await trace.send_dns_cache_hit(host)
return result
futures: set[asyncio.Future[None]]
#
# If multiple connectors are resolving the same host, we wait
# for the first one to resolve and then use the result for all of them.
# We use a throttle to ensure that we only resolve the host once
# and then use the result for all the waiters.
#
if key in self._throttle_dns_futures:
# get futures early, before any await (#4014)
futures = self._throttle_dns_futures[key]
future: asyncio.Future[None] = self._loop.create_future()
futures.add(future)
if traces:
for trace in traces:
await trace.send_dns_cache_hit(host)
try:
await future
finally:
futures.discard(future)
return self._cached_hosts.next_addrs(key)
# update dict early, before any await (#4014)
self._throttle_dns_futures[key] = futures = set()
# In this case we need to create a task to ensure that we can shield
# the task from cancellation as cancelling this lookup should not cancel
# the underlying lookup or else the cancel event will get broadcast to
# all the waiters across all connections.
#
coro = self._resolve_host_with_throttle(key, host, port, futures, traces)
loop = asyncio.get_running_loop()
if sys.version_info >= (3, 12):
# Optimization for Python 3.12, try to send immediately
resolved_host_task = asyncio.Task(coro, loop=loop, eager_start=True)
else:
resolved_host_task = loop.create_task(coro)
if not resolved_host_task.done():
self._resolve_host_tasks.add(resolved_host_task)
resolved_host_task.add_done_callback(self._resolve_host_tasks.discard)
try:
return await asyncio.shield(resolved_host_task)
except asyncio.CancelledError:
def drop_exception(fut: "asyncio.Future[list[ResolveResult]]") -> None:
with suppress(Exception, asyncio.CancelledError):
fut.result()
resolved_host_task.add_done_callback(drop_exception)
raise
async def _resolve_host_with_throttle(
self,
key: tuple[str, int],
host: str,
port: int,
futures: set[asyncio.Future[None]],
traces: Sequence["Trace"] | None,
) -> list[ResolveResult]:
"""Resolve host and set result for all waiters.
This method must be run in a task and shielded from cancellation
to avoid cancelling the underlying lookup.
"""
try:
if traces:
for trace in traces:
await trace.send_dns_cache_miss(host)
for trace in traces:
await trace.send_dns_resolvehost_start(host)
addrs = await self._resolver.resolve(host, port, family=self._family)
if traces:
for trace in traces:
await trace.send_dns_resolvehost_end(host)
self._cached_hosts.add(key, addrs)
for fut in futures:
set_result(fut, None)
except BaseException as e:
# any DNS exception is set for the waiters to raise the same exception.
# This coro is always run in task that is shielded from cancellation so
# we should never be propagating cancellation here.
for fut in futures:
set_exception(fut, e)
raise
finally:
self._throttle_dns_futures.pop(key)
return self._cached_hosts.next_addrs(key)
async def _create_connection(
self, req: ClientRequest, traces: list["Trace"], timeout: "ClientTimeout"
) -> ResponseHandler:
"""Create connection.
Has same keyword arguments as BaseEventLoop.create_connection.
"""
if req.proxy:
_, proto = await self._create_proxy_connection(req, traces, timeout)
else:
_, proto = await self._create_direct_connection(req, traces, timeout)
return proto
def _get_ssl_context(self, req: ClientRequestBase) -> SSLContext | None:
"""Logic to get the correct SSL context
0. if req.ssl is false, return None
1. if ssl_context is specified in req, use it
2. if _ssl_context is specified in self, use it
3. otherwise:
1. if verify_ssl is not specified in req, use self.ssl_context
(will generate a default context according to self.verify_ssl)
2. if verify_ssl is True in req, generate a default SSL context
3. if verify_ssl is False in req, generate a SSL context that
won't verify
"""
if not req.is_ssl():
return None
if ssl is None: # pragma: no cover
raise RuntimeError("SSL is not supported.")
sslcontext = req.ssl
if isinstance(sslcontext, ssl.SSLContext):
return sslcontext
if sslcontext is not True:
# not verified or fingerprinted
return _SSL_CONTEXT_UNVERIFIED
sslcontext = self._ssl
if isinstance(sslcontext, ssl.SSLContext):
return sslcontext
if sslcontext is not True:
# not verified or fingerprinted
return _SSL_CONTEXT_UNVERIFIED
return _SSL_CONTEXT_VERIFIED
def _get_fingerprint(self, req: ClientRequestBase) -> "Fingerprint | None":
ret = req.ssl
if isinstance(ret, Fingerprint):
return ret
ret = self._ssl
if isinstance(ret, Fingerprint):
return ret
return None
async def _wrap_create_connection(
self,
*args: Any,
addr_infos: list[AddrInfoType],
req: ClientRequestBase,
timeout: "ClientTimeout",
client_error: type[Exception] = ClientConnectorError,
**kwargs: Any,
) -> tuple[asyncio.Transport, ResponseHandler]:
try:
async with ceil_timeout(
timeout.sock_connect, ceil_threshold=timeout.ceil_threshold
):
sock = await aiohappyeyeballs.start_connection(
addr_infos=addr_infos,
local_addr_infos=self._local_addr_infos,
happy_eyeballs_delay=self._happy_eyeballs_delay,
interleave=self._interleave,
loop=self._loop,
socket_factory=self._socket_factory,
)
# Add ssl_shutdown_timeout for Python 3.11+ when SSL is used
if (
kwargs.get("ssl")
and self._ssl_shutdown_timeout
and sys.version_info >= (3, 11)
):
kwargs["ssl_shutdown_timeout"] = self._ssl_shutdown_timeout
return await self._loop.create_connection(*args, **kwargs, sock=sock)
except cert_errors as exc:
raise ClientConnectorCertificateError(req.connection_key, exc) from exc
except ssl_errors as exc:
raise ClientConnectorSSLError(req.connection_key, exc) from exc
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
raise client_error(req.connection_key, exc) from exc
def _warn_about_tls_in_tls(
self,
underlying_transport: asyncio.Transport,
req: ClientRequest,
) -> None:
"""Issue a warning if the requested URL has HTTPS scheme."""
if req.url.scheme != "https":
return
# Check if uvloop is being used, which supports TLS in TLS,
# otherwise assume that asyncio's native transport is being used.
if type(underlying_transport).__module__.startswith("uvloop"):
return
# Support in asyncio was added in Python 3.11 (bpo-44011)
asyncio_supports_tls_in_tls = sys.version_info >= (3, 11) or getattr(
underlying_transport,
"_start_tls_compatible",
False,
)
if asyncio_supports_tls_in_tls:
return
warnings.warn(
"An HTTPS request is being sent through an HTTPS proxy. "
"This support for TLS in TLS is known to be disabled "
"in the stdlib asyncio. This is why you'll probably see "
"an error in the log below.\n\n"
"It is possible to enable it via monkeypatching. "
"For more details, see:\n"
"* https://bugs.python.org/issue37179\n"
"* https://github.com/python/cpython/pull/28073\n\n"
"You can temporarily patch this as follows:\n"
"* https://docs.aiohttp.org/en/stable/client_advanced.html#proxy-support\n"
"* https://github.com/aio-libs/aiohttp/discussions/6044\n",
RuntimeWarning,
source=self,
# Why `4`? At least 3 of the calls in the stack originate
# from the methods in this class.
stacklevel=3,
)
async def _start_tls_connection(
self,
underlying_transport: asyncio.Transport,
req: ClientRequest,
timeout: "ClientTimeout",
client_error: type[Exception] = ClientConnectorError,
) -> tuple[asyncio.BaseTransport, ResponseHandler]:
"""Wrap the raw TCP transport with TLS."""
tls_proto = self._factory() # Create a brand new proto for TLS
sslcontext = self._get_ssl_context(req)
if TYPE_CHECKING:
# _start_tls_connection is unreachable in the current code path
# if sslcontext is None.
assert sslcontext is not None
try:
async with ceil_timeout(
timeout.sock_connect, ceil_threshold=timeout.ceil_threshold
):
try:
# ssl_shutdown_timeout is only available in Python 3.11+
if sys.version_info >= (3, 11) and self._ssl_shutdown_timeout:
tls_transport = await self._loop.start_tls(
underlying_transport,
tls_proto,
sslcontext,
server_hostname=req.server_hostname or req.url.raw_host,
ssl_handshake_timeout=timeout.total,
ssl_shutdown_timeout=self._ssl_shutdown_timeout,
)
else:
tls_transport = await self._loop.start_tls(
underlying_transport,
tls_proto,
sslcontext,
server_hostname=req.server_hostname or req.url.raw_host,
ssl_handshake_timeout=timeout.total,
)
except BaseException:
# We need to close the underlying transport since
# `start_tls()` probably failed before it had a
# chance to do this:
if self._ssl_shutdown_timeout == 0:
underlying_transport.abort()
else:
underlying_transport.close()
raise
if isinstance(tls_transport, asyncio.Transport):
fingerprint = self._get_fingerprint(req)
if fingerprint:
try:
fingerprint.check(tls_transport)
except ServerFingerprintMismatch:
tls_transport.close()
if not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(tls_transport)
raise
except cert_errors as exc:
raise ClientConnectorCertificateError(req.connection_key, exc) from exc
except ssl_errors as exc:
raise ClientConnectorSSLError(req.connection_key, exc) from exc
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
raise client_error(req.connection_key, exc) from exc
except TypeError as type_err:
# Example cause looks like this:
# TypeError: transport <asyncio.sslproto._SSLProtocolTransport
# object at 0x7f760615e460> is not supported by start_tls()
raise ClientConnectionError(
"Cannot initialize a TLS-in-TLS connection to host "
f"{req.url.host!s}:{req.url.port:d} through an underlying connection "
f"to an HTTPS proxy {req.proxy!s} ssl:{req.ssl or 'default'} "
f"[{type_err!s}]"
) from type_err
else:
if tls_transport is None:
msg = "Failed to start TLS (possibly caused by closing transport)"
raise client_error(req.connection_key, OSError(msg))
tls_proto.connection_made(
tls_transport
) # Kick the state machine of the new TLS protocol
return tls_transport, tls_proto
def _convert_hosts_to_addr_infos(
self, hosts: list[ResolveResult]
) -> list[AddrInfoType]:
"""Converts the list of hosts to a list of addr_infos.
The list of hosts is the result of a DNS lookup. The list of
addr_infos is the result of a call to `socket.getaddrinfo()`.
"""
addr_infos: list[AddrInfoType] = []
for hinfo in hosts:
host = hinfo["host"]
is_ipv6 = ":" in host
family = socket.AF_INET6 if is_ipv6 else socket.AF_INET
if self._family and self._family != family:
continue
addr = (host, hinfo["port"], 0, 0) if is_ipv6 else (host, hinfo["port"])
addr_infos.append(
(family, socket.SOCK_STREAM, socket.IPPROTO_TCP, "", addr)
)
return addr_infos
async def _create_direct_connection(
self,
req: ClientRequestBase,
traces: list["Trace"],
timeout: "ClientTimeout",
*,
client_error: type[Exception] = ClientConnectorError,
) -> tuple[asyncio.Transport, ResponseHandler]:
sslcontext = self._get_ssl_context(req)
fingerprint = self._get_fingerprint(req)
host = req.url.raw_host
assert host is not None
# Replace multiple trailing dots with a single one.
# A trailing dot is only present for fully-qualified domain names.
# See https://github.com/aio-libs/aiohttp/pull/7364.
if host.endswith(".."):
host = host.rstrip(".") + "."
port = req.url.port
assert port is not None
try:
# Cancelling this lookup should not cancel the underlying lookup
# or else the cancel event will get broadcast to all the waiters
# across all connections.
hosts = await self._resolve_host(host, port, traces=traces)
except OSError as exc:
if exc.errno is None and isinstance(exc, asyncio.TimeoutError):
raise
# in case of proxy it is not ClientProxyConnectionError
# it is problem of resolving proxy ip itself
raise ClientConnectorDNSError(req.connection_key, exc) from exc
last_exc: Exception | None = None
addr_infos = self._convert_hosts_to_addr_infos(hosts)
while addr_infos:
# Strip trailing dots, certificates contain FQDN without dots.
# See https://github.com/aio-libs/aiohttp/issues/3636
server_hostname = (
(req.server_hostname or host).rstrip(".") if sslcontext else None
)
try:
transp, proto = await self._wrap_create_connection(
self._factory,
timeout=timeout,
ssl=sslcontext,
addr_infos=addr_infos,
server_hostname=server_hostname,
req=req,
client_error=client_error,
)
except (ClientConnectorError, asyncio.TimeoutError) as exc:
last_exc = exc
aiohappyeyeballs.pop_addr_infos_interleave(addr_infos, self._interleave)
continue
if req.is_ssl() and fingerprint:
try:
fingerprint.check(transp)
except ServerFingerprintMismatch as exc:
transp.close()
if not self._cleanup_closed_disabled:
self._cleanup_closed_transports.append(transp)
last_exc = exc
# Remove the bad peer from the list of addr_infos
sock: socket.socket = transp.get_extra_info("socket")
bad_peer = sock.getpeername()
aiohappyeyeballs.remove_addr_infos(addr_infos, bad_peer)
continue
return transp, proto
assert last_exc is not None
raise last_exc
async def _create_proxy_connection(
self, req: ClientRequest, traces: list["Trace"], timeout: "ClientTimeout"
) -> tuple[asyncio.BaseTransport, ResponseHandler]:
headers = CIMultiDict[str]() if req.proxy_headers is None else req.proxy_headers
headers[hdrs.HOST] = req.headers[hdrs.HOST]
url = req.proxy
assert url is not None
proxy_req = ClientRequestBase(
hdrs.METH_GET,
url,
headers=headers,
auth=req.proxy_auth,
loop=self._loop,
ssl=req.ssl,
)
# create connection to proxy server
transport, proto = await self._create_direct_connection(
proxy_req, [], timeout, client_error=ClientProxyConnectionError
)
auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None)
if auth is not None:
if not req.is_ssl():
req.headers[hdrs.PROXY_AUTHORIZATION] = auth
else:
proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth
if req.is_ssl():
self._warn_about_tls_in_tls(transport, req)
# For HTTPS requests over HTTP proxy
# we must notify proxy to tunnel connection
# so we send CONNECT command:
# CONNECT www.python.org:443 HTTP/1.1
# Host: www.python.org
#
# next we must do TLS handshake and so on
# to do this we must wrap raw socket into secure one
# asyncio handles this perfectly
proxy_req.method = hdrs.METH_CONNECT
proxy_req.url = req.url
key = req.connection_key._replace(
proxy=None, proxy_auth=None, proxy_headers_hash=None
)
conn = _ConnectTunnelConnection(self, key, proto, self._loop)
proxy_resp = await proxy_req._send(conn)
try:
protocol = conn._protocol
assert protocol is not None
# read_until_eof=True will ensure the connection isn't closed
# once the response is received and processed allowing
# START_TLS to work on the connection below.
protocol.set_response_params(
read_until_eof=True,
timeout_ceil_threshold=self._timeout_ceil_threshold,
)
resp = await proxy_resp.start(conn)
except BaseException:
proxy_resp.close()
conn.close()
raise
else:
conn._protocol = None
try:
if resp.status != 200:
message = resp.reason
if message is None:
message = HTTPStatus(resp.status).phrase
raise ClientHttpProxyError(
proxy_resp.request_info,
resp.history,
status=resp.status,
message=message,
headers=resp.headers,
)
except BaseException:
# It shouldn't be closed in `finally` because it's fed to
# `loop.start_tls()` and the docs say not to touch it after
# passing there.
transport.close()
raise
return await self._start_tls_connection(
# Access the old transport for the last time before it's
# closed and forgotten forever:
transport,
req=req,
timeout=timeout,
)
finally:
proxy_resp.close()
return transport, proto
| TCPConnector |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/parsers/base_argument_parsers.py | {
"start": 247,
"end": 773
} | class ____(NamespaceParser, metaclass=abc.ABCMeta):
"""Base class for controller namespace parsers."""
@property
def dest(self) -> str:
"""The name of the attribute where the value should be stored."""
return 'controller'
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
if state.root_namespace.targets:
raise ControllerRequiredFirstError()
return super().parse(state)
| ControllerNamespaceParser |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 23811,
"end": 24548
} | class ____:
"""
Explicit subtest case for use with test parametrization.
Allows for explicit naming of individual subtest cases as well as applying
decorators to the parametrized test.
Args:
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name (str): Optional name to use for the test.
decorators (iterable): Iterable of decorators to apply to the generated test.
"""
__slots__ = ['arg_values', 'name', 'decorators']
def __init__(self, arg_values, name=None, decorators=None):
self.arg_values = arg_values
self.name = name
self.decorators = decorators if decorators else []
| subtest |
python | openai__openai-python | src/openai/resources/realtime/calls.py | {
"start": 32531,
"end": 33169
} | class ____:
def __init__(self, calls: AsyncCalls) -> None:
self._calls = calls
self.create = async_to_custom_streamed_response_wrapper(
calls.create,
AsyncStreamedBinaryAPIResponse,
)
self.accept = async_to_streamed_response_wrapper(
calls.accept,
)
self.hangup = async_to_streamed_response_wrapper(
calls.hangup,
)
self.refer = async_to_streamed_response_wrapper(
calls.refer,
)
self.reject = async_to_streamed_response_wrapper(
calls.reject,
)
| AsyncCallsWithStreamingResponse |
python | huggingface__transformers | tests/models/glm4/test_modeling_glm4.py | {
"start": 1145,
"end": 1272
} | class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = Glm4Model
@require_torch
| Glm4ModelTester |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/skills/versions.py | {
"start": 12176,
"end": 23102
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncVersionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return AsyncVersionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncVersionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return AsyncVersionsWithStreamingResponse(self)
async def create(
self,
skill_id: str,
*,
files: Optional[SequenceNotStr[FileTypes]] | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VersionCreateResponse:
"""
Create Skill Version
Args:
skill_id: Unique identifier for the skill.
The format and length of IDs may change over time.
files: Files to upload for the skill.
All files must be in the same top-level directory and must include a SKILL.md
file at the root of that directory.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
body = deepcopy_minimal({"files": files})
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", "<array>"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers["Content-Type"] = "multipart/form-data"
return await self._post(
f"/v1/skills/{skill_id}/versions?beta=true",
body=await async_maybe_transform(body, version_create_params.VersionCreateParams),
files=extracted_files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VersionCreateResponse,
)
async def retrieve(
self,
version: str,
*,
skill_id: str,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VersionRetrieveResponse:
"""
Get Skill Version
Args:
skill_id: Unique identifier for the skill.
The format and length of IDs may change over time.
version: Version identifier for the skill.
Each version is identified by a Unix epoch timestamp (e.g., "1759178010641129").
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
if not version:
raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
return await self._get(
f"/v1/skills/{skill_id}/versions/{version}?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VersionRetrieveResponse,
)
def list(
self,
skill_id: str,
*,
limit: Optional[int] | Omit = omit,
page: Optional[str] | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[VersionListResponse, AsyncPageCursor[VersionListResponse]]:
"""
List Skill Versions
Args:
skill_id: Unique identifier for the skill.
The format and length of IDs may change over time.
limit: Number of items to return per page.
Defaults to `20`. Ranges from `1` to `1000`.
page: Optionally set to the `next_page` token from the previous response.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
return self._get_api_list(
f"/v1/skills/{skill_id}/versions?beta=true",
page=AsyncPageCursor[VersionListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"limit": limit,
"page": page,
},
version_list_params.VersionListParams,
),
),
model=VersionListResponse,
)
async def delete(
self,
version: str,
*,
skill_id: str,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VersionDeleteResponse:
"""
Delete Skill Version
Args:
skill_id: Unique identifier for the skill.
The format and length of IDs may change over time.
version: Version identifier for the skill.
Each version is identified by a Unix epoch timestamp (e.g., "1759178010641129").
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
if not version:
raise ValueError(f"Expected a non-empty value for `version` but received {version!r}")
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
return await self._delete(
f"/v1/skills/{skill_id}/versions/{version}?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VersionDeleteResponse,
)
| AsyncVersions |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 48046,
"end": 54858
} | class ____(Base):
"""
DB model for evaluation dataset records.
"""
__tablename__ = "evaluation_dataset_records"
RECORD_ID_PREFIX = "dr-"
dataset_record_id = Column(String(36), primary_key=True)
"""
Dataset record ID: `String` (limit 36 characters).
*Primary Key* for ``evaluation_dataset_records`` table.
"""
dataset_id = Column(
String(36), ForeignKey("evaluation_datasets.dataset_id", ondelete="CASCADE"), nullable=False
)
"""
Dataset ID: `String` (limit 36 characters). Foreign key to evaluation_datasets.
"""
inputs = Column(MutableJSON, nullable=False)
"""
Inputs JSON: `JSON`. *Non null* in table schema.
"""
outputs = Column(MutableJSON, nullable=True)
"""
Outputs JSON: `JSON`.
"""
expectations = Column(MutableJSON, nullable=True)
"""
Expectations JSON: `JSON`.
"""
tags = Column(MutableJSON, nullable=True)
"""
Tags JSON: `JSON`.
"""
source = Column(MutableJSON, nullable=True)
"""
Source JSON: `JSON`.
"""
source_id = Column(String(36), nullable=True)
"""
Source ID for lookups: `String` (limit 36 characters).
"""
source_type = Column(String(255), nullable=True)
"""
Source type: `Text`.
"""
created_time = Column(BigInteger, default=get_current_time_millis)
"""
Creation time: `BigInteger`.
"""
last_update_time = Column(BigInteger, default=get_current_time_millis)
"""
Last update time: `BigInteger`.
"""
created_by = Column(String(255), nullable=True)
"""
Creator user ID: `String` (limit 255 characters).
"""
last_updated_by = Column(String(255), nullable=True)
"""
Last updater user ID: `String` (limit 255 characters).
"""
input_hash = Column(String(64), nullable=False)
"""
Hash of inputs for deduplication: `String` (limit 64 characters).
"""
dataset = relationship("SqlEvaluationDataset", back_populates="records")
__table_args__ = (
PrimaryKeyConstraint("dataset_record_id", name="evaluation_dataset_records_pk"),
Index("index_evaluation_dataset_records_dataset_id", "dataset_id"),
UniqueConstraint("dataset_id", "input_hash", name="unique_dataset_input"),
ForeignKeyConstraint(
["dataset_id"],
["evaluation_datasets.dataset_id"],
name="fk_evaluation_dataset_records_dataset_id",
ondelete="CASCADE",
),
)
def __init__(self, **kwargs):
"""Initialize a new dataset record with auto-generated ID if not provided."""
if "dataset_record_id" not in kwargs:
kwargs["dataset_record_id"] = self.generate_record_id()
super().__init__(**kwargs)
@staticmethod
def generate_record_id() -> str:
"""
Generate a unique ID for dataset records.
Returns:
A unique record ID with the format "dr-<uuid_hex>".
"""
return f"{SqlEvaluationDatasetRecord.RECORD_ID_PREFIX}{uuid.uuid4().hex}"
def to_mlflow_entity(self):
inputs = self.inputs
expectations = self.expectations
tags = self.tags
outputs = self.outputs.get(DATASET_RECORD_WRAPPED_OUTPUT_KEY) if self.outputs else None
source = None
if self.source:
source = DatasetRecordSource.from_dict(self.source)
return DatasetRecord(
dataset_record_id=self.dataset_record_id,
dataset_id=self.dataset_id,
inputs=inputs,
outputs=outputs,
expectations=expectations,
tags=tags,
source=source,
source_id=self.source_id,
created_time=self.created_time,
last_update_time=self.last_update_time,
created_by=self.created_by,
last_updated_by=self.last_updated_by,
)
@classmethod
def from_mlflow_entity(cls, record: DatasetRecord, input_hash: str):
"""
Create SqlEvaluationDatasetRecord from DatasetRecord entity.
Args:
record: DatasetRecord entity
input_hash: SHA256 hash of inputs for deduplication
Returns:
SqlEvaluationDatasetRecord instance
"""
source_dict = None
if record.source:
source_dict = record.source.to_dict()
outputs = (
{DATASET_RECORD_WRAPPED_OUTPUT_KEY: record.outputs}
if record.outputs is not None
else None
)
kwargs = {
"dataset_id": record.dataset_id,
"inputs": record.inputs,
"outputs": outputs,
"expectations": record.expectations,
"tags": record.tags,
"source": source_dict,
"source_id": record.source_id,
"source_type": record.source.source_type if record.source else None,
"created_time": record.created_time or get_current_time_millis(),
"last_update_time": record.last_update_time or get_current_time_millis(),
"created_by": record.created_by,
"last_updated_by": record.last_updated_by,
"input_hash": input_hash,
}
if record.dataset_record_id:
kwargs["dataset_record_id"] = record.dataset_record_id
return cls(**kwargs)
def merge(self, new_record_dict: dict[str, Any]) -> None:
"""
Merge new record data into this existing record.
Updates outputs, expectations and tags by merging new values with existing ones.
Preserves created_time and created_by from the original record.
Args:
new_record_dict: Dictionary containing new record data with optional
'outputs', 'expectations' and 'tags' fields to merge.
"""
if "outputs" in new_record_dict:
new_outputs = new_record_dict["outputs"]
self.outputs = (
{DATASET_RECORD_WRAPPED_OUTPUT_KEY: new_outputs}
if new_outputs is not None
else None
)
if new_expectations := new_record_dict.get("expectations"):
if self.expectations is None:
self.expectations = {}
self.expectations.update(new_expectations)
if new_tags := new_record_dict.get("tags"):
if self.tags is None:
self.tags = {}
self.tags.update(new_tags)
self.last_update_time = get_current_time_millis()
# Update last_updated_by if mlflow.user tag is present
# Otherwise keep the existing last_updated_by (don't change it to None)
if new_tags and MLFLOW_USER in new_tags:
self.last_updated_by = new_tags[MLFLOW_USER]
| SqlEvaluationDatasetRecord |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/secrets/loader.py | {
"start": 180,
"end": 415
} | class ____(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):
@abstractmethod
def get_secrets_for_environment(self, location_name: Optional[str]) -> Mapping[str, str]:
pass
def dispose(self):
return
| SecretsLoader |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/models/edge_logs.py | {
"start": 1224,
"end": 3022
} | class ____(Base, LoggingMixin):
"""
Temporary collected logs from a Edge Worker while job runs on remote site.
As the Edge Worker in most cases has a local file system and the web UI no access
to read files from remote site, Edge Workers will send incremental chunks of logs
of running jobs to the central site. As log storage backends in most cloud cases can not
append logs, the table is used as buffer to receive. Upon task completion logs can be
flushed to task log handler.
Log data therefore is collected in chunks and is only temporary.
"""
__tablename__ = "edge_logs"
dag_id: Mapped[str] = mapped_column(StringID(), primary_key=True, nullable=False)
task_id: Mapped[str] = mapped_column(StringID(), primary_key=True, nullable=False)
run_id: Mapped[str] = mapped_column(StringID(), primary_key=True, nullable=False)
map_index: Mapped[int] = mapped_column(
Integer, primary_key=True, nullable=False, server_default=text("-1")
)
try_number: Mapped[int] = mapped_column(Integer, primary_key=True, default=0)
log_chunk_time: Mapped[datetime] = mapped_column(UtcDateTime, primary_key=True, nullable=False)
log_chunk_data: Mapped[str] = mapped_column(Text().with_variant(MEDIUMTEXT(), "mysql"), nullable=False)
def __init__(
self,
dag_id: str,
task_id: str,
run_id: str,
map_index: int,
try_number: int,
log_chunk_time: datetime,
log_chunk_data: str,
):
self.dag_id = dag_id
self.task_id = task_id
self.run_id = run_id
self.map_index = map_index
self.try_number = try_number
self.log_chunk_time = log_chunk_time
self.log_chunk_data = log_chunk_data
super().__init__()
| EdgeLogsModel |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 260226,
"end": 262020
} | class ____(ExternKernel):
"""
This needs to be a custom class to handle mutation properly.
This class handles both aten.scatter_ and aten.scatter_reduce_.
It also handle the case `src` being a scalar properly.
"""
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
wrapper.generate_scatter_fallback(self)
def should_allocate(self) -> bool:
return False
def get_mutation_names(self) -> list[str]:
inp = self.inputs[0]
assert isinstance(inp, IRNode)
return [inp.get_name()]
def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:
return OrderedSet()
def __init__(
self,
op_overload: _OpOverloads,
x: IRNode,
dim: int,
index: IRNode,
src: IRNode,
*,
reduce: Optional[str] = None,
include_self: bool = True,
) -> None:
self.src_is_tensor = isinstance(src, TensorBox)
constant_args: tuple[Any, ...]
if self.src_is_tensor:
tensors = [self.realize_input(t) for t in [x, index, src]]
constant_args = (dim,)
else:
tensors = [self.realize_input(t) for t in [x, index]]
constant_args = (dim, src)
super().__init__(
None,
NoneLayout(device=x.get_device()),
self.unwrap_storage(tensors),
constant_args,
{"reduce": reduce, "include_self": include_self},
python_kernel_name=str(op_overload),
ordered_kwargs_for_cpp_kernel=["reduce", "include_self"],
op_overload=op_overload,
)
V.graph.mark_buffer_mutated(x.get_name())
self.name = V.graph.register_buffer(self)
V.graph.register_operation(self)
| ScatterFallback |
python | django__django | tests/utils_tests/test_module_loading.py | {
"start": 5968,
"end": 8451
} | class ____(SimpleTestCase):
def tearDown(self):
sys.path_importer_cache.clear()
sys.modules.pop("utils_tests.test_module.another_bad_module", None)
sys.modules.pop("utils_tests.test_module.another_good_module", None)
sys.modules.pop("utils_tests.test_module.bad_module", None)
sys.modules.pop("utils_tests.test_module.good_module", None)
sys.modules.pop("utils_tests.test_module", None)
def test_autodiscover_modules_found(self):
autodiscover_modules("good_module")
def test_autodiscover_modules_not_found(self):
autodiscover_modules("missing_module")
def test_autodiscover_modules_found_but_bad_module(self):
with self.assertRaisesMessage(
ImportError, "No module named 'a_package_name_that_does_not_exist'"
):
autodiscover_modules("bad_module")
def test_autodiscover_modules_several_one_bad_module(self):
with self.assertRaisesMessage(
ImportError, "No module named 'a_package_name_that_does_not_exist'"
):
autodiscover_modules("good_module", "bad_module")
def test_autodiscover_modules_several_found(self):
autodiscover_modules("good_module", "another_good_module")
def test_autodiscover_modules_several_found_with_registry(self):
from .test_module import site
autodiscover_modules("good_module", "another_good_module", register_to=site)
self.assertEqual(site._registry, {"lorem": "ipsum"})
def test_validate_registry_keeps_intact(self):
from .test_module import site
with self.assertRaisesMessage(Exception, "Some random exception."):
autodiscover_modules("another_bad_module", register_to=site)
self.assertEqual(site._registry, {})
def test_validate_registry_resets_after_erroneous_module(self):
from .test_module import site
with self.assertRaisesMessage(Exception, "Some random exception."):
autodiscover_modules(
"another_good_module", "another_bad_module", register_to=site
)
self.assertEqual(site._registry, {"lorem": "ipsum"})
def test_validate_registry_resets_after_missing_module(self):
from .test_module import site
autodiscover_modules(
"does_not_exist", "another_good_module", "does_not_exist2", register_to=site
)
self.assertEqual(site._registry, {"lorem": "ipsum"})
| AutodiscoverModulesTestCase |
python | pyca__cryptography | tests/hazmat/primitives/test_rsa.py | {
"start": 94819,
"end": 101209
} | class ____:
@pytest.mark.parametrize(
("key_path", "loader_func", "encoding", "format"),
[
(
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.pem"),
serialization.load_pem_public_key,
serialization.Encoding.PEM,
serialization.PublicFormat.PKCS1,
),
(
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.der"),
serialization.load_der_public_key,
serialization.Encoding.DER,
serialization.PublicFormat.PKCS1,
),
(
os.path.join("asymmetric", "PKCS8", "unenc-rsa-pkcs8.pub.pem"),
serialization.load_pem_public_key,
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo,
),
(
os.path.join(
"asymmetric",
"DER_Serialization",
"unenc-rsa-pkcs8.pub.der",
),
serialization.load_der_public_key,
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo,
),
],
)
def test_public_bytes_match(
self, key_path, loader_func, encoding, format, backend
):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
key = loader_func(key_bytes, backend)
serialized = key.public_bytes(encoding, format)
assert serialized == key_bytes
def test_public_bytes_openssh(self, backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.pem"),
lambda pemfile: pemfile.read(),
mode="rb",
)
key = serialization.load_pem_public_key(key_bytes, backend)
ssh_bytes = key.public_bytes(
serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH
)
assert ssh_bytes == (
b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC7JHoJfg6yNzLMOWet8Z49a4KD"
b"0dCspMAYvo2YAMB7/wdEycocujbhJ2n/seONi+5XqTqqFkM5VBl8rmkkFPZk/7x0"
b"xmdsTPECSWnHK+HhoaNDFPR3j8jQhVo1laxiqcEhAHegi5cwtFosuJAvSKAFKEvy"
b"D43si00DQnXWrYHAEQ=="
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.OpenSSH
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.DER, serialization.PublicFormat.OpenSSH
)
with pytest.raises(TypeError):
key.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.PKCS1,
)
with pytest.raises(TypeError):
key.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
def test_public_bytes_invalid_encoding(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
key = rsa_key_2048.public_key()
with pytest.raises(TypeError):
key.public_bytes(
"notencoding", # type: ignore[arg-type]
serialization.PublicFormat.PKCS1,
)
def test_public_bytes_invalid_format(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
key = rsa_key_2048.public_key()
with pytest.raises(TypeError):
key.public_bytes(
serialization.Encoding.PEM,
"invalidformat", # type: ignore[arg-type]
)
@pytest.mark.parametrize(
("encoding", "fmt"),
[
(
serialization.Encoding.Raw,
serialization.PublicFormat.SubjectPublicKeyInfo,
),
(serialization.Encoding.Raw, serialization.PublicFormat.PKCS1),
*itertools.product(
[
serialization.Encoding.Raw,
serialization.Encoding.X962,
serialization.Encoding.PEM,
serialization.Encoding.DER,
],
[
serialization.PublicFormat.Raw,
serialization.PublicFormat.UncompressedPoint,
serialization.PublicFormat.CompressedPoint,
],
),
],
)
def test_public_bytes_rejects_invalid(
self, rsa_key_2048: rsa.RSAPrivateKey, encoding, fmt, backend
):
key = rsa_key_2048.public_key()
with pytest.raises((ValueError, TypeError)):
key.public_bytes(encoding, fmt)
def test_public_key_equality(self, rsa_key_2048: rsa.RSAPrivateKey):
key1 = rsa_key_2048.public_key()
key2 = RSA_KEY_2048.private_key(
unsafe_skip_rsa_key_validation=True
).public_key()
key3 = RSA_KEY_2048_ALT.private_key(
unsafe_skip_rsa_key_validation=True
).public_key()
assert key1 == key2
assert key1 != key3
assert key1 != object()
with pytest.raises(TypeError):
key1 < key2 # type: ignore[operator]
def test_public_key_copy(self, rsa_key_2048: rsa.RSAPrivateKey):
key1 = rsa_key_2048.public_key()
key2 = copy.copy(key1)
assert key1 == key2
def test_private_key_copy(self, rsa_key_2048: rsa.RSAPrivateKey):
key1 = rsa_key_2048
key2 = copy.copy(key1)
assert key1 == key2
def test_public_key_deepcopy(
self, rsa_key_2048: rsa.RSAPrivateKey, rsa_key_512: rsa.RSAPrivateKey
):
key1 = rsa_key_2048.public_key()
key2 = copy.deepcopy(key1)
assert key1.public_numbers() == key2.public_numbers()
key1 = rsa_key_512.public_key()
assert key1.public_numbers() != key2.public_numbers()
def test_private_key_deepcopy(
self, rsa_key_2048: rsa.RSAPrivateKey, rsa_key_512: rsa.RSAPrivateKey
):
key1 = rsa_key_2048
key2 = copy.deepcopy(key1)
assert key1.private_numbers() == key2.private_numbers()
key1 = rsa_key_512
assert key1.private_numbers() != key2.private_numbers()
| TestRSAPEMPublicKeySerialization |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 2257,
"end": 3120
} | class ____(MemoryLeakMixin, TestCase):
def test_list_allocation(self):
@njit
def foo_kwarg(n):
l = listobject.new_list(int32, allocated=n)
return l._allocated()
for i in range(16):
self.assertEqual(foo_kwarg(i), i)
@njit
def foo_posarg(n):
l = listobject.new_list(int32, n)
return l._allocated()
for i in range(16):
self.assertEqual(foo_posarg(i), i)
def test_list_allocation_negative(self):
@njit
def foo():
l = listobject.new_list(int32, -1)
return l._allocated()
with self.assertRaises(RuntimeError) as raises:
self.assertEqual(foo(), -1)
self.assertIn(
"expecting *allocated* to be >= 0",
str(raises.exception),
)
| TestAllocation |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 572711,
"end": 573107
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("DeploymentReview", graphql_name="node")
"""The item at the end of the edge."""
| DeploymentReviewEdge |
python | huggingface__transformers | tests/models/convnext/test_modeling_convnext.py | {
"start": 5648,
"end": 9507
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ConvNext does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"image-feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
has_attentions = False
test_torch_exportable = True
def setUp(self):
self.model_tester = ConvNextModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=ConvNextConfig,
has_text_modality=False,
hidden_size=37,
common_properties=["num_channels", "hidden_sizes"],
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="ConvNext does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states), expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 4, self.model_tester.image_size // 4],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "facebook/convnext-tiny-224"
model = ConvNextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| ConvNextModelTest |
python | pallets__quart | src/quart/asgi.py | {
"start": 13769,
"end": 15720
} | class ____:
def __init__(self, app: Quart, scope: LifespanScope) -> None:
self.app = app
async def __call__(
self, receive: ASGIReceiveCallable, send: ASGISendCallable
) -> None:
while True:
event = await receive()
if event["type"] == "lifespan.startup":
try:
await self.app.startup()
except Exception as error:
await send(
cast(
LifespanStartupFailedEvent,
{"type": "lifespan.startup.failed", "message": str(error)},
),
)
else:
await send(
cast(
LifespanStartupCompleteEvent,
{"type": "lifespan.startup.complete"},
)
)
elif event["type"] == "lifespan.shutdown":
try:
await self.app.shutdown()
except Exception as error:
await send(
cast(
LifespanShutdownFailedEvent,
{"type": "lifespan.shutdown.failed", "message": str(error)},
),
)
else:
await send(
cast(
LifespanShutdownCompleteEvent,
{"type": "lifespan.shutdown.complete"},
),
)
break
def _convert_version(raw: str) -> list[int]:
return list(map(int, raw.split(".")))
async def _handle_exception(app: Quart, error: Exception) -> Response:
if not app.testing and app.config["PROPAGATE_EXCEPTIONS"]:
return await traceback_response(error)
else:
raise error
| ASGILifespan |
python | PyCQA__pylint | tests/functional/a/assigning/assigning_non_slot.py | {
"start": 3232,
"end": 3483
} | class ____:
__slots__ = []
def release(self):
self.__class__ = ClassWithSlots # [assigning-non-slot]
self.test = 'test' # [assigning-non-slot]
# pylint: disable=attribute-defined-outside-init
| ClassReassingingInvalidLayoutClass |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/forLoop1.py | {
"start": 1158,
"end": 1365
} | class ____:
def __init__(self):
self.__iter__ = lambda: iter([])
# This should generate an error because A
# is not iterable. The __iter__ method is an
# instance variable.
for a in A():
...
| A |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 2354,
"end": 2950
} | class ____(PrefectFilterBaseModel):
"""Base model for Prefect filters that combines criteria with a user-provided operator"""
operator: Operator = Field(
default=Operator.and_,
description="Operator for combining filter criteria. Defaults to 'and_'.",
)
@db_injector
def as_sql_filter(self, db: "PrefectDBInterface") -> sa.ColumnElement[bool]:
filters = self._get_filter_list(db)
if not filters:
return sa.true()
return sa.and_(*filters) if self.operator == Operator.and_ else sa.or_(*filters)
| PrefectOperatorFilterBaseModel |
python | openai__openai-python | src/openai/lib/_tools.py | {
"start": 344,
"end": 815
} | class ____(Dict[str, Any]):
"""Dictionary wrapper so we can pass the given base model
throughout the entire request stack without having to special
case it.
"""
model: type[pydantic.BaseModel]
def __init__(self, defn: FunctionDefinition, model: type[pydantic.BaseModel]) -> None:
super().__init__(defn)
self.model = model
def cast(self) -> FunctionDefinition:
return cast(FunctionDefinition, self)
| PydanticFunctionTool |
python | python__mypy | mypy/stubutil.py | {
"start": 21871,
"end": 33478
} | class ____:
# These names should be omitted from generated stubs.
IGNORED_DUNDERS: Final = {
"__all__",
"__author__",
"__about__",
"__copyright__",
"__email__",
"__license__",
"__summary__",
"__title__",
"__uri__",
"__str__",
"__repr__",
"__getstate__",
"__setstate__",
"__slots__",
"__builtins__",
"__cached__",
"__file__",
"__name__",
"__package__",
"__path__",
"__spec__",
"__loader__",
}
TYPING_MODULE_NAMES: Final = ("typing", "typing_extensions")
# Special-cased names that are implicitly exported from the stub (from m import y as y).
EXTRA_EXPORTED: Final = {
"pyasn1_modules.rfc2437.univ",
"pyasn1_modules.rfc2459.char",
"pyasn1_modules.rfc2459.univ",
}
def __init__(
self,
_all_: list[str] | None = None,
include_private: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
) -> None:
# Best known value of __all__.
self._all_ = _all_
self._include_private = include_private
self._include_docstrings = include_docstrings
# Disable implicit exports of package-internal imports?
self.export_less = export_less
self._import_lines: list[str] = []
self._output: list[str] = []
# Current indent level (indent is hardcoded to 4 spaces).
self._indent = ""
self._toplevel_names: list[str] = []
self.import_tracker = ImportTracker()
# Top-level members
self.defined_names: set[str] = set()
self.sig_generators = self.get_sig_generators()
# populated by visit_mypy_file
self.module_name: str = ""
# These are "soft" imports for objects which might appear in annotations but not have
# a corresponding import statement.
self.known_imports = {
"_typeshed": ["Incomplete"],
"typing": ["Any", "TypeVar", "NamedTuple", "TypedDict"],
"collections.abc": ["Generator"],
"typing_extensions": ["ParamSpec", "TypeVarTuple"],
}
def get_sig_generators(self) -> list[SignatureGenerator]:
return []
def resolve_name(self, name: str) -> str:
"""Return the full name resolving imports and import aliases."""
if "." not in name:
real_module = self.import_tracker.module_for.get(name)
real_short = self.import_tracker.reverse_alias.get(name, name)
if real_module is None and real_short not in self.defined_names:
real_module = "builtins" # not imported and not defined, must be a builtin
else:
name_module, real_short = name.split(".", 1)
real_module = self.import_tracker.reverse_alias.get(name_module, name_module)
resolved_name = real_short if real_module is None else f"{real_module}.{real_short}"
return resolved_name
def add_name(self, fullname: str, require: bool = True) -> str:
"""Add a name to be imported and return the name reference.
The import will be internal to the stub (i.e don't reexport).
"""
module, name = fullname.rsplit(".", 1)
alias = "_" + name if name in self.defined_names else None
while alias in self.defined_names:
alias = "_" + alias
if module != "builtins" or alias: # don't import from builtins unless needed
self.import_tracker.add_import_from(module, [(name, alias)], require=require)
return alias or name
def add_import_line(self, line: str) -> None:
"""Add a line of text to the import section, unless it's already there."""
if line not in self._import_lines:
self._import_lines.append(line)
def get_imports(self) -> str:
"""Return the import statements for the stub."""
imports = ""
if self._import_lines:
imports += "".join(self._import_lines)
imports += "".join(self.import_tracker.import_lines())
return imports
def output(self) -> str:
"""Return the text for the stub."""
pieces: list[str] = []
if imports := self.get_imports():
pieces.append(imports)
if dunder_all := self.get_dunder_all():
pieces.append(dunder_all)
if self._output:
pieces.append("".join(self._output))
return "\n".join(pieces)
def get_dunder_all(self) -> str:
"""Return the __all__ list for the stub."""
if self._all_:
# Note we emit all names in the runtime __all__ here, even if they
# don't actually exist. If that happens, the runtime has a bug, and
# it's not obvious what the correct behavior should be. We choose
# to reflect the runtime __all__ as closely as possible.
return f"__all__ = {self._all_!r}\n"
return ""
def add(self, string: str) -> None:
"""Add text to generated stub."""
self._output.append(string)
def is_top_level(self) -> bool:
"""Are we processing the top level of a file?"""
return self._indent == ""
def indent(self) -> None:
"""Add one level of indentation."""
self._indent += " "
def dedent(self) -> None:
"""Remove one level of indentation."""
self._indent = self._indent[:-4]
def record_name(self, name: str) -> None:
"""Mark a name as defined.
This only does anything if at the top level of a module.
"""
if self.is_top_level():
self._toplevel_names.append(name)
def is_recorded_name(self, name: str) -> bool:
"""Has this name been recorded previously?"""
return self.is_top_level() and name in self._toplevel_names
def set_defined_names(self, defined_names: set[str]) -> None:
self.defined_names = defined_names
# Names in __all__ are required
for name in self._all_ or ():
self.import_tracker.reexport(name)
for pkg, imports in self.known_imports.items():
for t in imports:
# require=False means that the import won't be added unless require_name() is called
# for the object during generation.
self.add_name(f"{pkg}.{t}", require=False)
def check_undefined_names(self) -> None:
undefined_names = [name for name in self._all_ or [] if name not in self._toplevel_names]
if undefined_names:
if self._output:
self.add("\n")
self.add("# Names in __all__ with no definition:\n")
for name in sorted(undefined_names):
self.add(f"# {name}\n")
def get_signatures(
self,
default_signature: FunctionSig,
sig_generators: list[SignatureGenerator],
func_ctx: FunctionContext,
) -> list[FunctionSig]:
for sig_gen in sig_generators:
inferred = sig_gen.get_function_sig(default_signature, func_ctx)
if inferred:
return inferred
return [default_signature]
def get_property_type(
self,
default_type: str | None,
sig_generators: list[SignatureGenerator],
func_ctx: FunctionContext,
) -> str | None:
for sig_gen in sig_generators:
inferred = sig_gen.get_property_type(default_type, func_ctx)
if inferred:
return inferred
return default_type
def format_func_def(
self,
sigs: list[FunctionSig],
is_coroutine: bool = False,
decorators: list[str] | None = None,
docstring: str | None = None,
) -> list[str]:
lines: list[str] = []
if decorators is None:
decorators = []
for signature in sigs:
# dump decorators, just before "def ..."
for deco in decorators:
lines.append(f"{self._indent}{deco}")
lines.append(
signature.format_sig(
indent=self._indent,
is_async=is_coroutine,
docstring=docstring,
include_docstrings=self._include_docstrings,
)
)
return lines
def format_type_args(self, o: TypeAliasStmt | FuncDef | ClassDef) -> str:
if not o.type_args:
return ""
p = AnnotationPrinter(self)
type_args_list: list[str] = []
for type_arg in o.type_args:
if type_arg.kind == PARAM_SPEC_KIND:
prefix = "**"
elif type_arg.kind == TYPE_VAR_TUPLE_KIND:
prefix = "*"
else:
prefix = ""
if type_arg.upper_bound:
bound_or_values = f": {type_arg.upper_bound.accept(p)}"
elif type_arg.values:
bound_or_values = f": ({', '.join(v.accept(p) for v in type_arg.values)})"
else:
bound_or_values = ""
if type_arg.default:
default = f" = {type_arg.default.accept(p)}"
else:
default = ""
type_args_list.append(f"{prefix}{type_arg.name}{bound_or_values}{default}")
return "[" + ", ".join(type_args_list) + "]"
def print_annotation(
self,
t: Type,
known_modules: list[str] | None = None,
local_modules: list[str] | None = None,
) -> str:
printer = AnnotationPrinter(self, known_modules, local_modules)
return t.accept(printer)
def is_not_in_all(self, name: str) -> bool:
if self.is_private_name(name):
return False
if self._all_:
return self.is_top_level() and name not in self._all_
return False
def is_private_name(self, name: str, fullname: str | None = None) -> bool:
if "__mypy-" in name:
return True # Never include mypy generated symbols
if self._include_private:
return False
if fullname in self.EXTRA_EXPORTED:
return False
if name == "_":
return False
if not name.startswith("_"):
return False
if self._all_ and name in self._all_:
return False
if name.startswith("__") and name.endswith("__"):
return name in self.IGNORED_DUNDERS
return True
def should_reexport(self, name: str, full_module: str, name_is_alias: bool) -> bool:
if (
not name_is_alias
and self.module_name
and (self.module_name + "." + name) in self.EXTRA_EXPORTED
):
# Special case certain names that should be exported, against our general rules.
return True
if name_is_alias:
return False
if self.export_less:
return False
if not self.module_name:
return False
is_private = self.is_private_name(name, full_module + "." + name)
if is_private:
return False
top_level = full_module.split(".")[0]
self_top_level = self.module_name.split(".", 1)[0]
if top_level not in (self_top_level, "_" + self_top_level):
# Export imports from the same package, since we can't reliably tell whether they
# are part of the public API.
return False
if self._all_:
return name in self._all_
return True
| BaseStubGenerator |
python | pyinstaller__pyinstaller | PyInstaller/lib/modulegraph/modulegraph.py | {
"start": 21023,
"end": 22678
} | class ____(Node):
"""
Graph node representing the aliasing of an existing source module under a
non-existent target module name (i.e., the desired alias).
"""
def __init__(self, name, node=None):
"""
Initialize this alias.
Parameters
----------
name : str
Fully-qualified name of the non-existent target module to be
created (as an alias of the existing source module).
node : Node
Graph node of the existing source module being aliased. Optional;
if not provided here, the attributes from referred node should
be copied later using `copyAttributesFromReferredNode` method.
"""
super(AliasNode, self).__init__(name)
# Copy attributes from referred node, if provided
self.copyAttributesFromReferredNode(node)
def copyAttributesFromReferredNode(self, node):
"""
Copy a subset of attributes from referred node (source module) into this target alias.
"""
# FIXME: Why only some? Why not *EVERYTHING* except "graphident", which
# must remain equal to "name" for lookup purposes? This is, after all,
# an alias. The idea is for the two nodes to effectively be the same.
for attr_name in (
'identifier', 'packagepath',
'_global_attr_names', '_starimported_ignored_module_names',
'_submodule_basename_to_node'):
if hasattr(node, attr_name):
setattr(self, attr_name, getattr(node, attr_name))
def infoTuple(self):
return (self.graphident, self.identifier)
| AliasNode |
python | ray-project__ray | python/ray/data/tests/test_namespace_expressions.py | {
"start": 21949,
"end": 22321
} | class ____:
"""Tests for proper error handling."""
def test_list_invalid_index_type(self):
"""Test list bracket notation rejects invalid types."""
with pytest.raises(TypeError, match="List indices must be integers or slices"):
col("items").list["invalid"]
if __name__ == "__main__":
pytest.main([__file__, "-v"])
| TestNamespaceErrors |
python | getsentry__sentry | src/sentry/api/serializers/models/dashboard.py | {
"start": 1963,
"end": 2135
} | class ____(TypedDict):
orderby: list[dict[str, str]] | None
equations: list[dict[str, str | list[str]]] | None
selected_columns: list[str]
| WidgetChangedReasonType |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/vector.py | {
"start": 891,
"end": 1603
} | class ____(Enum):
"""Enum representing different types of vector distance metrics.
See :ref:`oracle_vector_datatype` for background.
.. versionadded:: 2.0.41
"""
EUCLIDEAN = "EUCLIDEAN"
"""Euclidean distance (L2 norm).
Measures the straight-line distance between two vectors in space.
"""
DOT = "DOT"
"""Dot product similarity.
Measures the algebraic similarity between two vectors.
"""
COSINE = "COSINE"
"""Cosine similarity.
Measures the cosine of the angle between two vectors.
"""
MANHATTAN = "MANHATTAN"
"""Manhattan distance (L1 norm).
Calculates the sum of absolute differences across dimensions.
"""
| VectorDistanceType |
python | pytorch__pytorch | torch/distributed/tensor/experimental/_tp_transform.py | {
"start": 2184,
"end": 20437
} | class ____(PassBase):
"""
This pass is responsible for transforming a single-device graph into a tensor parallel
graph. It will mark the OpSpec of each node in the graph, partition the graph into
distributed graph, then shard the parameters/buffers accordingly.
"""
def __init__(
self,
rank: int,
world_size: int,
device_type: str,
state_dict: dict[str, torch.Tensor],
graph_signature: ExportGraphSignature,
parallel_strategies: dict[str, ParallelStyle],
) -> None:
super().__init__()
self.rank = rank
self.mesh = DeviceMesh(device_type, torch.arange(world_size))
self.state_dict: dict[str, torch.Tensor] = state_dict
self.graph_signature = graph_signature
self.parallel_strategies = parallel_strategies
def call(self, graph_module) -> PassResult:
gm = copy.deepcopy(graph_module)
parameter_placements = _generate_parameter_and_buffer_placements(
list(self.state_dict.keys()), self.parallel_strategies
)
placement_strategies = _mark_sharding(
gm, self.graph_signature, self.mesh, parameter_placements
)
_partitioner(gm)
_shard_state_dict(
self.state_dict, placement_strategies, self.graph_signature, self.mesh
)
return PassResult(gm, True)
def _generate_parameter_and_buffer_placements(
params_and_buffers: list[str],
parallel_strategies: dict[str, ParallelStyle],
) -> dict[str, Placement]:
"""
Build parameter placements based on the give parallel style of linear layers.
"""
parameter_placements: dict[str, Placement] = {}
for linear_fqn, parallel_style in parallel_strategies.items():
weight_fqn = f"{linear_fqn}.weight"
bias_fqn = f"{linear_fqn}.bias"
assert weight_fqn in params_and_buffers
parameter_placements[weight_fqn] = (
Shard(0) if parallel_style == ColwiseParallel else Shard(1)
)
if bias_fqn in params_and_buffers:
parameter_placements[bias_fqn] = (
Shard(0) if parallel_style == ColwiseParallel else Replicate()
)
return parameter_placements
def _mark_tensor_parallel_shardings(
gm: GraphModule,
graph_signature: ExportGraphSignature,
mesh: DeviceMesh,
parameter_placements: dict[str, Placement],
) -> dict[Node, OpSpec]:
"""
Mark the placement strategies of the parameter and buffer placeholder nodes.
"""
placement_strategies: dict[Node, OpSpec] = {}
num_params_and_buffers = len(graph_signature.inputs_to_parameters) + len(
graph_signature.inputs_to_buffers
)
placeholder_idx: int = 0
for node in gm.graph.nodes:
if node.op == "placeholder":
if placeholder_idx < num_params_and_buffers:
fqn: str = _get_input_node_fqn(node.name, graph_signature)
placement: Placement = (
parameter_placements[fqn]
if fqn in parameter_placements
else Replicate()
)
placement_strategies[node] = _create_placement_strategy(
node,
mesh,
placements=(placement,),
)
placeholder_idx += 1
else:
placement_strategies[node] = _create_placement_strategy(
node,
mesh,
placements=(Replicate(),),
)
return placement_strategies
def _get_input_node_fqn(input_name: str, graph_signature: ExportGraphSignature) -> str:
"""
Return the FQN of an input node.
"""
if input_name in graph_signature.inputs_to_parameters:
return graph_signature.inputs_to_parameters[input_name]
elif input_name in graph_signature.inputs_to_buffers:
return graph_signature.inputs_to_buffers[input_name]
else:
raise ValueError(
f"{input_name} not found in inputs_to_parameters or inputs_to_buffers"
)
def _mark_sharding(
gm: GraphModule,
graph_signature: ExportGraphSignature,
mesh: DeviceMesh,
parameter_placements: dict[str, Placement],
) -> dict[Node, OpSpec]:
"""
Mark the sharding strategy for each node in the graph module.
"""
placement_strategies: dict[Node, OpSpec] = _mark_tensor_parallel_shardings(
gm,
graph_signature,
mesh,
parameter_placements,
)
for node in gm.graph.nodes:
if node.op == "placeholder":
if node not in placement_strategies:
placement_strategies[node] = _create_placement_strategy(
node, mesh, placements=(Replicate(),)
)
node.meta["sharding"] = placement_strategies[node]
elif node.op == "call_function":
if node.target is operator.getitem:
input_nodes = node.all_input_nodes
assert len(input_nodes) == 1, (
f"non-compute op only support one input now, found node: {node} with length of inputs: {len(node.args)}"
)
arg_strategy = placement_strategies[input_nodes[0]]
placement_strategies[node] = _create_placement_strategy(
node,
mesh,
placements=arg_strategy.output_spec.placements,
input_specs=_get_input_node_specs(node, placement_strategies),
)
node.meta["sharding"] = placement_strategies[node]
else:
op_schema = _get_op_schema(node, placement_strategies)
# get DTensor specs for inputs and outputs
if (
op_schema.op
not in DTensor._op_dispatcher.sharding_propagator.op_strategy_funcs
and op_schema.op
not in DTensor._op_dispatcher.sharding_propagator.op_to_rules
):
# Mark all as replicated
output_sharding = _generate_default_output_sharding(
node,
mesh,
op_schema,
)
else:
output_sharding = DTensor._op_dispatcher.sharding_propagator.propagate_op_sharding( # type: ignore[assignment]
op_schema,
)
placement_strategies[node] = OpSpec(
# pyrefly: ignore [bad-argument-type]
output_specs=_get_output_spec_from_output_sharding(output_sharding),
# pyrefly: ignore [missing-attribute]
input_specs=output_sharding.redistribute_schema.args_spec
# pyrefly: ignore [missing-attribute]
if output_sharding.redistribute_schema is not None
else _get_input_node_specs(node, placement_strategies),
)
node.meta["sharding"] = placement_strategies[node]
elif node.op == "output":
node.meta["sharding"] = None
else:
raise RuntimeError(f"op code {node.op} not supported")
return placement_strategies
def _get_output_spec_from_output_sharding(
output_sharding: OutputSharding,
) -> DTensorSpec:
"""
Util function to extract output spec from output sharding.
"""
if isinstance(output_sharding.output_spec, DTensorSpec):
return output_sharding.output_spec
else:
# For ops that return multiple outputs, the outputs should have the same output spec
assert isinstance(output_sharding.output_spec, Sequence)
assert output_sharding.output_spec[0] is not None
output_sharding.output_spec[0].tensor_meta = None
return output_sharding.output_spec[0]
def _create_placement_strategy(
node: Node,
mesh: DeviceMesh,
placements: tuple[Placement, ...],
input_specs: Sequence[DTensorSpec] | None = None,
) -> OpSpec:
"""
Util function to construct an OpSpec for a given node.
"""
placement = OpSpec(
input_specs=input_specs,
output_specs=DTensorSpec(
mesh=mesh,
placements=placements,
),
)
_populate_tensor_meta(node, placement.output_specs)
return placement
def _populate_tensor_meta(node: Node, output_spec: OutputSpecType) -> None:
"""
Util function to populate tensor meta of output_spec based on node metadata.
"""
if isinstance(node.meta["val"], Sequence):
assert isinstance(output_spec, Sequence)
for spec, fake_tensor in zip(output_spec, node.meta["val"]):
assert spec is not None
spec.tensor_meta = TensorMeta(
shape=fake_tensor.shape,
stride=fake_tensor.stride(),
dtype=fake_tensor.dtype,
)
else:
assert isinstance(output_spec, DTensorSpec)
output_spec.tensor_meta = TensorMeta(
shape=node.meta["val"].shape,
stride=node.meta["val"].stride(),
dtype=node.meta["val"].dtype,
)
def _generate_default_output_sharding(
node: Node,
mesh: DeviceMesh,
op_schema: OpSchema,
) -> OutputSharding:
"""
Util function to create a default output sharding that suggests Replicate placement for both args and outputs.
"""
def update_arg_spec(arg_spec: DTensorSpec) -> DTensorSpec:
return DTensorSpec(
mesh=arg_spec.mesh,
placements=(Replicate(),),
tensor_meta=arg_spec.tensor_meta,
)
new_op_schema = OpSchema(
op=op_schema.op,
args_schema=pytree.tree_map_only(
DTensorSpec, update_arg_spec, op_schema.args_schema
),
kwargs_schema=op_schema.kwargs_schema,
)
def create_output_spec(tensor: FakeTensor) -> DTensorSpec:
return DTensorSpec(
mesh=mesh,
placements=(Replicate(),),
tensor_meta=TensorMeta(
shape=tensor.shape,
stride=tensor.stride(),
dtype=tensor.dtype,
),
)
return OutputSharding(
output_spec=pytree.tree_map_only(
FakeTensor, create_output_spec, node.meta["val"]
),
redistribute_schema=new_op_schema,
needs_redistribute=True,
)
def _partitioner(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
"""
Graph partitioner that partitions the single device graph
to distributed graph
"""
for node in gm.graph.nodes:
node_sharding = node.meta["sharding"]
if node.op == "placeholder":
out_spec = node_sharding.output_spec
local_val = _partition_val(node.meta["val"], out_spec)
# update node value
node.meta["val"] = local_val
elif node.op == "call_function":
out_spec = node_sharding.output_spec
# check if there's misaligned sharding, insert reshard if there is
expected_input_specs = node_sharding.input_specs
for idx, input_arg in enumerate(node.all_input_nodes):
input_arg_sharding = input_arg.meta["sharding"]
input_arg_spec = input_arg_sharding.output_spec
desired_spec = (
out_spec
if expected_input_specs is None
else expected_input_specs[idx]
)
if input_arg_spec != desired_spec:
_insert_reshard_gm(
gm, node, input_arg, input_arg_spec, desired_spec
)
# convert output val to its local component
output_val = node.meta["val"]
node.meta["val"] = _partition_val(output_val, out_spec)
elif node.op == "output":
for input_arg in node.all_input_nodes:
# input args of output should be Replicate, otherwise redistribution is needed.
input_args_to_check: Sequence[Node] = (
input_arg if isinstance(input_arg, Sequence) else [input_arg]
)
for arg in input_args_to_check:
arg_sharding = arg.meta["sharding"]
arg_spec = arg_sharding.output_spec
desired_spec = copy.copy(arg_spec)
desired_spec.placements = (Replicate(),)
if arg_spec != desired_spec:
_insert_reshard_gm(gm, node, arg, arg_spec, desired_spec)
else:
raise RuntimeError(f"op code {node} not supported")
_clean_up_graph_metadata(gm)
gm.graph.lint()
gm.recompile()
return gm
def _partition_val(val: Any, spec: DTensorSpec) -> Any:
"""
util function to convert a full tensor val to its local component
"""
if isinstance(val, torch.Tensor):
local_shard = val
if val.ndim == 0:
# If it's already a scalar tensor, it is already local, we don't
# need to do anything
return local_shard
for idx, placement in enumerate(spec.placements):
if placement.is_shard():
placement = cast(Shard, placement)
num_chunks = spec.mesh.size(mesh_dim=idx)
my_coord = spec.mesh.get_coordinate()
assert my_coord is not None, "current rank not in mesh!"
my_coord_on_mesh_dim = my_coord[idx]
local_shard = placement._split_tensor(
local_shard, num_chunks, with_padding=False, contiguous=True
)[0][my_coord_on_mesh_dim]
return local_shard
elif isinstance(val, (list, tuple)):
return val.__class__(_partition_val(v, spec) for v in val)
else:
raise RuntimeError(f"val type {type(val)} not supported")
def _insert_reshard_gm(
gm: torch.fx.GraphModule,
node: Node,
input_arg: Node,
input_arg_spec: DTensorSpec,
desired_spec: DTensorSpec,
) -> None:
"""
Transform the graph for tensor redistribution.
"""
input_arg_spec.tensor_meta = input_arg.meta["tensor_meta"]
desired_spec.tensor_meta = input_arg.meta["tensor_meta"]
input_arg_tensor = input_arg.meta["val"]
# insert reshard operation
def reshard_fn(local_tensor: torch.Tensor) -> torch.Tensor:
return redistribute_local_tensor(
local_tensor,
input_arg_spec,
desired_spec,
)
reshard_gm = make_fx(reshard_fn)(input_arg_tensor)
reshard_gm_nodes = list(reshard_gm.graph.nodes)
input_node = reshard_gm_nodes[0]
with gm.graph.inserting_before(node):
# copy nn_module_stack metadata for output, all-reduce nodes
for reshard_node in reshard_gm.graph.nodes:
if reshard_node.op not in ["placeholder", "output"]:
reshard_node.meta["nn_module_stack"] = (
copy.copy(input_arg.meta["nn_module_stack"])
if input_arg.op != "placeholder"
else copy.copy(node.meta["nn_module_stack"])
)
output_node = gm.graph.graph_copy(
reshard_gm.graph,
val_map={
input_node: input_arg,
},
)
node.replace_input_with(input_arg, output_node) # type: ignore[arg-type]
def _clean_up_graph_metadata(gm: torch.fx.GraphModule) -> None:
"""
Clean up the graph by removing sharding and partitioning related metadata
"""
for node in gm.graph.nodes:
if "sharding" in node.meta:
del node.meta["sharding"]
if "val" in node.meta and isinstance(node.meta["val"], torch.Tensor):
local_tensor_meta = _extract_tensor_metadata(node.meta["val"])
node.meta["tensor_meta"] = local_tensor_meta
def _get_input_node_specs(
node: Node, placement_strategies: dict[Node, OpSpec]
) -> tuple[DTensorSpec, ...]:
"""
Get the input specs of a node.
"""
input_specs_list: list[DTensorSpec] = []
for input_arg in node.all_input_nodes:
if input_arg in placement_strategies:
output_spec = placement_strategies[input_arg].output_specs
assert isinstance(output_spec, DTensorSpec)
input_specs_list.append(output_spec)
else:
raise ValueError(f"{input_arg} does not have output_spec populated.")
return tuple(input_specs_list)
def _get_op_schema(node: Node, placement_strategies: dict[Node, OpSpec]) -> OpSchema:
"""
Util function to construct the operator schema of a node.
"""
args_schema_list = pytree.tree_map_only(
Node, lambda arg: placement_strategies[arg].output_specs, node.args
)
op_schema = OpSchema(
op=cast(torch._ops.OpOverload, node.target),
args_schema=tuple(args_schema_list),
kwargs_schema=cast(dict[str, object], node.kwargs),
)
return op_schema
def _shard_state_dict(
state_dict: dict[str, torch.Tensor],
placement_strategies: dict[Node, OpSpec],
graph_signature: ExportGraphSignature,
mesh: DeviceMesh,
) -> None:
"""
Inplace partition the weights based on the OpSpec
"""
for node, op_spec in placement_strategies.items():
if node.op != "placeholder":
continue
if node.name in graph_signature.inputs_to_parameters:
fqn = graph_signature.inputs_to_parameters[node.name]
elif node.name in graph_signature.inputs_to_buffers:
fqn = graph_signature.inputs_to_buffers[node.name]
else:
continue
assert fqn in state_dict, f"{fqn} not found in state dict: {state_dict.keys()}"
original_param = state_dict[fqn]
dtensor_param = distribute_tensor(
original_param,
mesh,
op_spec.output_spec.placements,
)
local_param = dtensor_param.to_local()
state_dict[fqn] = (
torch.nn.Parameter(local_param)
if isinstance(original_param, torch.nn.Parameter)
else local_param
)
| _TensorParallelTransformPass |
python | google__pytype | pytype/tools/config.py | {
"start": 851,
"end": 1216
} | class ____(abc.ABC):
"""A section of a config file."""
@classmethod
@abc.abstractmethod
def create_from_file(
cls: type[_ConfigSectionT], filepath: str, section: str
) -> _ConfigSectionT:
"""Create a ConfigSection if the file at filepath has section."""
@abc.abstractmethod
def items(self) -> Iterable[tuple[str, str]]:
...
| ConfigSection |
python | django__django | tests/migrations/migrations_test_apps/lookuperror_a/models.py | {
"start": 101,
"end": 253
} | class ____(models.Model):
b2 = models.ForeignKey("lookuperror_b.B2", models.CASCADE)
c2 = models.ForeignKey("lookuperror_c.C2", models.CASCADE)
| A3 |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 350298,
"end": 351532
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"git_hub_services_sha",
"git_ip_addresses",
"hook_ip_addresses",
"importer_ip_addresses",
"is_password_authentication_verifiable",
"pages_ip_addresses",
)
git_hub_services_sha = sgqlc.types.Field(
sgqlc.types.non_null(GitObjectID), graphql_name="gitHubServicesSha"
)
git_ip_addresses = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="gitIpAddresses"
)
hook_ip_addresses = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="hookIpAddresses",
)
importer_ip_addresses = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="importerIpAddresses",
)
is_password_authentication_verifiable = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isPasswordAuthenticationVerifiable"
)
pages_ip_addresses = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="pagesIpAddresses",
)
| GitHubMetadata |
python | scipy__scipy | scipy/integrate/tests/test_cubature.py | {
"start": 36604,
"end": 37039
} | class ____(Rule):
"""
A rule with fake high error so that cubature will keep on subdividing.
"""
def estimate(self, f, a, b, args=()):
xp = array_namespace(a, b)
underlying = GaussLegendreQuadrature(10, xp=xp)
return underlying.estimate(f, a, b, args)
def estimate_error(self, f, a, b, args=()):
xp = array_namespace(a, b)
return xp.asarray(1e6, dtype=xp.float64)
| BadErrorRule |
python | ray-project__ray | rllib/core/rl_module/rl_module.py | {
"start": 32188,
"end": 33885
} | class ____:
observation_space: gym.Space = None
action_space: gym.Space = None
inference_only: bool = False
learner_only: bool = False
model_config_dict: Dict[str, Any] = field(default_factory=dict)
catalog_class: Type["Catalog"] = None
def get_catalog(self) -> Optional["Catalog"]:
if self.catalog_class is not None:
return self.catalog_class(
observation_space=self.observation_space,
action_space=self.action_space,
model_config_dict=self.model_config_dict,
)
return None
def to_dict(self):
catalog_class_path = (
serialize_type(self.catalog_class) if self.catalog_class else ""
)
return {
"observation_space": gym_space_to_dict(self.observation_space),
"action_space": gym_space_to_dict(self.action_space),
"inference_only": self.inference_only,
"learner_only": self.learner_only,
"model_config_dict": self.model_config_dict,
"catalog_class_path": catalog_class_path,
}
@classmethod
def from_dict(cls, d: Dict[str, Any]):
catalog_class = (
None
if d["catalog_class_path"] == ""
else deserialize_type(d["catalog_class_path"])
)
return cls(
observation_space=gym_space_from_dict(d["observation_space"]),
action_space=gym_space_from_dict(d["action_space"]),
inference_only=d["inference_only"],
learner_only=d["learner_only"],
model_config_dict=d["model_config_dict"],
catalog_class=catalog_class,
)
| RLModuleConfig |
python | google__jax | tests/pallas/triton_pallas_test.py | {
"start": 1169,
"end": 1864
} | class ____(jtu.JaxTestCase):
INTERPRET = False
def setUp(self):
if jtu.test_device_matches(["cpu"]):
if not self.INTERPRET:
self.skipTest("On CPU the test works only in interpret mode")
elif jtu.test_device_matches(["gpu"]):
if not jtu.is_cuda_compute_capability_at_least("9.0"):
self.skipTest("Only works on GPU with capability >= sm90")
else:
self.skipTest("Test only works on CPU and GPU")
super().setUp()
def pallas_call(self, *args, **kwargs):
return pl.pallas_call(*args, **kwargs, interpret=self.INTERPRET)
DTYPE_LIST = [jnp.float32, jnp.float16, jnp.bfloat16,
jnp.float8_e4m3fn, jnp.float8_e5m2]
| PallasBaseTest |
python | huggingface__transformers | src/transformers/models/splinter/tokenization_splinter.py | {
"start": 1368,
"end": 7787
} | class ____(TokenizersBackend):
r"""
Construct a Splinter tokenizer (backed by HuggingFace's tokenizers library). Based on WordPiece.
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to a vocabulary file.
tokenizer_file (`str`, *optional*):
Path to a tokenizers JSON file containing the serialization of a tokenizer.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values.
question_token (`str`, *optional*, defaults to `"[QUESTION]"`):
The token used for constructing question representations.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase`.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, a minimal vocabulary is created.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
do_lower_case: bool = True,
unk_token: str = "[UNK]",
sep_token: str = "[SEP]",
pad_token: str = "[PAD]",
cls_token: str = "[CLS]",
mask_token: str = "[MASK]",
question_token: str = "[QUESTION]",
tokenize_chinese_chars: bool = True,
strip_accents: Optional[bool] = None,
vocab: Optional[dict] = None,
**kwargs,
):
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {
str(pad_token): 0,
str(unk_token): 1,
str(cls_token): 2,
str(sep_token): 3,
str(mask_token): 4,
str(question_token): 5,
".": 6,
}
self._tokenizer = Tokenizer(WordPiece(self._vocab, unk_token=str(unk_token)))
self._tokenizer.normalizer = normalizers.BertNormalizer(
clean_text=True,
handle_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
lowercase=do_lower_case,
)
self._tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
self._tokenizer.decoder = decoders.WordPiece(prefix="##")
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
question_token=question_token,
do_lower_case=do_lower_case,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
if hasattr(self, "_tokenizer") and self._tokenizer.normalizer is not None:
import json
pre_tok_state = json.loads(self._tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
or pre_tok_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
):
pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
pre_tok_state["lowercase"] = do_lower_case
pre_tok_state["strip_accents"] = strip_accents
pre_tok_state["handle_chinese_chars"] = tokenize_chinese_chars
self._tokenizer.normalizer = pre_tok_class(**pre_tok_state)
self.do_lower_case = do_lower_case
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
self.question_token = question_token
if self.question_token not in self.all_special_tokens:
self.add_tokens([self.question_token], special_tokens=True)
self.update_post_processor()
@property
def question_token_id(self):
return self.convert_tokens_to_ids(self.question_token)
def update_post_processor(self):
cls = self.cls_token
sep = self.sep_token
question = self.question_token
dot = "."
cls_token_id = self.cls_token_id
sep_token_id = self.sep_token_id
question_token_id = self.question_token_id
dot_token_id = self.convert_tokens_to_ids(".")
if cls is None or sep is None:
return
if self.padding_side == "right":
pair = f"{cls}:0 $A:0 {question} {dot} {sep}:0 $B:1 {sep}:1"
else:
pair = f"{cls}:0 $A:0 {sep}:0 $B:1 {question} {dot} {sep}:1"
self._tokenizer.post_processor = processors.TemplateProcessing(
single=f"{cls}:0 $A:0 {sep}:0",
pair=pair,
special_tokens=[
(cls, cls_token_id),
(sep, sep_token_id),
(question, question_token_id),
(dot, dot_token_id),
],
)
__all__ = ["SplinterTokenizer"]
| SplinterTokenizer |
python | anthropics__anthropic-sdk-python | src/anthropic/types/cache_creation.py | {
"start": 150,
"end": 407
} | class ____(BaseModel):
ephemeral_1h_input_tokens: int
"""The number of input tokens used to create the 1 hour cache entry."""
ephemeral_5m_input_tokens: int
"""The number of input tokens used to create the 5 minute cache entry."""
| CacheCreation |
python | PyCQA__pylint | doc/data/messages/u/useless-parent-delegation/good.py | {
"start": 73,
"end": 198
} | class ____(Animal):
"""There is no need to override 'eat' it has the same signature as the implementation in Animal."""
| Human |
python | astropy__astropy | astropy/cosmology/_src/flrw/w0wzcdm.py | {
"start": 543,
"end": 6728
} | class ____(FLRW):
"""FLRW cosmology with a variable dark energy EoS and curvature.
The equation for the dark energy equation of state (EoS) uses the simple form:
:math:`w(z) = w_0 + w_z z`.
This form is not recommended for z > 1.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0. This is pressure/density for
dark energy in units where c=1.
wz : float, optional
Derivative of the dark energy equation of state with respect to z.
A cosmological constant has w0=-1.0 and wz=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import w0wzCDM
>>> cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wz=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
w0: Parameter = Parameter(
default=-1.0, doc="Dark energy equation of state at z=0.", fvalidate="float"
)
wz: Parameter = Parameter(
default=0.0,
doc="Derivative of the dark energy equation of state w.r.t. z.",
fvalidate="float",
)
def __post_init__(self) -> None:
super().__post_init__()
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self.Tcmb0.value == 0:
inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_norel
inv_efunc_scalar_args = (
self.Om0,
self.Ode0,
self.Ok0,
self.w0,
self.wz,
)
elif not self._nu_info.has_massive_nu:
inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc_nomnu
inv_efunc_scalar_args = (
self.Om0,
self.Ode0,
self.Ok0,
self.Ogamma0 + self.Onu0,
self.w0,
self.wz,
)
else:
inv_efunc_scalar = scalar_inv_efuncs.w0wzcdm_inv_efunc
inv_efunc_scalar_args = (
self.Om0,
self.Ode0,
self.Ok0,
self.Ogamma0,
self._nu_info.neff_per_nu,
self._nu_info.n_massless_nu,
self._nu_info.nu_y_list,
self.w0,
self.wz,
)
object.__setattr__(self, "_inv_efunc_scalar", inv_efunc_scalar)
object.__setattr__(self, "_inv_efunc_scalar_args", inv_efunc_scalar_args)
def w(self, z: Quantity | ArrayLike, /) -> FArray:
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'] or array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
w : ndarray
The dark energy equation of state.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is given by :math:`w(z) = w_0 + w_z z`.
"""
return self.w0 + self.wz * aszarr(z)
def de_density_scale(self, z: Quantity | ArrayLike, /) -> FArray:
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'] or array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
I : ndarray
The scaling of the energy density of dark energy with redshift.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the Universe.
Physics Review Letters, 90(9), 091301.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by ([1]_)
.. math::
I = \left(1 + z\right)^{3 \left(1 + w_0 - w_z\right)}
\exp \left(3 w_z z\right)
"""
z = aszarr(z)
return (z + 1.0) ** (3.0 * (1.0 + self.w0 - self.wz)) * exp(3.0 * self.wz * z)
@dataclass_decorator
| w0wzCDM |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/forms.py | {
"start": 4036,
"end": 4096
} | class ____(ReprForm):
_url = forms.URLField()
| URLFieldForm |
python | huggingface__transformers | tests/models/squeezebert/test_modeling_squeezebert.py | {
"start": 11466,
"end": 12065
} | class ____(unittest.TestCase):
@slow
def test_inference_classification_head(self):
model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 3))
self.assertEqual(output.shape, expected_shape)
expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
torch.testing.assert_close(output, expected_tensor, rtol=1e-4, atol=1e-4)
| SqueezeBertModelIntegrationTest |
python | scipy__scipy | scipy/interpolate/_fitpack_repro.py | {
"start": 19409,
"end": 23127
} | class ____:
""" The r.h.s. of ``f(p) = s``.
Given scalar `p`, we solve the system of equations in the LSQ sense:
| A | @ | c | = | y |
| B / p | | 0 | | 0 |
where `A` is the matrix of b-splines and `b` is the discontinuity matrix
(the jumps of the k-th derivatives of b-spline basis elements at knots).
Since we do that repeatedly while minimizing over `p`, we QR-factorize
`A` only once and update the QR factorization only of the `B` rows of the
augmented matrix |A, B/p|.
The system of equations is Eq. (15) Ref. [1]_, the strategy and implementation
follows that of FITPACK, see specific links below.
References
----------
[1] P. Dierckx, Algorithms for Smoothing Data with Periodic and Parametric Splines,
COMPUTER GRAPHICS AND IMAGE PROCESSING vol. 20, pp 171-184 (1982.)
https://doi.org/10.1016/0146-664X(82)90043-0
"""
def __init__(self, x, y, t, k, s, w=None, *, R=None, Y=None):
self.x = x
self.y = y
self.t = t
self.k = k
w = np.ones_like(x, dtype=float) if w is None else w
if w.ndim != 1:
raise ValueError(f"{w.ndim = } != 1.")
self.w = w
self.s = s
if y.ndim != 2:
raise ValueError(f"F: expected y.ndim == 2, got {y.ndim = } instead.")
# ### precompute what we can ###
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L250
# c evaluate the discontinuity jump of the kth derivative of the
# c b-splines at the knots t(l),l=k+2,...n-k-1 and store in b.
b, b_offset, b_nc = disc(t, k)
# the QR factorization of the data matrix, if not provided
# NB: otherwise, must be consistent with x,y & s, but this is not checked
if R is None and Y is None:
R, Y, _, _, _ = _lsq_solve_qr(x, y, t, k, w)
# prepare to combine R and the discontinuity matrix (AB); also r.h.s. (YY)
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L269
# c the rows of matrix b with weight 1/p are rotated into the
# c triangularised observation matrix a which is stored in g.
nc = t.shape[0] - k - 1
nz = k + 1
if R.shape[1] != nz:
raise ValueError(f"Internal error: {R.shape[1] =} != {k+1 =}.")
# r.h.s. of the augmented system
z = np.zeros((b.shape[0], Y.shape[1]), dtype=float)
self.YY = np.r_[Y[:nc], z]
# l.h.s. of the augmented system
AA = np.zeros((nc + b.shape[0], self.k+2), dtype=float)
AA[:nc, :nz] = R[:nc, :]
# AA[nc:, :] = b.a / p # done in __call__(self, p)
self.AA = AA
self.offset = np.r_[np.arange(nc, dtype=np.int64), b_offset]
self.nc = nc
self.b = b
def __call__(self, p):
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L279
# c the row of matrix b is rotated into triangle by givens transformation
# copy the precomputed matrices over for in-place work
# R = PackedMatrix(self.AB.a.copy(), self.AB.offset.copy(), nc)
AB = self.AA.copy()
offset = self.offset.copy()
nc = self.nc
AB[nc:, :] = self.b / p
QY = self.YY.copy()
# heavy lifting happens here, in-place
_dierckx.qr_reduce(AB, offset, nc, QY, startrow=nc)
# solve for the coefficients
c, _, fp = _dierckx.fpback(AB, nc, self.x, self.y, self.t, self.k, self.w, QY)
spl = BSpline(self.t, c, self.k)
self.spl = spl # store it
return fp - self.s
| F |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 1134,
"end": 1185
} | class ____(ParkingLot4, Place):
pass
| ParkingLot4A |
python | walkccc__LeetCode | solutions/1526. Minimum Number of Increments on Subarrays to Form a Target Array/1526.py | {
"start": 0,
"end": 190
} | class ____:
def minNumberOperations(self, target: list[int]) -> int:
ans = target[0]
for a, b in zip(target, target[1:]):
if a < b:
ans += b - a
return ans
| Solution |
python | mlflow__mlflow | tests/crewai/test_crewai_autolog.py | {
"start": 2736,
"end": 4535
} | class ____(int):
def __eq__(self, other):
return isinstance(other, int)
ANY_INT = AnyInt()
# CrewAI >= 0.175.0 changed behavior: TaskOutput.name falls back to description when None
# See: https://github.com/crewAIInc/crewAI/pull/3382
_CREWAI_VERSION = Version(crewai.__version__)
_TASK_DESCRIPTION = "Analyze and select the best city for the trip"
_TASK_DESCRIPTION_2 = "Compile an in-depth guide"
def _get_expected_task_name(description: str) -> str | None:
"""Get expected task name based on CrewAI version."""
return description if _CREWAI_VERSION >= Version("0.175.0") else None
_TASK_NAME = _get_expected_task_name(_TASK_DESCRIPTION)
_TASK_NAME_2 = _get_expected_task_name(_TASK_DESCRIPTION_2)
_CREW_OUTPUT = {
"json_dict": None,
"pydantic": None,
"raw": _LLM_ANSWER,
"tasks_output": ANY,
"token_usage": {
"cached_prompt_tokens": ANY_INT,
"completion_tokens": ANY_INT,
"prompt_tokens": ANY_INT,
"successful_requests": ANY_INT,
"total_tokens": ANY_INT,
},
}
_AGENT_1_GOAL = "Select the best city based on weather, season, and prices"
_AGENT_1_BACKSTORY = "An expert in analyzing travel data to pick ideal destinations"
@pytest.fixture
def simple_agent_1():
return Agent(
role="City Selection Expert",
goal=_AGENT_1_GOAL,
backstory=_AGENT_1_BACKSTORY,
tools=[],
llm=llm(),
)
_AGENT_2_GOAL = "Provide the BEST insights about the selected city"
@pytest.fixture
def simple_agent_2():
return Agent(
role="Local Expert at this city",
goal=_AGENT_2_GOAL,
backstory="""A knowledgeable local guide with extensive information
about the city, it's attractions and customs""",
tools=[],
llm=llm(),
)
| AnyInt |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 352411,
"end": 353369
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"email",
"is_valid",
"payload",
"signature",
"signer",
"state",
"was_signed_by_git_hub",
)
email = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="email")
is_valid = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isValid")
payload = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="payload")
signature = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="signature"
)
signer = sgqlc.types.Field("User", graphql_name="signer")
state = sgqlc.types.Field(
sgqlc.types.non_null(GitSignatureState), graphql_name="state"
)
was_signed_by_git_hub = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="wasSignedByGitHub"
)
| GitSignature |
python | sdispater__pendulum | src/pendulum/datetime.py | {
"start": 1455,
"end": 42361
} | class ____(datetime.datetime, Date):
EPOCH: ClassVar[DateTime]
min: ClassVar[DateTime]
max: ClassVar[DateTime]
# Formats
_FORMATS: ClassVar[dict[str, str | Callable[[datetime.datetime], str]]] = {
"atom": ATOM,
"cookie": COOKIE,
"iso8601": lambda dt: dt.isoformat("T"),
"rfc822": RFC822,
"rfc850": RFC850,
"rfc1036": RFC1036,
"rfc1123": RFC1123,
"rfc2822": RFC2822,
"rfc3339": lambda dt: dt.isoformat("T"),
"rss": RSS,
"w3c": W3C,
}
_MODIFIERS_VALID_UNITS: ClassVar[list[str]] = [
"second",
"minute",
"hour",
"day",
"week",
"month",
"year",
"decade",
"century",
]
_EPOCH: datetime.datetime = datetime.datetime(1970, 1, 1, tzinfo=UTC)
@classmethod
def create(
cls,
year: SupportsIndex,
month: SupportsIndex,
day: SupportsIndex,
hour: SupportsIndex = 0,
minute: SupportsIndex = 0,
second: SupportsIndex = 0,
microsecond: SupportsIndex = 0,
tz: str | float | Timezone | FixedTimezone | None | datetime.tzinfo = UTC,
fold: int = 1,
raise_on_unknown_times: bool = False,
) -> Self:
"""
Creates a new DateTime instance from a specific date and time.
"""
if tz is not None:
tz = pendulum._safe_timezone(tz)
dt = datetime.datetime(
year, month, day, hour, minute, second, microsecond, fold=fold
)
if tz is not None:
dt = tz.convert(dt, raise_on_unknown_times=raise_on_unknown_times)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo=dt.tzinfo,
fold=dt.fold,
)
@classmethod
def instance(
cls,
dt: datetime.datetime,
tz: str | Timezone | FixedTimezone | datetime.tzinfo | None = UTC,
) -> Self:
tz = dt.tzinfo or tz
if tz is not None:
tz = pendulum._safe_timezone(tz, dt=dt)
return cls.create(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tz=tz,
fold=dt.fold,
)
@overload
@classmethod
def now(cls, tz: datetime.tzinfo | None = None) -> Self: ...
@overload
@classmethod
def now(cls, tz: str | Timezone | FixedTimezone | None = None) -> Self: ...
@classmethod
def now(
cls, tz: str | Timezone | FixedTimezone | datetime.tzinfo | None = None
) -> Self:
"""
Get a DateTime instance for the current date and time.
"""
if tz is None or tz == "local":
dt = datetime.datetime.now(local_timezone())
elif tz is UTC or tz == "UTC":
dt = datetime.datetime.now(UTC)
else:
dt = datetime.datetime.now(UTC)
tz = pendulum._safe_timezone(tz)
dt = dt.astimezone(tz)
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo=dt.tzinfo,
fold=dt.fold,
)
@classmethod
def utcnow(cls) -> Self:
"""
Get a DateTime instance for the current date and time in UTC.
"""
return cls.now(UTC)
@classmethod
def today(cls) -> Self:
return cls.now()
@classmethod
def strptime(cls, time: str, fmt: str) -> Self:
return cls.instance(datetime.datetime.strptime(time, fmt))
# Getters/Setters
def set(
self,
year: int | None = None,
month: int | None = None,
day: int | None = None,
hour: int | None = None,
minute: int | None = None,
second: int | None = None,
microsecond: int | None = None,
tz: str | float | Timezone | FixedTimezone | datetime.tzinfo | None = None,
) -> Self:
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tz is None:
tz = self.tz
return self.__class__.create(
year, month, day, hour, minute, second, microsecond, tz=tz, fold=self.fold
)
@property
def float_timestamp(self) -> float:
return self.timestamp()
@property
def int_timestamp(self) -> int:
# Workaround needed to avoid inaccuracy
# for far into the future datetimes
dt = datetime.datetime(
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
tzinfo=self.tzinfo,
fold=self.fold,
)
delta = dt - self._EPOCH
return delta.days * SECONDS_PER_DAY + delta.seconds
@property
def offset(self) -> int | None:
return self.get_offset()
@property
def offset_hours(self) -> float | None:
offset = self.get_offset()
if offset is None:
return None
return offset / SECONDS_PER_MINUTE / MINUTES_PER_HOUR
@property
def timezone(self) -> Timezone | FixedTimezone | None:
if not isinstance(self.tzinfo, (Timezone, FixedTimezone)):
return None
return self.tzinfo
@property
def tz(self) -> Timezone | FixedTimezone | None:
return self.timezone
@property
def timezone_name(self) -> str | None:
tz = self.timezone
if tz is None:
return None
return tz.name
@property
def age(self) -> int:
return self.date().diff(self.now(self.tz).date(), abs=False).in_years()
def is_local(self) -> bool:
return self.offset == self.in_timezone(pendulum.local_timezone()).offset
def is_utc(self) -> bool:
return self.offset == 0
def is_dst(self) -> bool:
return self.dst() != datetime.timedelta()
def get_offset(self) -> int | None:
utcoffset = self.utcoffset()
if utcoffset is None:
return None
return int(utcoffset.total_seconds())
def date(self) -> Date:
return Date(self.year, self.month, self.day)
def time(self) -> Time:
return Time(self.hour, self.minute, self.second, self.microsecond)
def naive(self) -> Self:
"""
Return the DateTime without timezone information.
"""
return self.__class__(
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
)
def on(self, year: int, month: int, day: int) -> Self:
"""
Returns a new instance with the current date set to a different date.
"""
return self.set(year=int(year), month=int(month), day=int(day))
def at(
self, hour: int, minute: int = 0, second: int = 0, microsecond: int = 0
) -> Self:
"""
Returns a new instance with the current time to a different time.
"""
return self.set(
hour=hour, minute=minute, second=second, microsecond=microsecond
)
def in_timezone(self, tz: str | Timezone | FixedTimezone) -> Self:
"""
Set the instance's timezone from a string or object.
"""
tz = pendulum._safe_timezone(tz)
dt = self
if not self.timezone:
dt = dt.replace(fold=1)
return tz.convert(dt)
def in_tz(self, tz: str | Timezone | FixedTimezone) -> Self:
"""
Set the instance's timezone from a string or object.
"""
return self.in_timezone(tz)
# STRING FORMATTING
def to_time_string(self) -> str:
"""
Format the instance as time.
"""
return self.format("HH:mm:ss")
def to_datetime_string(self) -> str:
"""
Format the instance as date and time.
"""
return self.format("YYYY-MM-DD HH:mm:ss")
def to_day_datetime_string(self) -> str:
"""
Format the instance as day, date and time (in english).
"""
return self.format("ddd, MMM D, YYYY h:mm A", locale="en")
def to_atom_string(self) -> str:
"""
Format the instance as ATOM.
"""
return self._to_string("atom")
def to_cookie_string(self) -> str:
"""
Format the instance as COOKIE.
"""
return self._to_string("cookie", locale="en")
def to_iso8601_string(self) -> str:
"""
Format the instance as ISO 8601.
"""
string = self._to_string("iso8601")
if self.tz and self.tz.name == "UTC":
string = string.replace("+00:00", "Z")
return string
def to_rfc822_string(self) -> str:
"""
Format the instance as RFC 822.
"""
return self._to_string("rfc822")
def to_rfc850_string(self) -> str:
"""
Format the instance as RFC 850.
"""
return self._to_string("rfc850")
def to_rfc1036_string(self) -> str:
"""
Format the instance as RFC 1036.
"""
return self._to_string("rfc1036")
def to_rfc1123_string(self) -> str:
"""
Format the instance as RFC 1123.
"""
return self._to_string("rfc1123")
def to_rfc2822_string(self) -> str:
"""
Format the instance as RFC 2822.
"""
return self._to_string("rfc2822")
def to_rfc3339_string(self) -> str:
"""
Format the instance as RFC 3339.
"""
return self._to_string("rfc3339")
def to_rss_string(self) -> str:
"""
Format the instance as RSS.
"""
return self._to_string("rss")
def to_w3c_string(self) -> str:
"""
Format the instance as W3C.
"""
return self._to_string("w3c")
def _to_string(self, fmt: str, locale: str | None = None) -> str:
"""
Format the instance to a common string format.
"""
if fmt not in self._FORMATS:
raise ValueError(f"Format [{fmt}] is not supported")
fmt_value = self._FORMATS[fmt]
if callable(fmt_value):
return fmt_value(self)
return self.format(fmt_value, locale=locale)
def __str__(self) -> str:
return self.isoformat(" ")
def __repr__(self) -> str:
us = ""
if self.microsecond:
us = f", {self.microsecond}"
repr_ = "{klass}({year}, {month}, {day}, {hour}, {minute}, {second}{us}"
if self.tzinfo is not None:
repr_ += ", tzinfo={tzinfo}"
repr_ += ")"
return repr_.format(
klass=self.__class__.__name__,
year=self.year,
month=self.month,
day=self.day,
hour=self.hour,
minute=self.minute,
second=self.second,
us=us,
tzinfo=repr(self.tzinfo),
)
# Comparisons
def closest(self, *dts: datetime.datetime) -> Self: # type: ignore[override]
"""
Get the closest date to the instance.
"""
pdts = [self.instance(x) for x in dts]
return min((abs(self - dt), dt) for dt in pdts)[1]
def farthest(self, *dts: datetime.datetime) -> Self: # type: ignore[override]
"""
Get the farthest date from the instance.
"""
pdts = [self.instance(x) for x in dts]
return max((abs(self - dt), dt) for dt in pdts)[1]
def is_future(self) -> bool:
"""
Determines if the instance is in the future, ie. greater than now.
"""
return self > self.now(self.timezone)
def is_past(self) -> bool:
"""
Determines if the instance is in the past, ie. less than now.
"""
return self < self.now(self.timezone)
def is_long_year(self) -> bool:
"""
Determines if the instance is a long year
See link `https://en.wikipedia.org/wiki/ISO_8601#Week_dates`_
"""
return (
DateTime.create(self.year, 12, 28, 0, 0, 0, tz=self.tz).isocalendar()[1]
== 53
)
def is_same_day(self, dt: datetime.datetime) -> bool: # type: ignore[override]
"""
Checks if the passed in date is the same day
as the instance current day.
"""
dt = self.instance(dt)
return self.to_date_string() == dt.to_date_string()
def is_anniversary( # type: ignore[override]
self, dt: datetime.datetime | None = None
) -> bool:
"""
Check if its the anniversary.
Compares the date/month values of the two dates.
"""
if dt is None:
dt = self.now(self.tz)
instance = self.instance(dt)
return (self.month, self.day) == (instance.month, instance.day)
# ADDITIONS AND SUBSTRACTIONS
def add(
self,
years: int = 0,
months: int = 0,
weeks: int = 0,
days: int = 0,
hours: int = 0,
minutes: int = 0,
seconds: float = 0,
microseconds: int = 0,
) -> Self:
"""
Add a duration to the instance.
If we're adding units of variable length (i.e., years, months),
move forward from current time, otherwise move forward from utc, for accuracy
when moving across DST boundaries.
"""
units_of_variable_length = any([years, months, weeks, days])
current_dt = datetime.datetime(
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
)
if not units_of_variable_length:
offset = self.utcoffset()
if offset:
current_dt = current_dt - offset
dt = add_duration(
current_dt,
years=years,
months=months,
weeks=weeks,
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
microseconds=microseconds,
)
if units_of_variable_length or self.tz is None:
return self.__class__.create(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tz=self.tz,
)
dt = datetime.datetime(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo=UTC,
)
dt = self.tz.convert(dt)
return self.__class__(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
tzinfo=self.tz,
fold=dt.fold,
)
def subtract(
self,
years: int = 0,
months: int = 0,
weeks: int = 0,
days: int = 0,
hours: int = 0,
minutes: int = 0,
seconds: float = 0,
microseconds: int = 0,
) -> Self:
"""
Remove duration from the instance.
"""
return self.add(
years=-years,
months=-months,
weeks=-weeks,
days=-days,
hours=-hours,
minutes=-minutes,
seconds=-seconds,
microseconds=-microseconds,
)
# Adding a final underscore to the method name
# to avoid errors for PyPy which already defines
# a _add_timedelta method
def _add_timedelta_(self, delta: datetime.timedelta) -> Self:
"""
Add timedelta duration to the instance.
"""
if isinstance(delta, pendulum.Interval):
return self.add(
years=delta.years,
months=delta.months,
weeks=delta.weeks,
days=delta.remaining_days,
hours=delta.hours,
minutes=delta.minutes,
seconds=delta.remaining_seconds,
microseconds=delta.microseconds,
)
elif isinstance(delta, pendulum.Duration):
return self.add(**delta._signature) # type: ignore[attr-defined]
return self.add(seconds=delta.total_seconds())
def _subtract_timedelta(self, delta: datetime.timedelta) -> Self:
"""
Remove timedelta duration from the instance.
"""
if isinstance(delta, pendulum.Duration):
return self.subtract(
years=delta.years, months=delta.months, seconds=delta._total
)
return self.subtract(seconds=delta.total_seconds())
# DIFFERENCES
def diff( # type: ignore[override]
self, dt: datetime.datetime | None = None, abs: bool = True
) -> Interval[datetime.datetime]:
"""
Returns the difference between two DateTime objects represented as an Interval.
"""
if dt is None:
dt = self.now(self.tz)
return Interval(self, dt, absolute=abs)
def diff_for_humans( # type: ignore[override]
self,
other: DateTime | None = None,
absolute: bool = False,
locale: str | None = None,
) -> str:
"""
Get the difference in a human readable format in the current locale.
When comparing a value in the past to default now:
1 day ago
5 months ago
When comparing a value in the future to default now:
1 day from now
5 months from now
When comparing a value in the past to another value:
1 day before
5 months before
When comparing a value in the future to another value:
1 day after
5 months after
"""
is_now = other is None
if is_now:
other = self.now()
diff = self.diff(other)
return pendulum.format_diff(diff, is_now, absolute, locale)
# Modifiers
def start_of(self, unit: str) -> Self:
"""
Returns a copy of the instance with the time reset
with the following rules:
* second: microsecond set to 0
* minute: second and microsecond set to 0
* hour: minute, second and microsecond set to 0
* day: time to 00:00:00
* week: date to first day of the week and time to 00:00:00
* month: date to first day of the month and time to 00:00:00
* year: date to first day of the year and time to 00:00:00
* decade: date to first day of the decade and time to 00:00:00
* century: date to first day of century and time to 00:00:00
"""
if unit not in self._MODIFIERS_VALID_UNITS:
raise ValueError(f'Invalid unit "{unit}" for start_of()')
return cast("Self", getattr(self, f"_start_of_{unit}")())
def end_of(self, unit: str) -> Self:
"""
Returns a copy of the instance with the time reset
with the following rules:
* second: microsecond set to 999999
* minute: second set to 59 and microsecond set to 999999
* hour: minute and second set to 59 and microsecond set to 999999
* day: time to 23:59:59.999999
* week: date to last day of the week and time to 23:59:59.999999
* month: date to last day of the month and time to 23:59:59.999999
* year: date to last day of the year and time to 23:59:59.999999
* decade: date to last day of the decade and time to 23:59:59.999999
* century: date to last day of century and time to 23:59:59.999999
"""
if unit not in self._MODIFIERS_VALID_UNITS:
raise ValueError(f'Invalid unit "{unit}" for end_of()')
return cast("Self", getattr(self, f"_end_of_{unit}")())
def _start_of_second(self) -> Self:
"""
Reset microseconds to 0.
"""
return self.set(microsecond=0)
def _end_of_second(self) -> Self:
"""
Set microseconds to 999999.
"""
return self.set(microsecond=999999)
def _start_of_minute(self) -> Self:
"""
Reset seconds and microseconds to 0.
"""
return self.set(second=0, microsecond=0)
def _end_of_minute(self) -> Self:
"""
Set seconds to 59 and microseconds to 999999.
"""
return self.set(second=59, microsecond=999999)
def _start_of_hour(self) -> Self:
"""
Reset minutes, seconds and microseconds to 0.
"""
return self.set(minute=0, second=0, microsecond=0)
def _end_of_hour(self) -> Self:
"""
Set minutes and seconds to 59 and microseconds to 999999.
"""
return self.set(minute=59, second=59, microsecond=999999)
def _start_of_day(self) -> Self:
"""
Reset the time to 00:00:00.
"""
return self.at(0, 0, 0, 0)
def _end_of_day(self) -> Self:
"""
Reset the time to 23:59:59.999999.
"""
return self.at(23, 59, 59, 999999)
def _start_of_month(self) -> Self:
"""
Reset the date to the first day of the month and the time to 00:00:00.
"""
return self.set(self.year, self.month, 1, 0, 0, 0, 0)
def _end_of_month(self) -> Self:
"""
Reset the date to the last day of the month
and the time to 23:59:59.999999.
"""
return self.set(self.year, self.month, self.days_in_month, 23, 59, 59, 999999)
def _start_of_year(self) -> Self:
"""
Reset the date to the first day of the year and the time to 00:00:00.
"""
return self.set(self.year, 1, 1, 0, 0, 0, 0)
def _end_of_year(self) -> Self:
"""
Reset the date to the last day of the year
and the time to 23:59:59.999999.
"""
return self.set(self.year, 12, 31, 23, 59, 59, 999999)
def _start_of_decade(self) -> Self:
"""
Reset the date to the first day of the decade
and the time to 00:00:00.
"""
year = self.year - self.year % YEARS_PER_DECADE
return self.set(year, 1, 1, 0, 0, 0, 0)
def _end_of_decade(self) -> Self:
"""
Reset the date to the last day of the decade
and the time to 23:59:59.999999.
"""
year = self.year - self.year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1
return self.set(year, 12, 31, 23, 59, 59, 999999)
def _start_of_century(self) -> Self:
"""
Reset the date to the first day of the century
and the time to 00:00:00.
"""
year = self.year - 1 - (self.year - 1) % YEARS_PER_CENTURY + 1
return self.set(year, 1, 1, 0, 0, 0, 0)
def _end_of_century(self) -> Self:
"""
Reset the date to the last day of the century
and the time to 23:59:59.999999.
"""
year = self.year - 1 - (self.year - 1) % YEARS_PER_CENTURY + YEARS_PER_CENTURY
return self.set(year, 12, 31, 23, 59, 59, 999999)
def _start_of_week(self) -> Self:
"""
Reset the date to the first day of the week
and the time to 00:00:00.
"""
dt = self
if self.day_of_week != pendulum._WEEK_STARTS_AT:
dt = self.previous(pendulum._WEEK_STARTS_AT)
return dt.start_of("day")
def _end_of_week(self) -> Self:
"""
Reset the date to the last day of the week
and the time to 23:59:59.
"""
dt = self
if self.day_of_week != pendulum._WEEK_ENDS_AT:
dt = self.next(pendulum._WEEK_ENDS_AT)
return dt.end_of("day")
def next(self, day_of_week: WeekDay | None = None, keep_time: bool = False) -> Self:
"""
Modify to the next occurrence of a given day of the week.
If no day_of_week is provided, modify to the next occurrence
of the current day of the week. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
if day_of_week is None:
day_of_week = self.day_of_week
if day_of_week < WeekDay.MONDAY or day_of_week > WeekDay.SUNDAY:
raise ValueError("Invalid day of week")
dt = self if keep_time else self.start_of("day")
dt = dt.add(days=1)
while dt.day_of_week != day_of_week:
dt = dt.add(days=1)
return dt
def previous(
self, day_of_week: WeekDay | None = None, keep_time: bool = False
) -> Self:
"""
Modify to the previous occurrence of a given day of the week.
If no day_of_week is provided, modify to the previous occurrence
of the current day of the week. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
if day_of_week is None:
day_of_week = self.day_of_week
if day_of_week < WeekDay.MONDAY or day_of_week > WeekDay.SUNDAY:
raise ValueError("Invalid day of week")
dt = self if keep_time else self.start_of("day")
dt = dt.subtract(days=1)
while dt.day_of_week != day_of_week:
dt = dt.subtract(days=1)
return dt
def first_of(self, unit: str, day_of_week: WeekDay | None = None) -> Self:
"""
Returns an instance set to the first occurrence
of a given day of the week in the current unit.
If no day_of_week is provided, modify to the first day of the unit.
Use the supplied consts to indicate the desired day_of_week,
ex. DateTime.MONDAY.
Supported units are month, quarter and year.
"""
if unit not in ["month", "quarter", "year"]:
raise ValueError(f'Invalid unit "{unit}" for first_of()')
return cast("Self", getattr(self, f"_first_of_{unit}")(day_of_week))
def last_of(self, unit: str, day_of_week: WeekDay | None = None) -> Self:
"""
Returns an instance set to the last occurrence
of a given day of the week in the current unit.
If no day_of_week is provided, modify to the last day of the unit.
Use the supplied consts to indicate the desired day_of_week,
ex. DateTime.MONDAY.
Supported units are month, quarter and year.
"""
if unit not in ["month", "quarter", "year"]:
raise ValueError(f'Invalid unit "{unit}" for first_of()')
return cast("Self", getattr(self, f"_last_of_{unit}")(day_of_week))
def nth_of(self, unit: str, nth: int, day_of_week: WeekDay) -> Self:
"""
Returns a new instance set to the given occurrence
of a given day of the week in the current unit.
If the calculated occurrence is outside the scope of the current unit,
then raise an error. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
Supported units are month, quarter and year.
"""
if unit not in ["month", "quarter", "year"]:
raise ValueError(f'Invalid unit "{unit}" for first_of()')
dt = cast("Optional[Self]", getattr(self, f"_nth_of_{unit}")(nth, day_of_week))
if not dt:
raise PendulumException(
f"Unable to find occurrence {nth}"
f" of {WeekDay(day_of_week).name.capitalize()} in {unit}"
)
return dt
def _first_of_month(self, day_of_week: WeekDay | None = None) -> Self:
"""
Modify to the first occurrence of a given day of the week
in the current month. If no day_of_week is provided,
modify to the first day of the month. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
dt = self.start_of("day")
if day_of_week is None:
return dt.set(day=1)
month = calendar.monthcalendar(dt.year, dt.month)
calendar_day = day_of_week
if month[0][calendar_day] > 0:
day_of_month = month[0][calendar_day]
else:
day_of_month = month[1][calendar_day]
return dt.set(day=day_of_month)
def _last_of_month(self, day_of_week: WeekDay | None = None) -> Self:
"""
Modify to the last occurrence of a given day of the week
in the current month. If no day_of_week is provided,
modify to the last day of the month. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
dt = self.start_of("day")
if day_of_week is None:
return dt.set(day=self.days_in_month)
month = calendar.monthcalendar(dt.year, dt.month)
calendar_day = day_of_week
if month[-1][calendar_day] > 0:
day_of_month = month[-1][calendar_day]
else:
day_of_month = month[-2][calendar_day]
return dt.set(day=day_of_month)
def _nth_of_month(
self, nth: int, day_of_week: WeekDay | None = None
) -> Self | None:
"""
Modify to the given occurrence of a given day of the week
in the current month. If the calculated occurrence is outside,
the scope of the current month, then return False and no
modifications are made. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
if nth == 1:
return self.first_of("month", day_of_week)
dt = self.first_of("month")
check = dt.format("%Y-%M")
for _ in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
dt = dt.next(day_of_week)
if dt.format("%Y-%M") == check:
return self.set(day=dt.day).start_of("day")
return None
def _first_of_quarter(self, day_of_week: WeekDay | None = None) -> Self:
"""
Modify to the first occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the first day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
return self.on(self.year, self.quarter * 3 - 2, 1).first_of(
"month", day_of_week
)
def _last_of_quarter(self, day_of_week: WeekDay | None = None) -> Self:
"""
Modify to the last occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the last day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
return self.on(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
def _nth_of_quarter(
self, nth: int, day_of_week: WeekDay | None = None
) -> Self | None:
"""
Modify to the given occurrence of a given day of the week
in the current quarter. If the calculated occurrence is outside,
the scope of the current quarter, then return False and no
modifications are made. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
if nth == 1:
return self.first_of("quarter", day_of_week)
dt = self.set(day=1, month=self.quarter * 3)
last_month = dt.month
year = dt.year
dt = dt.first_of("quarter")
for _ in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
dt = dt.next(day_of_week)
if last_month < dt.month or year != dt.year:
return None
return self.on(self.year, dt.month, dt.day).start_of("day")
def _first_of_year(self, day_of_week: WeekDay | None = None) -> Self:
"""
Modify to the first occurrence of a given day of the week
in the current year. If no day_of_week is provided,
modify to the first day of the year. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
return self.set(month=1).first_of("month", day_of_week)
def _last_of_year(self, day_of_week: WeekDay | None = None) -> Self:
"""
Modify to the last occurrence of a given day of the week
in the current year. If no day_of_week is provided,
modify to the last day of the year. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
return self.set(month=MONTHS_PER_YEAR).last_of("month", day_of_week)
def _nth_of_year(self, nth: int, day_of_week: WeekDay | None = None) -> Self | None:
"""
Modify to the given occurrence of a given day of the week
in the current year. If the calculated occurrence is outside,
the scope of the current year, then return False and no
modifications are made. Use the supplied consts
to indicate the desired day_of_week, ex. DateTime.MONDAY.
"""
if nth == 1:
return self.first_of("year", day_of_week)
dt = self.first_of("year")
year = dt.year
for _ in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
dt = dt.next(day_of_week)
if year != dt.year:
return None
return self.on(self.year, dt.month, dt.day).start_of("day")
def average( # type: ignore[override]
self, dt: datetime.datetime | None = None
) -> Self:
"""
Modify the current instance to the average
of a given instance (default now) and the current instance.
"""
if dt is None:
dt = self.now(self.tz)
diff = self.diff(dt, False)
return self.add(
microseconds=(diff.in_seconds() * 1000000 + diff.microseconds) // 2
)
@overload # type: ignore[override]
def __sub__(self, other: datetime.timedelta) -> Self: ...
@overload
def __sub__(self, other: DateTime) -> Interval[datetime.datetime]: ...
def __sub__(
self, other: datetime.datetime | datetime.timedelta
) -> Self | Interval[datetime.datetime]:
if isinstance(other, datetime.timedelta):
return self._subtract_timedelta(other)
if not isinstance(other, datetime.datetime):
return NotImplemented
if not isinstance(other, self.__class__):
if other.tzinfo is None:
other = pendulum.naive(
other.year,
other.month,
other.day,
other.hour,
other.minute,
other.second,
other.microsecond,
)
else:
other = self.instance(other)
return other.diff(self, False)
def __rsub__(self, other: datetime.datetime) -> Interval[datetime.datetime]:
if not isinstance(other, datetime.datetime):
return NotImplemented
if not isinstance(other, self.__class__):
if other.tzinfo is None:
other = pendulum.naive(
other.year,
other.month,
other.day,
other.hour,
other.minute,
other.second,
other.microsecond,
)
else:
other = self.instance(other)
return self.diff(other, False)
def __add__(self, other: datetime.timedelta) -> Self:
if not isinstance(other, datetime.timedelta):
return NotImplemented
caller = traceback.extract_stack(limit=2)[0].name
if caller == "astimezone":
return super().__add__(other)
return self._add_timedelta_(other)
def __radd__(self, other: datetime.timedelta) -> Self:
return self.__add__(other)
# Native methods override
@classmethod
def fromtimestamp(cls, t: float, tz: datetime.tzinfo | None = None) -> Self:
tzinfo = pendulum._safe_timezone(tz)
return cls.instance(datetime.datetime.fromtimestamp(t, tz=tzinfo), tz=tzinfo)
@classmethod
def utcfromtimestamp(cls, t: float) -> Self:
return cls.instance(datetime.datetime.utcfromtimestamp(t), tz=None)
@classmethod
def fromordinal(cls, n: int) -> Self:
return cls.instance(datetime.datetime.fromordinal(n), tz=None)
@classmethod
def combine(
cls,
date: datetime.date,
time: datetime.time,
tzinfo: datetime.tzinfo | None = None,
) -> Self:
return cls.instance(datetime.datetime.combine(date, time), tz=tzinfo)
def astimezone(self, tz: datetime.tzinfo | None = None) -> Self:
dt = super().astimezone(tz)
return self.__class__(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
fold=dt.fold,
tzinfo=dt.tzinfo,
)
def replace(
self,
year: SupportsIndex | None = None,
month: SupportsIndex | None = None,
day: SupportsIndex | None = None,
hour: SupportsIndex | None = None,
minute: SupportsIndex | None = None,
second: SupportsIndex | None = None,
microsecond: SupportsIndex | None = None,
tzinfo: bool | datetime.tzinfo | Literal[True] | None = True,
fold: int | None = None,
) -> Self:
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self.fold
if tzinfo is not None:
tzinfo = pendulum._safe_timezone(tzinfo)
return self.__class__.create(
year,
month,
day,
hour,
minute,
second,
microsecond,
tz=tzinfo,
fold=fold,
)
def __getnewargs__(self) -> tuple[Self]:
return (self,)
def _getstate(
self, protocol: SupportsIndex = 3
) -> tuple[int, int, int, int, int, int, int, datetime.tzinfo | None]:
return (
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
self.tzinfo,
)
def __reduce__(
self,
) -> tuple[
type[Self],
tuple[int, int, int, int, int, int, int, datetime.tzinfo | None],
]:
return self.__reduce_ex__(2)
def __reduce_ex__(
self, protocol: SupportsIndex
) -> tuple[
type[Self],
tuple[int, int, int, int, int, int, int, datetime.tzinfo | None],
]:
return self.__class__, self._getstate(protocol)
def __deepcopy__(self, _: dict[int, Self]) -> Self:
return self.__class__(
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
tzinfo=self.tz,
fold=self.fold,
)
def _cmp(self, other: datetime.datetime, **kwargs: Any) -> int:
# Fix for pypy which compares using this method
# which would lead to infinite recursion if we didn't override
dt = datetime.datetime(
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
tzinfo=self.tz,
fold=self.fold,
)
return 0 if dt == other else 1 if dt > other else -1
DateTime.min = DateTime(1, 1, 1, 0, 0, tzinfo=UTC)
DateTime.max = DateTime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=UTC)
DateTime.EPOCH = DateTime(1970, 1, 1, tzinfo=UTC)
| DateTime |
python | python__mypy | mypyc/test-data/fixtures/ir.py | {
"start": 3085,
"end": 5312
} | class ____:
@overload
def __init__(self) -> None: pass
@overload
def __init__(self, x: object) -> None: pass
def __add__(self, x: str) -> str: pass
def __mul__(self, x: int) -> str: pass
def __rmul__(self, x: int) -> str: pass
def __eq__(self, x: object) -> bool: pass
def __ne__(self, x: object) -> bool: pass
def __lt__(self, x: str) -> bool: ...
def __le__(self, x: str) -> bool: ...
def __gt__(self, x: str) -> bool: ...
def __ge__(self, x: str) -> bool: ...
@overload
def __getitem__(self, i: int) -> str: pass
@overload
def __getitem__(self, i: slice) -> str: pass
def __contains__(self, item: str) -> bool: pass
def __iter__(self) -> Iterator[str]: ...
def find(self, sub: str, start: Optional[int] = None, end: Optional[int] = None, /) -> int: ...
def rfind(self, sub: str, start: Optional[int] = None, end: Optional[int] = None, /) -> int: ...
def split(self, sep: Optional[str] = None, maxsplit: int = -1) -> List[str]: pass
def rsplit(self, sep: Optional[str] = None, maxsplit: int = -1) -> List[str]: pass
def splitlines(self, keepends: bool = False) -> List[str]: ...
def strip (self, item: Optional[str] = None) -> str: pass
def lstrip(self, item: Optional[str] = None) -> str: pass
def rstrip(self, item: Optional[str] = None) -> str: pass
def join(self, x: Iterable[str]) -> str: pass
def format(self, *args: Any, **kwargs: Any) -> str: ...
def upper(self) -> str: ...
def startswith(self, x: Union[str, Tuple[str, ...]], start: int=..., end: int=...) -> bool: ...
def endswith(self, x: Union[str, Tuple[str, ...]], start: int=..., end: int=...) -> bool: ...
def replace(self, old: str, new: str, maxcount: int=...) -> str: ...
def encode(self, encoding: str=..., errors: str=...) -> bytes: ...
def partition(self, sep: str, /) -> Tuple[str, str, str]: ...
def rpartition(self, sep: str, /) -> Tuple[str, str, str]: ...
def removeprefix(self, prefix: str, /) -> str: ...
def removesuffix(self, suffix: str, /) -> str: ...
def islower(self) -> bool: ...
def count(self, substr: str, start: Optional[int] = None, end: Optional[int] = None) -> int: pass
| str |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_cloud_build.py | {
"start": 12832,
"end": 14469
} | class ____:
@pytest.fixture
def hook(self):
return CloudBuildAsyncHook(
gcp_conn_id="google_cloud_default",
)
@pytest.mark.asyncio
@mock.patch(CLOUD_BUILD_PATH.format("CloudBuildAsyncHook.get_credentials"))
@mock.patch(CLOUD_BUILD_PATH.format("CloudBuildAsyncClient.get_build"))
async def test_async_cloud_build_service_client_creation_should_execute_successfully(
self, mocked_get_build, mock_get_creds, hook, mocker
):
fake_credentials = mock.MagicMock(name="FakeCreds")
mock_get_creds.return_value = fake_credentials
mocked_cloud_build_async_client = mocker.patch.object(
CloudBuildAsyncClient, "__init__", return_value=None, spec=CloudBuildAsyncClient
)
mocked_get_build.return_value = Future()
await hook.get_cloud_build(project_id=PROJECT_ID, id_=BUILD_ID)
request = GetBuildRequest(
dict(
project_id=PROJECT_ID,
id=BUILD_ID,
)
)
mock_get_creds.assert_called_once()
mocked_get_build.assert_called_once_with(request=request, retry=DEFAULT, timeout=None, metadata=())
mocked_cloud_build_async_client.assert_called_once_with(
client_info=mock.ANY, client_options=None, credentials=fake_credentials
)
@pytest.mark.asyncio
async def test_async_get_clod_build_without_build_id_should_throw_exception(self, hook):
with pytest.raises(AirflowException, match=r"Google Cloud Build id is required."):
await hook.get_cloud_build(project_id=PROJECT_ID, id_=None)
| TestAsyncHook |
python | pappasam__jedi-language-server | jedi_language_server/initialization_options.py | {
"start": 906,
"end": 1048
} | class ____:
enable: bool = True
did_open: bool = True
did_save: bool = True
did_change: bool = True
@light_dataclass
| Diagnostics |
python | celery__celery | t/smoke/tests/quorum_queues/test_quorum_queues.py | {
"start": 269,
"end": 1035
} | class ____:
def test_queue_type(self, celery_setup: CeleryTestSetup):
broker: RabbitMQManagementBroker = celery_setup.broker
api = broker.get_management_url() + "/api/queues"
response = requests.get(api, auth=HTTPBasicAuth("guest", "guest"))
assert response.status_code == 200
res = response.json()
assert isinstance(res, list)
worker_queue = next((queue for queue in res if queue["name"] == celery_setup.worker.worker_queue), None)
assert worker_queue is not None, f'"{celery_setup.worker.worker_queue}" queue not found'
queue_type = worker_queue.get("type")
assert queue_type == "quorum", f'"{celery_setup.worker.worker_queue}" queue is not a quorum queue'
| test_broker_configuration |
python | getsentry__sentry | tests/sentry/search/eap/test_ourlogs.py | {
"start": 605,
"end": 13577
} | class ____(TestCase):
def setUp(self) -> None:
self.resolver = SearchResolver(
params=SnubaParams(), config=SearchResolverConfig(), definitions=OURLOG_DEFINITIONS
)
def test_freetext_search_query(self) -> None:
where, having, _ = self.resolver.resolve_query("foo")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="sentry.body", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_LIKE,
value=AttributeValue(val_str="%foo%"),
)
)
assert having is None
def test_simple_query(self) -> None:
where, having, _ = self.resolver.resolve_query("message:foo")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="sentry.body", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="foo"),
)
)
assert having is None
def test_negation(self) -> None:
where, having, _ = self.resolver.resolve_query("!message:foo")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="sentry.body", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_NOT_EQUALS,
value=AttributeValue(val_str="foo"),
)
)
assert having is None
def test_in_filter(self) -> None:
where, having, _ = self.resolver.resolve_query("message:[foo,bar,baz]")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="sentry.body", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_IN,
value=AttributeValue(val_str_array=StrArray(values=["foo", "bar", "baz"])),
)
)
assert having is None
def test_not_in_filter(self) -> None:
where, having, _ = self.resolver.resolve_query("!message:[foo,bar,baz]")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="sentry.body", type=AttributeKey.Type.TYPE_STRING),
op=ComparisonFilter.OP_NOT_IN,
value=AttributeValue(val_str_array=StrArray(values=["foo", "bar", "baz"])),
)
)
assert having is None
def test_in_numeric_filter(self) -> None:
where, having, _ = self.resolver.resolve_query("severity_number:[123,456,789]")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="sentry.severity_number", type=AttributeKey.Type.TYPE_INT),
op=ComparisonFilter.OP_IN,
value=AttributeValue(val_int_array=IntArray(values=[123, 456, 789])),
)
)
assert having is None
def test_greater_than_numeric_filter(self) -> None:
where, having, _ = self.resolver.resolve_query("severity_number:>123")
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="sentry.severity_number", type=AttributeKey.Type.TYPE_INT),
op=ComparisonFilter.OP_GREATER_THAN,
value=AttributeValue(val_int=123),
)
)
assert having is None
def test_query_with_and(self) -> None:
where, having, _ = self.resolver.resolve_query("message:foo severity_text:bar")
assert where == TraceItemFilter(
and_filter=AndFilter(
filters=[
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="sentry.body", type=AttributeKey.Type.TYPE_STRING
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="foo"),
)
),
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="sentry.severity_text", type=AttributeKey.Type.TYPE_STRING
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="bar"),
)
),
]
)
)
assert having is None
def test_query_with_or(self) -> None:
where, having, _ = self.resolver.resolve_query("message:foo or severity_text:bar")
assert where == TraceItemFilter(
or_filter=OrFilter(
filters=[
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="sentry.body", type=AttributeKey.Type.TYPE_STRING
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="foo"),
)
),
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="sentry.severity_text", type=AttributeKey.Type.TYPE_STRING
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="bar"),
)
),
]
)
)
assert having is None
def test_query_with_or_and_brackets(self) -> None:
where, having, _ = self.resolver.resolve_query(
"(message:123 and severity_text:345) or (message:foo and severity:bar)"
)
assert where == TraceItemFilter(
or_filter=OrFilter(
filters=[
TraceItemFilter(
and_filter=AndFilter(
filters=[
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="sentry.body", type=AttributeKey.Type.TYPE_STRING
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="123"),
)
),
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="sentry.severity_text",
type=AttributeKey.Type.TYPE_STRING,
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="345"),
)
),
]
)
),
TraceItemFilter(
and_filter=AndFilter(
filters=[
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="sentry.body", type=AttributeKey.Type.TYPE_STRING
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="foo"),
)
),
TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name="sentry.severity_text",
type=AttributeKey.Type.TYPE_STRING,
),
op=ComparisonFilter.OP_EQUALS,
value=AttributeValue(val_str="bar"),
)
),
]
)
),
]
)
)
def test_empty_query(self) -> None:
where, having, _ = self.resolver.resolve_query("")
assert where is None
assert having is None
def test_none_query(self) -> None:
where, having, _ = self.resolver.resolve_query(None)
assert where is None
assert having is None
def test_count_default_argument() -> None:
resolver = SearchResolver(
params=SnubaParams(), config=SearchResolverConfig(), definitions=OURLOG_DEFINITIONS
)
resolved_column, virtual_context = resolver.resolve_column("count()")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=Function.FUNCTION_COUNT,
key=AttributeKey(name="sentry.project_id", type=AttributeKey.Type.TYPE_INT),
label="count()",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
@pytest.mark.parametrize(
"function_name,proto_function",
[
("count", Function.FUNCTION_COUNT),
("sum", Function.FUNCTION_SUM),
("avg", Function.FUNCTION_AVG),
("p50", Function.FUNCTION_P50),
("p75", Function.FUNCTION_P75),
("p90", Function.FUNCTION_P90),
("p95", Function.FUNCTION_P95),
("p99", Function.FUNCTION_P99),
("max", Function.FUNCTION_MAX),
("min", Function.FUNCTION_MIN),
],
)
def test_monoid_functions(function_name, proto_function) -> None:
resolver = SearchResolver(
params=SnubaParams(), config=SearchResolverConfig(), definitions=OURLOG_DEFINITIONS
)
for attr, proto_attr, proto_type in (
("severity_number", "sentry.severity_number", AttributeKey.Type.TYPE_INT),
("tags[user_attribute,number]", "user_attribute", AttributeKey.Type.TYPE_DOUBLE),
):
resolved_column, virtual_context = resolver.resolve_column(f"{function_name}({attr})")
assert resolved_column.proto_definition == AttributeAggregation(
aggregate=proto_function,
key=AttributeKey(name=proto_attr, type=proto_type),
label=f"{function_name}({attr})",
extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
)
assert virtual_context is None
@pytest.mark.parametrize(
"test_case",
[
{
"attribute_definition": OURLOG_DEFINITIONS.columns["observed_timestamp"],
"search_term": 1234567890,
"expected_value": AttributeValue(val_str="1234567890.0"),
"expected_search_proto_type": AttributeKey.Type.TYPE_STRING,
},
{
"attribute_definition": OURLOG_DEFINITIONS.columns["observed_timestamp"],
"search_term": "1111111111",
"expected_value": AttributeValue(val_str="1111111111.0"),
"expected_search_proto_type": AttributeKey.Type.TYPE_STRING,
},
{
"attribute_definition": OURLOG_DEFINITIONS.columns["payload_size"],
"search_term": 1337,
"expected_value": AttributeValue(val_double=1337),
"expected_search_proto_type": AttributeKey.Type.TYPE_DOUBLE,
},
],
)
def test_attribute_search(test_case) -> None:
attribute_definition = test_case["attribute_definition"]
search_term = test_case["search_term"]
expected_value = test_case["expected_value"]
expected_search_proto_type = test_case["expected_search_proto_type"]
attribute_alias = attribute_definition.public_alias
resolver = SearchResolver(
params=SnubaParams(), config=SearchResolverConfig(), definitions=OURLOG_DEFINITIONS
)
query = f"{attribute_alias}:{search_term}"
where, having, _ = resolver.resolve_query(query)
assert where == TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(
name=attribute_definition.internal_name, type=expected_search_proto_type
),
op=ComparisonFilter.OP_EQUALS,
value=expected_value,
)
)
assert having is None
| SearchResolverQueryTest |
python | getsentry__sentry | src/sentry/integrations/analytics.py | {
"start": 955,
"end": 1129
} | class ____(analytics.Event):
provider: str
id: int
organization_id: int
@analytics.eventclass("integration.issue.assignee.synced")
| IntegrationIssueStatusSyncedEvent |
python | falconry__falcon | tests/test_httpstatus.py | {
"start": 1718,
"end": 1956
} | class ____:
def on_get(self, req, resp):
resp.status_code = 500
resp.set_header('X-Failed', 'True')
resp.text = 'Fail'
def on_patch(self, req, resp):
raise HTTPStatus(200, text=None)
| TestHookResource |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/fields.py | {
"start": 8448,
"end": 10392
} | class ____(BaseField):
"""Field for embedded models."""
def __init__(
self,
model_types: Union[List[Union[str, Type]], Tuple[Union[str, Type]]],
*args: Any,
**kwargs: Any,
) -> None:
self._assign_model_types(model_types)
super(EmbeddedField, self).__init__(*args, **kwargs)
def _assign_model_types(self, model_types: Union[list, tuple, Any]) -> None:
if not isinstance(model_types, (list, tuple)):
model_types = (model_types,)
types = []
for type_ in model_types:
if isinstance(type_, six.string_types):
types.append(_LazyType(type_))
else:
types.append(type_)
self.types = tuple(types)
def _finish_initialization(self, owner: type) -> None:
super(EmbeddedField, self)._finish_initialization(owner)
types = []
for type in self.types:
if isinstance(type, _LazyType):
types.append(type.evaluate(owner))
else:
types.append(type)
self.types = tuple(types)
def validate(self, value: Any) -> None:
super(EmbeddedField, self).validate(value)
try:
value.validate()
except AttributeError:
pass
def parse_value(self, value: Any) -> Any:
"""Parse value to proper model type."""
if not isinstance(value, dict):
return value
embed_type = self._get_embed_type()
return embed_type(**value)
def _get_embed_type(self) -> type:
if len(self.types) != 1:
raise ValidationError(
'Cannot decide which type to choose from "{types}".'.format(
types=", ".join([t.__name__ for t in self.types])
)
)
return self.types[0]
def to_struct(self, value: Any) -> Any:
return value.to_struct()
| EmbeddedField |
python | protocolbuffers__protobuf | python/google/protobuf/internal/type_checkers.py | {
"start": 8724,
"end": 8902
} | class ____(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
| Int32ValueChecker |
python | jmcnamara__XlsxWriter | xlsxwriter/test/xmlwriter/test_xmlwriter.py | {
"start": 301,
"end": 5009
} | class ____(unittest.TestCase):
"""
Test the XML Writer class.
"""
def setUp(self):
self.fh = StringIO()
self.writer = XMLwriter()
self.writer._set_filehandle(self.fh)
def test_xml_declaration(self):
"""Test _xml_declaration()"""
self.writer._xml_declaration()
exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_start_tag(self):
"""Test _xml_start_tag() with no attributes"""
self.writer._xml_start_tag("foo")
exp = """<foo>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_start_tag_with_attributes(self):
"""Test _xml_start_tag() with attributes"""
self.writer._xml_start_tag("foo", [("span", "8"), ("baz", "7")])
exp = """<foo span="8" baz="7">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_start_tag_with_attributes_to_escape(self):
"""Test _xml_start_tag() with attributes requiring escaping"""
self.writer._xml_start_tag("foo", [("span", '&<>"')])
exp = """<foo span="&<>"">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_start_tag_unencoded(self):
"""Test _xml_start_tag_unencoded() with attributes"""
self.writer._xml_start_tag_unencoded("foo", [("span", '&<>"')])
exp = """<foo span="&<>"">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_end_tag(self):
"""Test _xml_end_tag()"""
self.writer._xml_end_tag("foo")
exp = """</foo>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_empty_tag(self):
"""Test _xml_empty_tag()"""
self.writer._xml_empty_tag("foo")
exp = """<foo/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_empty_tag_with_attributes(self):
"""Test _xml_empty_tag() with attributes"""
self.writer._xml_empty_tag("foo", [("span", "8")])
exp = """<foo span="8"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_empty_tag_unencoded(self):
"""Test _xml_empty_tag_unencoded() with attributes"""
self.writer._xml_empty_tag_unencoded("foo", [("span", "&")])
exp = """<foo span="&"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_data_element(self):
"""Test _xml_data_element()"""
self.writer._xml_data_element("foo", "bar")
exp = """<foo>bar</foo>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_data_element_with_attributes(self):
"""Test _xml_data_element() with attributes"""
self.writer._xml_data_element("foo", "bar", [("span", "8")])
exp = """<foo span="8">bar</foo>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_data_element_with_escapes(self):
"""Test _xml_data_element() with data requiring escaping"""
self.writer._xml_data_element("foo", '&<>"', [("span", "8")])
exp = """<foo span="8">&<>"</foo>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_string_element(self):
"""Test _xml_string_element()"""
self.writer._xml_string_element(99, [("span", "8")])
exp = """<c span="8" t=\"s\"><v>99</v></c>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_si_element(self):
"""Test _xml_si_element()"""
self.writer._xml_si_element("foo", [("span", "8")])
exp = """<si><t span="8">foo</t></si>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_rich_si_element(self):
"""Test _xml_rich_si_element()"""
self.writer._xml_rich_si_element("foo")
exp = """<si>foo</si>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_number_element(self):
"""Test _xml_number_element()"""
self.writer._xml_number_element(99, [("span", "8")])
exp = """<c span="8"><v>99</v></c>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_xml_formula_element(self):
"""Test _xml_formula_element()"""
self.writer._xml_formula_element("1+2", 3, [("span", "8")])
exp = """<c span="8"><f>1+2</f><v>3</v></c>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestXMLwriter |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 160937,
"end": 162784
} | class ____(Response):
"""
Response of tasks.edit endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "edit"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(EditResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| EditResponse |
python | getsentry__sentry | tests/sentry_plugins/pivotal/test_pivotal_plugin.py | {
"start": 274,
"end": 2106
} | class ____(PluginTestCase):
@cached_property
def plugin(self) -> PivotalPlugin:
return PivotalPlugin()
def test_get_issue_label(self) -> None:
group = self.create_group(message="Hello world", culprit="foo.bar")
assert self.plugin.get_issue_label(group, "1") == "#1"
def test_get_issue_url(self) -> None:
group = self.create_group(message="Hello world", culprit="foo.bar")
assert (
self.plugin.get_issue_url(group, "1") == "https://www.pivotaltracker.com/story/show/1"
)
def test_is_configured(self) -> None:
assert self.plugin.is_configured(self.project) is False
self.plugin.set_option("token", "1", self.project)
self.plugin.set_option("project", "1", self.project)
assert self.plugin.is_configured(self.project) is True
def test_no_secrets(self) -> None:
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.login_as(self.user)
self.plugin.set_option("token", "abcdef", self.project)
url = reverse(
"sentry-api-0-project-plugin-details",
args=[self.org.slug, self.project.slug, "pivotal"],
)
res = self.client.get(url)
config = orjson.loads(res.content)["config"]
token_config = [item for item in config if item["name"] == "token"][0]
assert token_config.get("type") == "secret"
assert token_config.get("value") is None
assert token_config.get("hasSavedValue") is True
assert token_config.get("prefix") == "abcd"
| PivotalPluginTest |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 4395,
"end": 8335
} | class ____(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
parse_version = _packaging_version.Version
_state_vars: dict[str, str] = {}
def _declare_state(vartype: str, varname: str, initial_value: _T) -> _T:
_state_vars[varname] = vartype
return initial_value
def __getstate__() -> dict[str, Any]:
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state: dict[str, Any]) -> dict[str, Any]:
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of macOS that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of macOS that we are *running*. To allow usage of packages that
explicitly require a newer version of macOS, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
except ValueError:
# not macOS
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require',
'run_script',
'get_provider',
'get_distribution',
'load_entry_point',
'get_entry_map',
'get_entry_info',
'iter_entry_points',
'resource_string',
'resource_stream',
'resource_filename',
'resource_listdir',
'resource_exists',
'resource_isdir',
# Environmental control
'declare_namespace',
'working_set',
'add_activation_listener',
'find_distributions',
'set_extraction_path',
'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment',
'WorkingSet',
'ResourceManager',
'Distribution',
'Requirement',
'EntryPoint',
# Exceptions
'ResolutionError',
'VersionConflict',
'DistributionNotFound',
'UnknownExtra',
'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements',
'parse_version',
'safe_name',
'safe_version',
'get_platform',
'compatible_platforms',
'yield_lines',
'split_sections',
'safe_extra',
'to_filename',
'invalid_marker',
'evaluate_marker',
# filesystem utilities
'ensure_directory',
'normalize_path',
# Distribution "precedence" constants
'EGG_DIST',
'BINARY_DIST',
'SOURCE_DIST',
'CHECKOUT_DIST',
'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider',
'IResourceProvider',
'FileMetadata',
'PathMetadata',
'EggMetadata',
'EmptyProvider',
'empty_provider',
'NullProvider',
'EggProvider',
'DefaultProvider',
'ZipProvider',
'register_finder',
'register_namespace_handler',
'register_loader_type',
'fixup_namespace_packages',
'get_importer',
# Warnings
'PkgResourcesDeprecationWarning',
# Deprecated/backward compatibility only
'run_main',
'AvailableDistributions',
]
| PEP440Warning |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-fireworks/llama_index/embeddings/fireworks/base.py | {
"start": 502,
"end": 2547
} | class ____(OpenAIEmbedding):
"""
Fireworks class for embeddings.
Args:
model (str): Model for embedding.
Defaults to "nomic-ai/nomic-embed-text-v1.5"
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
api_key: str = Field(description="The Fireworks API key.")
api_base: str = Field(description="The base URL for Fireworks API.")
api_version: str = Field(description="The version for OpenAI API.")
def __init__(
self,
model_name: str = DEFAULT_MODEL,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = DEFAULT_API_BASE,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
api_key, api_base, api_version = resolve_fireworks_credentials(
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
super().__init__(
model_name=model_name,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "FireworksEmbedding"
| FireworksEmbedding |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 4185,
"end": 4368
} | class ____:
name: str
slug: Optional[str]
description: Optional[str]
p = AddProject(name='x', slug='y', description='z')
# Same as Model, but with frozen = True
| AddProject |
python | falconry__falcon | falcon/errors.py | {
"start": 61623,
"end": 64288
} | class ____(HTTPError):
"""431 Request Header Fields Too Large.
The 431 status code indicates that the server is unwilling to process
the request because its header fields are too large. The request MAY
be resubmitted after reducing the size of the request header fields.
It can be used both when the set of request header fields in total is
too large, and when a single header field is at fault. In the latter
case, the response representation SHOULD specify which header field
was too large.
Responses with the 431 status code MUST NOT be stored by a cache.
(See also: RFC 6585, Section 5)
All the arguments are defined as keyword-only.
Keyword Args:
title (str): Error title (default '431 Request Header Fields Too Large').
description (str): Human-friendly description of the rate limit that
was exceeded.
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
):
super().__init__(
status.HTTP_431,
title=title,
description=description,
headers=headers,
**kwargs, # type: ignore[arg-type]
)
| HTTPRequestHeaderFieldsTooLarge |
python | spack__spack | lib/spack/spack/install_test.py | {
"start": 29271,
"end": 41579
} | class ____:
"""The class that manages specs for ``spack test run`` execution."""
def __init__(self, specs: Iterable[Spec], alias: Optional[str] = None) -> None:
# copy so that different test suites have different package objects
# even if they contain the same spec
self.specs = [spec.copy() for spec in specs]
self.current_test_spec = None # spec currently tested, can be virtual
self.current_base_spec = None # spec currently running do_test
self.alias = alias
self._hash: Optional[str] = None
self._stage: Optional[Prefix] = None
self.counts: "Counter" = Counter()
self.reports: List[spack.report.RequestRecord] = []
@property
def name(self) -> str:
"""The name (alias or, if none, hash) of the test suite."""
return self.alias if self.alias else self.content_hash
@property
def content_hash(self) -> str:
"""The hash used to uniquely identify the test suite."""
if not self._hash:
json_text = sjson.dump(self.to_dict())
assert json_text is not None, f"{__name__} unexpected value for 'json_text'"
sha = hashlib.sha1(json_text.encode("utf-8"))
b32_hash = base64.b32encode(sha.digest()).lower()
b32_hash = b32_hash.decode("utf-8")
self._hash = b32_hash
return self._hash
def __call__(
self,
*,
remove_directory: bool = True,
dirty: bool = False,
fail_first: bool = False,
externals: bool = False,
timeout: Optional[int] = None,
):
self.write_reproducibility_data()
for spec in self.specs:
# Setup cdash/junit/etc reports
report = spack.report.RequestRecord(spec)
self.reports.append(report)
record = spack.report.TestRecord(spec, self.stage)
report.append_record(record)
record.start()
try:
if spec.package.test_suite:
raise TestSuiteSpecError(
f"Package {spec.package.name} cannot be run in two test suites at once"
)
# Set up the test suite to know which test is running
spec.package.test_suite = self
self.current_base_spec = spec
self.current_test_spec = spec
# setup per-test directory in the stage dir
test_dir = self.test_dir_for_spec(spec)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
fs.mkdirp(test_dir)
# run the package tests
spec.package.do_test(dirty=dirty, externals=externals, timeout=timeout)
# Clean up on success
if remove_directory:
shutil.rmtree(test_dir)
status = self.test_status(spec, externals)
self.counts[status] += 1
self.write_test_result(spec, status)
record.succeed(externals)
except SkipTest:
record.skip(msg="Test marked to skip")
status = TestStatus.SKIPPED
self.counts[status] += 1
self.write_test_result(spec, TestStatus.SKIPPED)
except BaseException as exc:
record.fail(exc)
status = TestStatus.FAILED
self.counts[status] += 1
tty.debug(f"Test failure: {str(exc)}")
if isinstance(exc, (SyntaxError, TestSuiteSpecError)):
# Create the test log file and report the error.
self.ensure_stage()
msg = f"Testing package {self.test_pkg_id(spec)}\n{str(exc)}"
_add_msg_to_file(self.log_file_for_spec(spec), msg)
msg = f"Test failure: {str(exc)}"
_add_msg_to_file(self.log_file_for_spec(spec), msg)
self.write_test_result(spec, TestStatus.FAILED)
if fail_first:
break
finally:
spec.package.test_suite = None
self.current_test_spec = None
self.current_base_spec = None
write_test_summary(self.counts)
if self.counts[TestStatus.FAILED]:
for spec in self.specs:
print(
"\nSee {} test results at:\n {}".format(
spec.format("{name}-{version}-{hash:7}"), self.log_file_for_spec(spec)
)
)
failures = self.counts[TestStatus.FAILED]
if failures:
raise TestSuiteFailure(failures)
def test_status(self, spec: spack.spec.Spec, externals: bool) -> TestStatus:
"""Returns the overall test results status for the spec.
Args:
spec: instance of the spec under test
externals: ``True`` if externals are to be tested, else ``False``
"""
tests_status_file = self.tested_file_for_spec(spec)
if not os.path.exists(tests_status_file):
self.ensure_stage()
if spec.external and not externals:
status = TestStatus.SKIPPED
elif not spec.installed:
status = TestStatus.SKIPPED
else:
status = TestStatus.NO_TESTS
return status
with open(tests_status_file, "r", encoding="utf-8") as f:
value = (f.read()).strip("\n")
return TestStatus(int(value)) if value else TestStatus.NO_TESTS
def ensure_stage(self) -> None:
"""Ensure the test suite stage directory exists."""
if not os.path.exists(self.stage):
fs.mkdirp(self.stage)
@property
def stage(self) -> Prefix:
"""The root test suite stage directory"""
if not self._stage:
self._stage = Prefix(fs.join_path(get_test_stage_dir(), self.content_hash))
return self._stage
@stage.setter
def stage(self, value: Union[Prefix, str]) -> None:
"""Set the value of a non-default stage directory."""
self._stage = value if isinstance(value, Prefix) else Prefix(value)
@property
def results_file(self) -> Prefix:
"""The path to the results summary file."""
return self.stage.join(results_filename)
@classmethod
def test_pkg_id(cls, spec: Spec) -> str:
"""The standard install test package identifier.
Args:
spec: instance of the spec under test
"""
return spec.format_path("{name}-{version}-{hash:7}")
@classmethod
def test_log_name(cls, spec: Spec) -> str:
"""The standard log filename for a spec.
Args:
spec: instance of the spec under test
"""
return f"{cls.test_pkg_id(spec)}-test-out.txt"
def log_file_for_spec(self, spec: Spec) -> Prefix:
"""The test log file path for the provided spec.
Args:
spec: instance of the spec under test
"""
return self.stage.join(self.test_log_name(spec))
def test_dir_for_spec(self, spec: Spec) -> Prefix:
"""The path to the test stage directory for the provided spec.
Args:
spec: instance of the spec under test
"""
return Prefix(self.stage.join(self.test_pkg_id(spec)))
@classmethod
def tested_file_name(cls, spec: Spec) -> str:
"""The standard test status filename for the spec.
Args:
spec: instance of the spec under test
"""
return "%s-tested.txt" % cls.test_pkg_id(spec)
def tested_file_for_spec(self, spec: Spec) -> str:
"""The test status file path for the spec.
Args:
spec: instance of the spec under test
"""
return fs.join_path(self.stage, self.tested_file_name(spec))
@property
def current_test_cache_dir(self) -> str:
"""Path to the test stage directory where the current spec's cached
build-time files were automatically copied.
Raises:
TestSuiteSpecError: If there is no spec being tested
"""
if not (self.current_test_spec and self.current_base_spec):
raise TestSuiteSpecError("Unknown test cache directory: no specs being tested")
test_spec = self.current_test_spec
base_spec = self.current_base_spec
return self.test_dir_for_spec(base_spec).cache.join(test_spec.name)
@property
def current_test_data_dir(self) -> str:
"""Path to the test stage directory where the current spec's custom
package (data) files were automatically copied.
Raises:
TestSuiteSpecError: If there is no spec being tested
"""
if not (self.current_test_spec and self.current_base_spec):
raise TestSuiteSpecError("Unknown test data directory: no specs being tested")
test_spec = self.current_test_spec
base_spec = self.current_base_spec
return self.test_dir_for_spec(base_spec).data.join(test_spec.name)
def write_test_result(self, spec: Spec, result: TestStatus) -> None:
"""Write the spec's test result to the test suite results file.
Args:
spec: instance of the spec under test
result: result from the spec's test execution (e.g, PASSED)
"""
msg = f"{self.test_pkg_id(spec)} {result}"
_add_msg_to_file(self.results_file, msg)
def write_reproducibility_data(self) -> None:
for spec in self.specs:
repo_cache_path = self.stage.repo.join(spec.name)
spack.repo.PATH.dump_provenance(spec, repo_cache_path)
for vspec in spec.package.virtuals_provided:
repo_cache_path = self.stage.repo.join(vspec.name)
if not os.path.exists(repo_cache_path):
try:
spack.repo.PATH.dump_provenance(vspec, repo_cache_path)
except spack.repo.UnknownPackageError:
pass # not all virtuals have package files
write_test_suite_file(self)
def to_dict(self) -> Dict[str, Any]:
"""Build a dictionary for the test suite.
Returns:
The dictionary contains entries for up to two keys.
* specs: list of the test suite's specs in dictionary form
* alias: the alias, or name, given to the test suite if provided
"""
specs = [s.to_dict() for s in self.specs]
d: Dict[str, Any] = {"specs": specs}
if self.alias:
d["alias"] = self.alias
return d
@staticmethod
def from_dict(d):
"""Instantiates a TestSuite based on a dictionary specs and an
optional alias:
* specs: list of the test suite's specs in dictionary form
* alias: the test suite alias
Returns:
TestSuite: Instance created from the specs
"""
specs = [Spec.from_dict(spec_dict) for spec_dict in d["specs"]]
alias = d.get("alias", None)
return TestSuite(specs, alias)
@staticmethod
def from_file(filename: str) -> "TestSuite":
"""Instantiate a TestSuite using the specs and optional alias
provided in the given file.
Args:
filename: The path to the JSON file containing the test
suite specs and optional alias.
Raises:
BaseException: sjson.SpackJSONError if problem parsing the file
"""
try:
with open(filename, encoding="utf-8") as f:
data = sjson.load(f)
test_suite = TestSuite.from_dict(data)
content_hash = os.path.basename(os.path.dirname(filename))
test_suite._hash = content_hash
return test_suite
except Exception as e:
raise sjson.SpackJSONError("error parsing JSON TestSuite:", e)
def _add_msg_to_file(filename, msg):
"""Append the message to the specified file.
Args:
filename (str): path to the file
msg (str): message to be appended to the file
"""
with open(filename, "a+", encoding="utf-8") as f:
f.write(f"{msg}\n")
| TestSuite |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 6947,
"end": 7084
} | class ____(enum.Enum):
METHOD = 'method'
STATICMETHOD = 'staticmethod'
CLASSMETHOD = 'classmethod'
PROPERTY = 'property'
| MethodKind |
python | TheAlgorithms__Python | web_programming/instagram_crawler.py | {
"start": 592,
"end": 4274
} | class ____:
"""
Class Instagram crawl instagram user information
Usage: (doctest failing on GitHub Actions)
# >>> instagram_user = InstagramUser("github")
# >>> instagram_user.is_verified
True
# >>> instagram_user.biography
'Built for developers.'
"""
def __init__(self, username):
self.url = f"https://www.instagram.com/{username}/"
self.user_data = self.get_json()
def get_json(self) -> dict:
"""
Return a dict of user information
"""
html = httpx.get(self.url, headers=headers, timeout=10).text
scripts = BeautifulSoup(html, "html.parser").find_all("script")
try:
return extract_user_profile(scripts[4])
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3])
def __repr__(self) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__(self) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def username(self) -> str:
return self.user_data["username"]
@property
def fullname(self) -> str:
return self.user_data["full_name"]
@property
def biography(self) -> str:
return self.user_data["biography"]
@property
def email(self) -> str:
return self.user_data["business_email"]
@property
def website(self) -> str:
return self.user_data["external_url"]
@property
def number_of_followers(self) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def number_of_followings(self) -> int:
return self.user_data["edge_follow"]["count"]
@property
def number_of_posts(self) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def profile_picture_url(self) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def is_verified(self) -> bool:
return self.user_data["is_verified"]
@property
def is_private(self) -> bool:
return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
"""
A self running doctest
>>> test_instagram_user()
"""
import os
if os.environ.get("CI"):
return # test failing on GitHub Actions
instagram_user = InstagramUser(username)
assert instagram_user.user_data
assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram.")
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
instagram_user = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| InstagramUser |
python | zarr-developers__zarr-python | src/zarr/core/buffer/gpu.py | {
"start": 3713,
"end": 7776
} | class ____(core.NDBuffer):
"""A n-dimensional memory block on the GPU
We use NDBuffer throughout Zarr to represent a n-dimensional memory block.
A NDBuffer is backed by an underlying ndarray-like instance that represents
the memory. The memory type is unspecified; can be regular host memory,
CUDA device memory, or something else. The only requirement is that the
ndarray-like instance can be copied/converted to a regular Numpy array
(host memory).
Notes
-----
The two buffer classes Buffer and NDBuffer are very similar. In fact, Buffer
is a special case of NDBuffer where dim=1, stride=1, and dtype="B". However,
in order to use Python's type system to differentiate between the contiguous
Buffer and the n-dim (non-contiguous) NDBuffer, we keep the definition of the
two classes separate.
Parameters
----------
array
ndarray-like object that is convertible to a regular Numpy array.
"""
def __init__(self, array: NDArrayLike) -> None:
if cp is None:
raise ImportError(
"Cannot use zarr.buffer.gpu.NDBuffer without cupy. Please install cupy."
)
# assert array.ndim > 0
assert array.dtype != object
self._data = array
if not hasattr(array, "__cuda_array_interface__"):
# Slow copy based path for arrays that don't support the __cuda_array_interface__
# TODO: Add a fast zero-copy path for arrays that support the dlpack protocol
msg = (
"Creating a zarr.buffer.gpu.NDBuffer with an array that does not support the "
"__cuda_array_interface__ for zero-copy transfers, "
"falling back to slow copy based path"
)
warnings.warn(
msg,
stacklevel=2,
)
self._data = cp.asarray(array)
@classmethod
def create(
cls,
*,
shape: Iterable[int],
dtype: npt.DTypeLike,
order: Literal["C", "F"] = "C",
fill_value: Any | None = None,
) -> Self:
ret = cls(cp.empty(shape=tuple(shape), dtype=dtype, order=order))
if fill_value is not None:
ret.fill(fill_value)
return ret
@classmethod
def empty(
cls, shape: tuple[int, ...], dtype: npt.DTypeLike, order: Literal["C", "F"] = "C"
) -> Self:
return cls(cp.empty(shape=shape, dtype=dtype, order=order))
@classmethod
def from_numpy_array(cls, array_like: npt.ArrayLike) -> Self:
"""Create a new buffer of Numpy array-like object
Parameters
----------
array_like
Object that can be coerced into a Numpy array
Returns
-------
New buffer representing `array_like`
"""
return cls(cp.asarray(array_like))
def as_numpy_array(self) -> npt.NDArray[Any]:
"""Returns the buffer as a NumPy array (host memory).
Warnings
--------
Might have to copy data, consider using `.as_ndarray_like()` instead.
Returns
-------
NumPy array of this buffer (might be a data copy)
"""
return cast("npt.NDArray[Any]", cp.asnumpy(self._data))
def __getitem__(self, key: Any) -> Self:
return self.__class__(self._data.__getitem__(key))
def __setitem__(self, key: Any, value: Any) -> None:
if isinstance(value, NDBuffer):
value = value._data
elif isinstance(value, core.NDBuffer):
gpu_value = NDBuffer(value.as_ndarray_like())
value = gpu_value._data
self._data.__setitem__(key, value)
buffer_prototype = BufferPrototype(buffer=Buffer, nd_buffer=NDBuffer)
register_buffer(Buffer, qualname="zarr.buffer.gpu.Buffer")
register_ndbuffer(NDBuffer, qualname="zarr.buffer.gpu.NDBuffer")
# backwards compatibility
register_buffer(Buffer, qualname="zarr.core.buffer.gpu.Buffer")
register_ndbuffer(NDBuffer, qualname="zarr.core.buffer.gpu.NDBuffer")
| NDBuffer |
python | patrick-kidger__equinox | equinox/nn/_attention.py | {
"start": 2682,
"end": 14017
} | class ____(Module):
r"""
Computes
$$\text{MultiheadAttention}(Q, K, V)
= \sum_i \text{Attention}\left(QW^Q_i, KW^K_i, VW^V_i\right)W^O_i$$
where:
- The inputs are
$Q \in \mathbb{R}^{d_\text{seq} \times d_\text{query}}$,
$K \in \mathbb{R}^{d_\text{seq} \times d_\text{key}}$,
$V \in \mathbb{R}^{d_\text{seq} \times d_\text{value}}$.
These are referred to as query, key, and value respectively. Meanwhile
$d_\text{seq}$ is the sequence length, and $d_\text{query}$, $d_\text{key}$,
$d_\text{value}$ are numbers of channels.
- The trainable weights are
$W^Q_i \in \mathbb{R}^{d_\text{query} \times d_\text{qk}}$,
$W^K_i \in \mathbb{R}^{d_\text{key} \times d_\text{qk}}$,
$W^V_i \in \mathbb{R}^{d_\text{value} \times d_\text{vo}}$,
$W^O_i \in \mathbb{R}^{d_\text{vo} \times d_\text{output}}$,
with $i \in \{1, \ldots, h\}$, where $h$ is the number of heads, and $d_\text{qk}$,
$d_\text{vo}$, $d_\text{output}$ are hyperparameters.
- $\text{Attention}$ is defined as
$\text{Attention}(\widetilde{Q}, \widetilde{K}, \widetilde{V})
= \text{softmax}(\frac{\widetilde{Q}\widetilde{K}^\intercal}
{\sqrt{d_\text{qk}}})\widetilde{V}$.
??? cite
[Attention is All You Need](https://arxiv.org/abs/1706.03762)
```bibtex
@inproceedings{vaswani2017attention,
author={Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and
Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and
Kaiser, {\L}ukasz and Polosukhin, Illia},
booktitle={Advances in Neural Information Processing Systems},
publisher={Curran Associates, Inc.},
title={Attention is All You Need},
volume={30},
year={2017}
}
```
!!! faq "FAQ"
Different software libraries often implement multihead attention in slightly
different ways. Some of them will or won't add on biases by default. Most of
them will fix the values of $d_\text{qk}, d_\text{vo}, d_\text{output}$ in
terms of $d_\text{query}$ or $d_\text{key}$ or $d_\text{value}$. Equinox
chooses to expose all of these as options.
Relative to the original
[Attention is All You Need](https://arxiv.org/abs/1706.03762) paper: our
$d_\text{qk}$ is their "$d_k$". Our $d_\text{vo}$ is their "$d_\text{v}$". They
fix $d_\text{query} = d_\text{key} = d_\text{value} = d_\text{output}$ and
refer to it as "$d_\text{model}$".
"""
query_proj: Linear
key_proj: Linear
value_proj: Linear
output_proj: Linear
dropout: Dropout
num_heads: int = field(static=True)
query_size: int = field(static=True)
key_size: int = field(static=True)
value_size: int = field(static=True)
output_size: int = field(static=True)
qk_size: int = field(static=True)
vo_size: int = field(static=True)
use_query_bias: bool = field(static=True)
use_key_bias: bool = field(static=True)
use_value_bias: bool = field(static=True)
use_output_bias: bool = field(static=True)
def __init__(
self,
num_heads: int,
query_size: int,
key_size: int | None = None,
value_size: int | None = None,
output_size: int | None = None,
qk_size: int | None = None,
vo_size: int | None = None,
use_query_bias: bool = False,
use_key_bias: bool = False,
use_value_bias: bool = False,
use_output_bias: bool = False,
dropout_p: float = 0.0,
inference: bool = False,
dtype=None,
*,
key: PRNGKeyArray,
):
r"""**Arguments:**
- `num_heads`: Number of parallel attention heads $h$.
- `query_size`: Number of input channels for query $Q$.
- `key_size`: Number of input channels for key $K$. Defaults to `query_size`.
- `value_size`: Number of input channels for value $V$. Defaults to
`query_size`.
- `output_size`: Number of output channels. Defaults to `query_size`.
- `qk_size`: Number of channels to compare query and key over, per head.
Defaults to `query_size // num_heads`.
- `vo_size`: Number of channels to compare attention-weighted value and output
over, per head. Defaults to `query_size // num_heads`.
- `use_query_bias`: Whether to use a bias term in the query projections.
- `use_key_bias`: Whether to use a bias term in the key projections.
- `use_value_bias`: Whether to use a bias term in the value projections.
- `use_output_bias`: Whether to use a bias term in the output projection.
- `dropout_p`: Dropout probability on attention weights.
- `inference`: Whether to actually apply dropout at all. If `True` then dropout
is not applied. If `False` then dropout is applied. This may be toggled
with [`equinox.nn.inference_mode`][] or overridden during
[`equinox.nn.MultiheadAttention.__call__`][].
- `dtype`: The dtype to use for all trainable parameters in this layer.
Defaults to either `jax.numpy.float32` or `jax.numpy.float64` depending
on whether JAX is in 64-bit mode.
- `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
initialisation. (Keyword only argument.)
"""
dtype = default_floating_dtype() if dtype is None else dtype
qkey, kkey, vkey, okey = jrandom.split(key, 4)
if key_size is None:
key_size = query_size
if value_size is None:
value_size = query_size
if qk_size is None:
qk_size = query_size // num_heads
if vo_size is None:
vo_size = query_size // num_heads
if output_size is None:
output_size = query_size
self.query_proj = Linear(
query_size,
num_heads * qk_size,
use_bias=use_query_bias,
dtype=dtype,
key=qkey,
)
self.key_proj = Linear(
key_size, num_heads * qk_size, use_bias=use_key_bias, dtype=dtype, key=kkey
)
self.value_proj = Linear(
value_size,
num_heads * vo_size,
use_bias=use_value_bias,
dtype=dtype,
key=vkey,
)
self.output_proj = Linear(
num_heads * vo_size,
output_size,
use_bias=use_output_bias,
dtype=dtype,
key=okey,
)
self.dropout = Dropout(dropout_p, inference=inference)
self.num_heads = num_heads
self.query_size = query_size
self.key_size = key_size
self.value_size = value_size
self.output_size = output_size
self.qk_size = qk_size
self.vo_size = vo_size
self.use_query_bias = use_query_bias
self.use_key_bias = use_key_bias
self.use_value_bias = use_value_bias
self.use_output_bias = use_output_bias
@named_scope("eqx.nn.MultiheadAttention")
def __call__(
self,
query: Float[Array, "q_seq q_size"],
key_: Float[Array, "kv_seq k_size"],
value: Float[Array, "kv_seq v_size"],
mask: None | _Mask = None,
*,
key: PRNGKeyArray | None = None,
inference: bool | None = None,
deterministic: bool | None = None,
process_heads: None | _ProcessHeads = None,
) -> Float[Array, "q_seq o_size"]:
"""**Arguments:**
- `query`: Query embedding. Should be a JAX array of shape
`(query_seq_length, query_size)`.
- `key_`: Key embedding. Should be a JAX array of shape
`(kv_seq_length, key_size)`.
- `value`: Value embedding. Should be a JAX array of shape
`(kv_seq_length, value_size)`.
- `mask`: Optional mask preventing attention to certain positions. Should either
be a JAX array of shape `(query_seq_length, kv_seq_length)`, or (for custom
per-head masking) `(num_heads, query_seq_length, kv_seq_length)`. A value of
`False` at a position indicates that position should be ignored.
- `key`: A `jax.random.PRNGKey` used for dropout. Unused if `dropout = 0`.
(Keyword only argument.)
- `inference`: As [`equinox.nn.Dropout.__call__`][]. (Keyword only
argument.)
- `deterministic`: (Deprecated in favour of `inference`.)
- `process_heads`: A function that takes in the query, key, and value heads and
returns new query, key, and value heads. For example, this can be
used to implement relative positional embeddings -
see e.g. `RotaryPositionalEmbedding`for an example. (Keyword only argument.)
**Returns:**
A JAX array of shape `(query_seq_length, output_size)`.
"""
if deterministic is not None:
inference = deterministic
warnings.warn(
"MultiheadAttention()(deterministic=...) is deprecated "
"in favour of MultiheadAttention()(inference=...)"
)
query_seq_length, _ = query.shape
kv_seq_length, _ = key_.shape
kv_seq_length2, _ = value.shape
if kv_seq_length != kv_seq_length2:
# query length can be different
raise ValueError("key and value must both be sequences of equal length.")
query_heads = self._project(self.query_proj, query)
key_heads = self._project(self.key_proj, key_)
value_heads = self._project(self.value_proj, value)
if process_heads is not None:
q_shape, k_shape, v_shape = (
query_heads.shape,
key_heads.shape,
value_heads.shape,
)
query_heads, key_heads, value_heads = process_heads(
query_heads, key_heads, value_heads
)
if (
query_heads.shape != q_shape
or key_heads.shape != k_shape
or value_heads.shape != v_shape
):
raise ValueError(
"process_heads must not change the shape of the heads."
)
attn_fn = partial(
dot_product_attention, dropout=self.dropout, inference=inference
)
keys = None if key is None else jax.random.split(key, query_heads.shape[1])
if mask is not None and mask.ndim == 3:
# Batch `mask` and `keys` down their 0-th dimension.
attn = jax.vmap(attn_fn, in_axes=1, out_axes=1)(
query_heads, key_heads, value_heads, mask=mask, key=keys
)
else:
# Batch `keys` down its 0-th dimension.
attn = jax.vmap(ft.partial(attn_fn, mask=mask), in_axes=1, out_axes=1)(
query_heads, key_heads, value_heads, key=keys
)
attn = attn.reshape(query_seq_length, -1)
return jax.vmap(self.output_proj)(attn)
def _project(self, proj, x):
seq_length, _ = x.shape
projection = jax.vmap(proj)(x)
return projection.reshape(seq_length, self.num_heads, -1)
| MultiheadAttention |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/decorators.py | {
"start": 6756,
"end": 8328
} | class ____:
"""
Common class for things that take Dag-like arguments.
This exists so we don't need to define these arguments separately for
``@asset`` and ``@asset.multi``.
"""
schedule: ScheduleArg
is_paused_upon_creation: bool | None = None
dag_id: str | None = None
dag_display_name: str | None = None
description: str | None = None
params: ParamsDict | None = None
on_success_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None
on_failure_callback: None | DagStateChangeCallback | list[DagStateChangeCallback] = None
access_control: dict[str, dict[str, Collection[str]]] | None = None
owner_links: dict[str, str] = attrs.field(factory=dict)
tags: Collection[str] = attrs.field(factory=set)
def create_dag(self, *, default_dag_id: str) -> DAG:
from airflow.sdk.definitions.dag import DAG
dag_id = self.dag_id or default_dag_id
return DAG(
dag_id=dag_id,
schedule=self.schedule,
is_paused_upon_creation=self.is_paused_upon_creation,
catchup=False,
dag_display_name=self.dag_display_name or dag_id,
description=self.description,
params=self.params,
on_success_callback=self.on_success_callback,
on_failure_callback=self.on_failure_callback,
access_control=self.access_control,
owner_links=self.owner_links,
tags=self.tags,
auto_register=True,
)
@attrs.define(kw_only=True)
| _DAGFactory |
python | django-import-export__django-import-export | tests/core/tests/test_instance_loaders.py | {
"start": 860,
"end": 1802
} | class ____(TestCase):
def setUp(self):
self.resource = resources.modelresource_factory(Book)()
self.dataset = tablib.Dataset(headers=["id", "name", "author_email"])
self.book = Book.objects.create(name="Some book")
self.book2 = Book.objects.create(name="Some other book")
row = [str(self.book.pk), "Some book", "test@example.com"]
self.dataset.append(row)
self.instance_loader = instance_loaders.CachedInstanceLoader(
self.resource, self.dataset
)
def test_all_instances(self):
self.assertTrue(self.instance_loader.all_instances)
self.assertEqual(len(self.instance_loader.all_instances), 1)
self.assertEqual(list(self.instance_loader.all_instances), [self.book.pk])
def test_get_instance(self):
obj = self.instance_loader.get_instance(self.dataset.dict[0])
self.assertEqual(obj, self.book)
| CachedInstanceLoaderTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.