language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | plotly__plotly.py | plotly/graph_objs/layout/newshape/_legendgrouptitle.py | {
"start": 235,
"end": 2999
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.newshape"
_path_str = "layout.newshape.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.newshape.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.newshape.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.newshap
e.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.newshape.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.newshape.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Legendgrouptitle |
python | huggingface__transformers | src/transformers/models/instructblipvideo/processing_instructblipvideo.py | {
"start": 1123,
"end": 7992
} | class ____(ProcessorMixin):
r"""
Constructs an InstructBLIPVideo processor which wraps a InstructBLIP image processor and a LLaMa/T5 tokenizer into a single
processor.
[`InstructBlipVideoProcessor`] offers all the functionalities of [`InstructBlipVideoVideoProcessor`] and [`AutoTokenizer`]. See the
docstring of [`~InstructBlipVideoProcessor.__call__`] and [`~InstructBlipVideoProcessor.decode`] for more information.
Args:
video_processor (`InstructBlipVideoVideoProcessor`):
An instance of [`InstructBlipVideoVideoProcessor`]. The video processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input.
qformer_tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
num_query_tokens (`int`, *optional*):
Number of tokens used by the Qformer as queries, should be same as in model's config.
"""
def __init__(self, video_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs):
if not hasattr(tokenizer, "video_token"):
self.video_token = AddedToken("<video>", normalized=False, special=True)
tokenizer.add_tokens([self.video_token], special_tokens=True)
else:
self.video_token = tokenizer.video_token
self.num_query_tokens = num_query_tokens
super().__init__(video_processor, tokenizer, qformer_tokenizer)
def __call__(
self,
images: Optional[VideoInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_token_type_ids: bool = False,
return_length: bool = False,
verbose: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchFeature:
"""
This method uses [`InstructBlipVideoVideoProcessor.__call__`] method to prepare image(s) or video(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
"""
if images is None and text is None:
raise ValueError("You have to specify at least one of images or text.")
encoding = {}
if text is not None:
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
qformer_text_encoding = self.qformer_tokenizer(
text=text,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_token_type_ids=return_token_type_ids,
return_length=return_length,
verbose=verbose,
return_tensors=return_tensors,
**kwargs,
)
encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
# We need this hacky manipulation because BLIP expects image tokens to be at the beginning even before BOS token
# InstrucBLIP works with 4 frames only
if max_length is not None:
max_length -= self.num_query_tokens
text_encoding = self.tokenizer(
text=text,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_token_type_ids=return_token_type_ids,
return_length=return_length,
verbose=verbose,
return_tensors=None, # required to concatenate below
**kwargs,
)
if images is not None:
video_tokens = self.video_token.content * self.num_query_tokens * 4
video_text_encoding = self.tokenizer(
video_tokens,
add_special_tokens=False, # required to concatenate below
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_token_type_ids=return_token_type_ids,
return_length=return_length,
return_tensors=None,
)
for k in text_encoding:
text_encoding[k] = [video_text_encoding[k] + sample for sample in text_encoding[k]]
encoding.update(text_encoding)
if images is not None:
image_encoding = self.video_processor(images, return_tensors=return_tensors)
encoding.update(image_encoding)
encoding = BatchFeature(encoding, tensor_type=return_tensors)
return encoding
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
video_processor_input_names = self.video_processor.model_input_names
qformer_input_names = ["qformer_input_ids", "qformer_attention_mask"]
return tokenizer_input_names + video_processor_input_names + qformer_input_names
__all__ = ["InstructBlipVideoProcessor"]
| InstructBlipVideoProcessor |
python | langchain-ai__langchain | libs/core/langchain_core/messages/ai.py | {
"start": 2757,
"end": 4437
} | class ____(TypedDict):
"""Usage metadata for a message, such as token counts.
This is a standard representation of token usage that is consistent across models.
Example:
```python
{
"input_tokens": 350,
"output_tokens": 240,
"total_tokens": 590,
"input_token_details": {
"audio": 10,
"cache_creation": 200,
"cache_read": 100,
},
"output_token_details": {
"audio": 10,
"reasoning": 200,
},
}
```
!!! warning "Behavior changed in `langchain-core` 0.3.9"
Added `input_token_details` and `output_token_details`.
!!! note "LangSmith SDK"
The LangSmith SDK also has a `UsageMetadata` class. While the two share fields,
LangSmith's `UsageMetadata` has additional fields to capture cost information
used by the LangSmith platform.
"""
input_tokens: int
"""Count of input (or prompt) tokens. Sum of all input token types."""
output_tokens: int
"""Count of output (or completion) tokens. Sum of all output token types."""
total_tokens: int
"""Total token count. Sum of `input_tokens` + `output_tokens`."""
input_token_details: NotRequired[InputTokenDetails]
"""Breakdown of input token counts.
Does *not* need to sum to full input token count. Does *not* need to have all keys.
"""
output_token_details: NotRequired[OutputTokenDetails]
"""Breakdown of output token counts.
Does *not* need to sum to full output token count. Does *not* need to have all keys.
"""
| UsageMetadata |
python | python-pillow__Pillow | src/PIL/WmfImagePlugin.py | {
"start": 1948,
"end": 5244
} | class ____(ImageFile.StubImageFile):
format = "WMF"
format_description = "Windows Metafile"
def _open(self) -> None:
# check placeable header
s = self.fp.read(44)
if s.startswith(b"\xd7\xcd\xc6\x9a\x00\x00"):
# placeable windows metafile
# get units per inch
inch = word(s, 14)
if inch == 0:
msg = "Invalid inch"
raise ValueError(msg)
self._inch: tuple[float, float] = inch, inch
# get bounding box
x0 = short(s, 6)
y0 = short(s, 8)
x1 = short(s, 10)
y1 = short(s, 12)
# normalize size to 72 dots per inch
self.info["dpi"] = 72
size = (
(x1 - x0) * self.info["dpi"] // inch,
(y1 - y0) * self.info["dpi"] // inch,
)
self.info["wmf_bbox"] = x0, y0, x1, y1
# sanity check (standard metafile header)
if s[22:26] != b"\x01\x00\t\x00":
msg = "Unsupported WMF file format"
raise SyntaxError(msg)
elif s.startswith(b"\x01\x00\x00\x00") and s[40:44] == b" EMF":
# enhanced metafile
# get bounding box
x0 = _long(s, 8)
y0 = _long(s, 12)
x1 = _long(s, 16)
y1 = _long(s, 20)
# get frame (in 0.01 millimeter units)
frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)
size = x1 - x0, y1 - y0
# calculate dots per inch from bbox and frame
xdpi = 2540.0 * (x1 - x0) / (frame[2] - frame[0])
ydpi = 2540.0 * (y1 - y0) / (frame[3] - frame[1])
self.info["wmf_bbox"] = x0, y0, x1, y1
if xdpi == ydpi:
self.info["dpi"] = xdpi
else:
self.info["dpi"] = xdpi, ydpi
self._inch = xdpi, ydpi
else:
msg = "Unsupported file format"
raise SyntaxError(msg)
self._mode = "RGB"
self._size = size
loader = self._load()
if loader:
loader.open(self)
def _load(self) -> ImageFile.StubHandler | None:
return _handler
def load(
self, dpi: float | tuple[float, float] | None = None
) -> Image.core.PixelAccess | None:
if dpi is not None:
self.info["dpi"] = dpi
x0, y0, x1, y1 = self.info["wmf_bbox"]
if not isinstance(dpi, tuple):
dpi = dpi, dpi
self._size = (
int((x1 - x0) * dpi[0] / self._inch[0]),
int((y1 - y0) * dpi[1] / self._inch[1]),
)
return super().load()
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if _handler is None or not hasattr(_handler, "save"):
msg = "WMF save handler not installed"
raise OSError(msg)
_handler.save(im, fp, filename)
#
# --------------------------------------------------------------------
# Registry stuff
Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
Image.register_save(WmfStubImageFile.format, _save)
Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"])
| WmfStubImageFile |
python | lxml__lxml | src/lxml/html/tests/test_html5parser.py | {
"start": 1175,
"end": 1739
} | class ____(unittest.TestCase):
def make_one(self, **kwargs):
from lxml.html.html5parser import XHTMLParser
return XHTMLParser(**kwargs)
@skipUnless(hasattr(html5lib, 'XHTMLParser'),
'xhtml5lib does not have XHTMLParser')
def test_integration(self):
# XXX: This test are untested. (html5lib no longer has an XHTMLParser)
parser = self.make_one(strict=True)
tree = parser.parse(XHTML_TEST_DOCUMENT)
root = tree.getroot()
self.assertEqual(root.tag, xhtml_tag('html'))
| Test_XHTMLParser |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 25057,
"end": 25181
} | class ____(Structure):
_fields_ = (("umbrella", lc_str),)
def describe(self):
return {}
| sub_framework_command |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/selector.py | {
"start": 8944,
"end": 9662
} | class ____:
location_name: str
repository_name: str
name: str
def to_graphql_input(self):
return {
"repositoryLocationName": self.location_name,
"repositoryName": self.repository_name,
"name": self.name,
}
@staticmethod
def from_graphql_input(graphql_data):
return InstigatorSelector(
location_name=graphql_data["repositoryLocationName"],
repository_name=graphql_data["repositoryName"],
name=graphql_data["name"],
)
def get_id(self) -> str:
return create_snapshot_id(self)
@property
def instigator_name(self) -> str:
return self.name
@record
| InstigatorSelector |
python | paramiko__paramiko | tests/test_config.py | {
"start": 29690,
"end": 30951
} | class ____:
def test_matches_target_host_not_hostname(self):
result = load_config("match-orighost").lookup("target")
assert result["hostname"] == "bogus"
assert result["user"] == "tuon"
def test_matches_target_host_not_canonicalized_name(self, socket):
result = load_config("match-orighost-canonical").lookup("www")
assert result["hostname"] == "www.paramiko.org"
assert result["user"] == "tuon"
def test_may_be_globbed(self):
result = load_config("match-orighost").lookup("whatever")
assert result["user"] == "matrim"
def test_may_be_comma_separated_list(self):
for target in ("comma", "separated"):
result = load_config("match-orighost").lookup(target)
assert result["user"] == "chameleon"
def test_comma_separated_list_may_have_internal_negation(self):
result = load_config("match-orighost").lookup("nope")
assert "user" not in result
def test_may_be_negated(self):
result = load_config("match-orighost").lookup("docs")
assert result["user"] == "thom"
def test_requires_an_argument(self):
with raises(ConfigParseError):
load_config("match-orighost-no-arg")
| TestMatchOriginalHost |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 58556,
"end": 61189
} | class ____:
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_allclose(cb, cbrl, atol=1.5e-7, rtol=0)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_allclose(cb1, cbrl1, atol=1.5e-8, rtol=0)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_allclose(cdg, cdgrl, atol=1.5e-8, rtol=0)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_allclose(cdgm, cdgmrl, atol=1.5e-8, rtol=0)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_allclose(cs, csrl, atol=1.5e-8, rtol=0)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_allclose(ct, ctrl, atol=1.5e-8, rtol=0)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_allclose(ct1, ctrl1, atol=1.5e-8, rtol=0)
def test_specialpoints(self):
assert_allclose(special.cotdg(45), 1.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(-45), -1.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(90), 0.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(-90), 0.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(135), -1.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(-135), 1.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(225), 1.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(-225), -1.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(270), 0.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(-270), 0.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(315), -1.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(-315), 1.0, atol=1.5e-14, rtol=0)
assert_allclose(special.cotdg(765), 1.0, atol=1.5e-14, rtol=0)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_allclose(snm, snmrl, atol=1.5e-8, rtol=0)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_allclose(snm1, snmrl1, atol=1.5e-8, rtol=0)
| TestTrigonometric |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/provided_non_null_arguments.py | {
"start": 115,
"end": 1869
} | class ____(ValidationRule):
def leave_Field(self, node, key, parent, path, ancestors):
field_def = self.context.get_field_def()
if not field_def:
return False
arg_asts = node.arguments or []
arg_ast_map = {arg.name.value: arg for arg in arg_asts}
for arg_name, arg_def in field_def.args.items():
arg_ast = arg_ast_map.get(arg_name, None)
if not arg_ast and isinstance(arg_def.type, GraphQLNonNull):
self.context.report_error(GraphQLError(
self.missing_field_arg_message(node.name.value, arg_name, arg_def.type),
[node]
))
def leave_Directive(self, node, key, parent, path, ancestors):
directive_def = self.context.get_directive()
if not directive_def:
return False
arg_asts = node.arguments or []
arg_ast_map = {arg.name.value: arg for arg in arg_asts}
for arg_name, arg_def in directive_def.args.items():
arg_ast = arg_ast_map.get(arg_name, None)
if not arg_ast and isinstance(arg_def.type, GraphQLNonNull):
self.context.report_error(GraphQLError(
self.missing_directive_arg_message(node.name.value, arg_name, arg_def.type),
[node]
))
@staticmethod
def missing_field_arg_message(name, arg_name, type):
return 'Field "{}" argument "{}" of type "{}" is required but not provided.'.format(name, arg_name, type)
@staticmethod
def missing_directive_arg_message(name, arg_name, type):
return 'Directive "{}" argument "{}" of type "{}" is required but not provided.'.format(name, arg_name, type)
| ProvidedNonNullArguments |
python | davidhalter__jedi | jedi/inference/value/iterable.py | {
"start": 889,
"end": 1538
} | class ____:
def py__next__(self, contextualized_node=None):
return self.py__iter__(contextualized_node)
def py__stop_iteration_returns(self):
return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')])
# At the moment, safe values are simple values like "foo", 1 and not
# lists/dicts. Therefore as a small speed optimization we can just do the
# default instead of resolving the lazy wrapped values, that are just
# doing this in the end as well.
# This mostly speeds up patterns like `sys.version_info >= (3, 0)` in
# typeshed.
get_safe_value = Value.get_safe_value
| IterableMixin |
python | mlflow__mlflow | mlflow/langchain/chat_agent_langgraph.py | {
"start": 2480,
"end": 11007
} | class ____(TypedDict):
"""
Helper class that enables building a LangGraph agent that produces ChatAgent-compatible
messages as state is updated. Other ChatAgent request fields (custom_inputs, context) and
response fields (custom_outputs) are also exposed within the state so they can be used and
updated over the course of agent execution. Use this class with
:py:class:`ChatAgentToolNode <mlflow.langchain.chat_agent_langgraph.ChatAgentToolNode>`.
**LangGraph ChatAgent Example**
This example has been tested to work with LangGraph 0.2.70.
Step 1: Create the LangGraph Agent
This example is adapted from LangGraph's
`create_react_agent <https://langchain-ai.github.io/langgraph/how-tos/create-react-agent/>`__
documentation. The notable differences are changes to be ChatAgent compatible. They include:
- We use :py:class:`ChatAgentState <mlflow.langchain.chat_agent_langgraph.ChatAgentState>`,
which has an internal state of
:py:class:`ChatAgentMessage <mlflow.types.agent.ChatAgentMessage>`
objects and a ``custom_outputs`` attribute under the hood
- We use :py:class:`ChatAgentToolNode <mlflow.langchain.chat_agent_langgraph.ChatAgentToolNode>`
instead of LangGraph's ToolNode to enable returning attachments and custom_outputs from
LangChain and UnityCatalog Tools
.. code-block:: python
from typing import Optional, Sequence, Union
from langchain_core.language_models import LanguageModelLike
from langchain_core.runnables import RunnableConfig, RunnableLambda
from langchain_core.tools import BaseTool
from langgraph.graph import END, StateGraph
from langgraph.graph.state import CompiledStateGraph
from langgraph.prebuilt import ToolNode
from mlflow.langchain.chat_agent_langgraph import ChatAgentState, ChatAgentToolNode
def create_tool_calling_agent(
model: LanguageModelLike,
tools: Union[ToolNode, Sequence[BaseTool]],
agent_prompt: Optional[str] = None,
) -> CompiledStateGraph:
model = model.bind_tools(tools)
def routing_logic(state: ChatAgentState):
last_message = state["messages"][-1]
if last_message.get("tool_calls"):
return "continue"
else:
return "end"
if agent_prompt:
system_message = {"role": "system", "content": agent_prompt}
preprocessor = RunnableLambda(
lambda state: [system_message] + state["messages"]
)
else:
preprocessor = RunnableLambda(lambda state: state["messages"])
model_runnable = preprocessor | model
def call_model(
state: ChatAgentState,
config: RunnableConfig,
):
response = model_runnable.invoke(state, config)
return {"messages": [response]}
workflow = StateGraph(ChatAgentState)
workflow.add_node("agent", RunnableLambda(call_model))
workflow.add_node("tools", ChatAgentToolNode(tools))
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
"agent",
routing_logic,
{
"continue": "tools",
"end": END,
},
)
workflow.add_edge("tools", "agent")
return workflow.compile()
Step 2: Define the LLM and your tools
If you want to return attachments and custom_outputs from your tool, you can return a
dictionary with keys "content", "attachments", and "custom_outputs". This dictionary will be
parsed out by the ChatAgentToolNode and properly stored in your LangGraph's state.
.. code-block:: python
from random import randint
from typing import Any
from databricks_langchain import ChatDatabricks
from langchain_core.tools import tool
@tool
def generate_random_ints(min: int, max: int, size: int) -> dict[str, Any]:
\"""Generate size random ints in the range [min, max].\"""
attachments = {"min": min, "max": max}
custom_outputs = [randint(min, max) for _ in range(size)]
content = f"Successfully generated array of {size} random ints in [{min}, {max}]."
return {
"content": content,
"attachments": attachments,
"custom_outputs": {"random_nums": custom_outputs},
}
mlflow.langchain.autolog()
tools = [generate_random_ints]
llm = ChatDatabricks(endpoint="databricks-meta-llama-3-3-70b-instruct")
langgraph_agent = create_tool_calling_agent(llm, tools)
Step 3: Wrap your LangGraph agent with ChatAgent
This makes your agent easily loggable and deployable with the PyFunc flavor in serving.
.. code-block:: python
from typing import Any, Generator, Optional
from langgraph.graph.state import CompiledStateGraph
from mlflow.pyfunc import ChatAgent
from mlflow.types.agent import (
ChatAgentChunk,
ChatAgentMessage,
ChatAgentResponse,
ChatContext,
)
class LangGraphChatAgent(ChatAgent):
def __init__(self, agent: CompiledStateGraph):
self.agent = agent
def predict(
self,
messages: list[ChatAgentMessage],
context: Optional[ChatContext] = None,
custom_inputs: Optional[dict[str, Any]] = None,
) -> ChatAgentResponse:
request = {"messages": self._convert_messages_to_dict(messages)}
messages = []
for event in self.agent.stream(request, stream_mode="updates"):
for node_data in event.values():
messages.extend(
ChatAgentMessage(**msg) for msg in node_data.get("messages", [])
)
return ChatAgentResponse(messages=messages)
def predict_stream(
self,
messages: list[ChatAgentMessage],
context: Optional[ChatContext] = None,
custom_inputs: Optional[dict[str, Any]] = None,
) -> Generator[ChatAgentChunk, None, None]:
request = {"messages": self._convert_messages_to_dict(messages)}
for event in self.agent.stream(request, stream_mode="updates"):
for node_data in event.values():
yield from (
ChatAgentChunk(**{"delta": msg}) for msg in node_data["messages"]
)
chat_agent = LangGraphChatAgent(langgraph_agent)
Step 4: Test out your model
Call ``.predict()`` and ``.predict_stream`` with dictionaries with the ChatAgentRequest schema.
.. code-block:: python
chat_agent.predict({"messages": [{"role": "user", "content": "What is 10 + 10?"}]})
for event in chat_agent.predict_stream(
{"messages": [{"role": "user", "content": "Generate me a few random nums"}]}
):
print(event)
This LangGraph ChatAgent can be logged with the logging code described in the "Logging a
ChatAgent" section of the docstring of :py:class:`ChatAgent <mlflow.pyfunc.ChatAgent>`.
"""
messages: Annotated[list[dict[str, Any]], _add_agent_messages]
context: dict[str, Any] | None
custom_inputs: dict[str, Any] | None
custom_outputs: dict[str, Any] | None
def parse_message(
msg: AnyMessage, name: str | None = None, attachments: dict[str, Any] | None = None
) -> dict[str, Any]:
"""
Parse different LangChain message types into their ChatAgentMessage schema dict equivalents
"""
chat_message_dict = convert_lc_message_to_chat_message(msg).model_dump()
chat_message_dict["attachments"] = attachments
chat_message_dict["name"] = msg.name or name
chat_message_dict["id"] = msg.id
# _convert_to_message from langchain_core.messages.utils expects an empty string instead of None
if not chat_message_dict.get("content"):
chat_message_dict["content"] = ""
chat_agent_msg = ChatAgentMessage(**chat_message_dict)
return chat_agent_msg.model_dump(exclude_none=True)
| ChatAgentState |
python | langchain-ai__langchain | libs/core/langchain_core/documents/base.py | {
"start": 992,
"end": 1897
} | class ____(Serializable):
"""Base class for content used in retrieval and data processing workflows.
Provides common fields for content that needs to be stored, indexed, or searched.
!!! note
For multimodal content in **chat messages** (images, audio sent to/from LLMs),
use `langchain.messages` content blocks instead.
"""
# The ID field is optional at the moment.
# It will likely become required in a future major release after
# it has been adopted by enough VectorStore implementations.
id: str | None = Field(default=None, coerce_numbers_to_str=True)
"""An optional identifier for the document.
Ideally this should be unique across the document collection and formatted
as a UUID, but this will not be enforced.
"""
metadata: dict = Field(default_factory=dict)
"""Arbitrary metadata associated with the content."""
| BaseMedia |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/evaluator.py | {
"start": 1340,
"end": 12353
} | class ____:
def __init__(self, target_cls=None):
self.target_cls = target_cls
def process(self, clause, *clauses):
if clauses:
clause = and_(clause, *clauses)
meth = getattr(self, f"visit_{clause.__visit_name__}", None)
if not meth:
raise UnevaluatableError(
f"Cannot evaluate {type(clause).__name__}"
)
return meth(clause)
def visit_grouping(self, clause):
return self.process(clause.element)
def visit_null(self, clause):
return lambda obj: None
def visit_false(self, clause):
return lambda obj: False
def visit_true(self, clause):
return lambda obj: True
def visit_column(self, clause):
try:
parentmapper = clause._annotations["parentmapper"]
except KeyError as ke:
raise UnevaluatableError(
f"Cannot evaluate column: {clause}"
) from ke
if self.target_cls and not issubclass(
self.target_cls, parentmapper.class_
):
raise UnevaluatableError(
"Can't evaluate criteria against "
f"alternate class {parentmapper.class_}"
)
parentmapper._check_configure()
# we'd like to use "proxy_key" annotation to get the "key", however
# in relationship primaryjoin cases proxy_key is sometimes deannotated
# and sometimes apparently not present in the first place (?).
# While I can stop it from being deannotated (though need to see if
# this breaks other things), not sure right now about cases where it's
# not there in the first place. can fix at some later point.
# key = clause._annotations["proxy_key"]
# for now, use the old way
try:
key = parentmapper._columntoproperty[clause].key
except orm_exc.UnmappedColumnError as err:
raise UnevaluatableError(
f"Cannot evaluate expression: {err}"
) from err
# note this used to fall back to a simple `getattr(obj, key)` evaluator
# if impl was None; as of #8656, we ensure mappers are configured
# so that impl is available
impl = parentmapper.class_manager[key].impl
def get_corresponding_attr(obj):
if obj is None:
return _NO_OBJECT
state = inspect(obj)
dict_ = state.dict
value = impl.get(
state, dict_, passive=PassiveFlag.PASSIVE_NO_FETCH
)
if value is LoaderCallableStatus.PASSIVE_NO_RESULT:
return _EXPIRED_OBJECT
return value
return get_corresponding_attr
def visit_tuple(self, clause):
return self.visit_clauselist(clause)
def visit_expression_clauselist(self, clause):
return self.visit_clauselist(clause)
def visit_clauselist(self, clause):
evaluators = [self.process(clause) for clause in clause.clauses]
dispatch = (
f"visit_{clause.operator.__name__.rstrip('_')}_clauselist_op"
)
meth = getattr(self, dispatch, None)
if meth:
return meth(clause.operator, evaluators, clause)
else:
raise UnevaluatableError(
f"Cannot evaluate clauselist with operator {clause.operator}"
)
def visit_binary(self, clause):
eval_left = self.process(clause.left)
eval_right = self.process(clause.right)
dispatch = f"visit_{clause.operator.__name__.rstrip('_')}_binary_op"
meth = getattr(self, dispatch, None)
if meth:
return meth(clause.operator, eval_left, eval_right, clause)
else:
raise UnevaluatableError(
f"Cannot evaluate {type(clause).__name__} with "
f"operator {clause.operator}"
)
def visit_or_clauselist_op(self, operator, evaluators, clause):
def evaluate(obj):
has_null = False
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value is _EXPIRED_OBJECT:
return _EXPIRED_OBJECT
elif value:
return True
has_null = has_null or value is None
if has_null:
return None
return False
return evaluate
def visit_and_clauselist_op(self, operator, evaluators, clause):
def evaluate(obj):
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value is _EXPIRED_OBJECT:
return _EXPIRED_OBJECT
if not value:
if value is None or value is _NO_OBJECT:
return None
return False
return True
return evaluate
def visit_comma_op_clauselist_op(self, operator, evaluators, clause):
def evaluate(obj):
values = []
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value is _EXPIRED_OBJECT:
return _EXPIRED_OBJECT
elif value is None or value is _NO_OBJECT:
return None
values.append(value)
return tuple(values)
return evaluate
def visit_custom_op_binary_op(
self, operator, eval_left, eval_right, clause
):
if operator.python_impl:
return self._straight_evaluate(
operator, eval_left, eval_right, clause
)
else:
raise UnevaluatableError(
f"Custom operator {operator.opstring!r} can't be evaluated "
"in Python unless it specifies a callable using "
"`.python_impl`."
)
def visit_is_binary_op(self, operator, eval_left, eval_right, clause):
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is _EXPIRED_OBJECT or right_val is _EXPIRED_OBJECT:
return _EXPIRED_OBJECT
return left_val == right_val
return evaluate
def visit_is_not_binary_op(self, operator, eval_left, eval_right, clause):
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is _EXPIRED_OBJECT or right_val is _EXPIRED_OBJECT:
return _EXPIRED_OBJECT
return left_val != right_val
return evaluate
def _straight_evaluate(self, operator, eval_left, eval_right, clause):
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is _EXPIRED_OBJECT or right_val is _EXPIRED_OBJECT:
return _EXPIRED_OBJECT
elif left_val is None or right_val is None:
return None
return operator(eval_left(obj), eval_right(obj))
return evaluate
def _straight_evaluate_numeric_only(
self, operator, eval_left, eval_right, clause
):
if clause.left.type._type_affinity not in (
Numeric,
Integer,
) or clause.right.type._type_affinity not in (Numeric, Integer):
raise UnevaluatableError(
f'Cannot evaluate math operator "{operator.__name__}" for '
f"datatypes {clause.left.type}, {clause.right.type}"
)
return self._straight_evaluate(operator, eval_left, eval_right, clause)
visit_add_binary_op = _straight_evaluate_numeric_only
visit_mul_binary_op = _straight_evaluate_numeric_only
visit_sub_binary_op = _straight_evaluate_numeric_only
visit_mod_binary_op = _straight_evaluate_numeric_only
visit_truediv_binary_op = _straight_evaluate_numeric_only
visit_lt_binary_op = _straight_evaluate
visit_le_binary_op = _straight_evaluate
visit_ne_binary_op = _straight_evaluate
visit_gt_binary_op = _straight_evaluate
visit_ge_binary_op = _straight_evaluate
visit_eq_binary_op = _straight_evaluate
def visit_in_op_binary_op(self, operator, eval_left, eval_right, clause):
return self._straight_evaluate(
lambda a, b: a in b if a is not _NO_OBJECT else None,
eval_left,
eval_right,
clause,
)
def visit_not_in_op_binary_op(
self, operator, eval_left, eval_right, clause
):
return self._straight_evaluate(
lambda a, b: a not in b if a is not _NO_OBJECT else None,
eval_left,
eval_right,
clause,
)
def visit_concat_op_binary_op(
self, operator, eval_left, eval_right, clause
):
if not issubclass(
clause.left.type._type_affinity, Concatenable
) or not issubclass(clause.right.type._type_affinity, Concatenable):
raise UnevaluatableError(
f"Cannot evaluate concatenate operator "
f'"{operator.__name__}" for '
f"datatypes {clause.left.type}, {clause.right.type}"
)
return self._straight_evaluate(
lambda a, b: a + b, eval_left, eval_right, clause
)
def visit_startswith_op_binary_op(
self, operator, eval_left, eval_right, clause
):
return self._straight_evaluate(
lambda a, b: a.startswith(b), eval_left, eval_right, clause
)
def visit_endswith_op_binary_op(
self, operator, eval_left, eval_right, clause
):
return self._straight_evaluate(
lambda a, b: a.endswith(b), eval_left, eval_right, clause
)
def visit_unary(self, clause):
eval_inner = self.process(clause.element)
if clause.operator is operators.inv:
def evaluate(obj):
value = eval_inner(obj)
if value is _EXPIRED_OBJECT:
return _EXPIRED_OBJECT
elif value is None:
return None
return not value
return evaluate
raise UnevaluatableError(
f"Cannot evaluate {type(clause).__name__} "
f"with operator {clause.operator}"
)
def visit_bindparam(self, clause):
if clause.callable:
val = clause.callable()
else:
val = clause.value
return lambda obj: val
def __getattr__(name: str) -> Type[_EvaluatorCompiler]:
    """Module-level attribute hook (PEP 562).

    Serves the legacy public name ``EvaluatorCompiler`` with a
    deprecation warning; any other unknown attribute raises
    ``AttributeError`` as usual.
    """
    if name != "EvaluatorCompiler":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    warn_deprecated(
        "Direct use of 'EvaluatorCompiler' is not supported, and this "
        "name will be removed in a future release. "
        "'_EvaluatorCompiler' is for internal use only",
        "2.0",
    )
    return _EvaluatorCompiler
| _EvaluatorCompiler |
python | coleifer__peewee | playhouse/postgres_ext.py | {
"start": 10984,
"end": 11042
} | class ____(Field):
field_type = 'INTERVAL'
| IntervalField |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 383642,
"end": 385766
} | class ____:
def test_alias(self):
# This test makes sure that "reciprocal" and "loguniform" are
# aliases of the same distribution and that both are log-uniform
rng = np.random.default_rng(98643218961)
rv = stats.loguniform(10 ** -3, 10 ** 0)
rvs = rv.rvs(size=10000, random_state=rng)
rng = np.random.default_rng(98643218961)
rv2 = stats.reciprocal(10 ** -3, 10 ** 0)
rvs2 = rv2.rvs(size=10000, random_state=rng)
assert_allclose(rvs2, rvs)
vals, _ = np.histogram(np.log10(rvs), bins=10)
assert 900 <= vals.min() <= vals.max() <= 1100
assert np.abs(np.median(vals) - 1000) <= 10
@pytest.mark.parametrize("method", ['mle', 'mm'])
def test_fit_override(self, method):
# loguniform is overparameterized, so check that fit override enforces
# scale=1 unless fscale is provided by the user
rng = np.random.default_rng(98643218961)
rvs = stats.loguniform.rvs(0.1, 1, size=1000, random_state=rng)
a, b, loc, scale = stats.loguniform.fit(rvs, method=method)
assert scale == 1
a, b, loc, scale = stats.loguniform.fit(rvs, fscale=2, method=method)
assert scale == 2
def test_overflow(self):
# original formulation had overflow issues; check that this is resolved
# Extensive accuracy tests elsewhere, no need to test all methods
rng = np.random.default_rng(7136519550773909093)
a, b = 1e-200, 1e200
dist = stats.loguniform(a, b)
# test roundtrip error
cdf = rng.uniform(0, 1, size=1000)
assert_allclose(dist.cdf(dist.ppf(cdf)), cdf)
rvs = dist.rvs(size=1000, random_state=rng)
assert_allclose(dist.ppf(dist.cdf(rvs)), rvs)
# test a property of the pdf (and that there is no overflow)
x = 10.**np.arange(-200, 200)
pdf = dist.pdf(x) # no overflow
assert_allclose(pdf[:-1]/pdf[1:], 10)
# check munp against wikipedia reference
mean = (b - a)/(np.log(b) - np.log(a))
assert_allclose(dist.mean(), mean)
| TestLogUniform |
python | walkccc__LeetCode | solutions/767. Reorganize String/767-2.py | {
"start": 0,
"end": 576
} | class ____:
def reorganizeString(self, s: str) -> str:
n = len(s)
count = collections.Counter(s)
maxCount = max(count.values())
if maxCount > (n + 1) // 2:
return ''
if maxCount == (n + 1) // 2:
maxLetter = max(count, key=count.get)
ans = [maxLetter if i % 2 == 0 else '' for i in range(n)]
del count[maxLetter]
i = 1
else:
ans = [''] * n
i = 0
for c, freq in count.items():
for _ in range(freq):
ans[i] = c
i += 2
if i >= n:
i = 1
return ''.join(ans)
| Solution |
python | getsentry__sentry | tests/sentry/feedback/endpoints/test_project_user_reports.py | {
"start": 518,
"end": 7986
} | class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1).isoformat()
self.environment = self.create_environment(project=self.project, name="production")
self.event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": self.min_ago,
"environment": self.environment.name,
},
project_id=self.project.id,
)
self.environment2 = self.create_environment(project=self.project, name="staging")
self.event2 = self.store_event(
data={
"event_id": "b" * 32,
"timestamp": self.min_ago,
"environment": self.environment2.name,
},
project_id=self.project.id,
)
self.report = UserReport.objects.create(
project_id=self.project.id,
environment_id=self.environment.id,
event_id="a" * 32,
name="Foo",
email="foo@example.com",
comments="Hello world",
group_id=self.event.group.id,
)
self.report2 = UserReport.objects.create(
project_id=self.project.id,
event_id="b" * 32,
name="Foo",
email="foo@example.com",
comments="Hello world",
group_id=self.event.group.id,
)
def test_simple(self) -> None:
self.login_as(user=self.user)
project = self.create_project()
event1 = self.store_event(
data={
"timestamp": timezone.now().isoformat(),
"event_id": "a" * 32,
"message": "something went wrong",
},
project_id=project.id,
)
group = event1.group
event2 = self.store_event(
data={
"timestamp": timezone.now().isoformat(),
"event_id": "c" * 32,
"message": "testing",
},
project_id=project.id,
)
group2 = event2.group
group2.status = GroupStatus.RESOLVED
group2.substatus = None
group2.save()
report_1 = UserReport.objects.create(
project_id=project.id,
event_id=event1.event_id,
name="Foo",
email="foo@example.com",
comments="Hello world",
group_id=group.id,
)
# should not be included due to missing link
UserReport.objects.create(
project_id=project.id,
event_id="b" * 32,
name="Bar",
email="bar@example.com",
comments="Hello world",
)
# should not be included due to resolution
UserReport.objects.create(
project_id=project.id,
event_id=event2.event_id,
name="Baz",
email="baz@example.com",
comments="Hello world",
group_id=group2.id,
)
url = _make_url(project)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert sorted(map(lambda x: x["id"], response.data)) == sorted([str(report_1.id)])
def test_cannot_access_with_dsn_auth(self) -> None:
project = self.create_project()
project_key = self.create_project_key(project=project)
url = _make_url(project)
response = self.client.get(url, HTTP_AUTHORIZATION=f"DSN {project_key.dsn_public}")
assert response.status_code == 401, response.content
def test_all_reports(self) -> None:
self.login_as(user=self.user)
project = self.create_project()
event = self.store_event(
data={
"timestamp": timezone.now().isoformat(),
"event_id": "a" * 32,
"message": "testing",
},
project_id=project.id,
)
group = event.group
report_1 = UserReport.objects.create(
project_id=project.id,
event_id="a" * 32,
name="Foo",
email="foo@example.com",
comments="Hello world",
group_id=group.id,
)
group.status = GroupStatus.RESOLVED
group.substatus = None
group.save()
url = _make_url(project)
response = self.client.get(f"{url}?status=", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert sorted(map(lambda x: x["id"], response.data)) == sorted([str(report_1.id)])
def test_environments(self) -> None:
self.login_as(user=self.user)
base_url = _make_url(self.project)
# Specify environment
response = self.client.get(base_url + "?environment=production")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == "a" * 32
# No environment
response = self.client.get(base_url + "?environment=")
assert response.status_code == 200
assert response.data == []
# All environments
response = self.client.get(base_url)
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert {report["eventID"] for report in response.data} == {"a" * 32, "b" * 32}
# Invalid environment
response = self.client.get(base_url + "?environment=invalid_env")
assert response.status_code == 200
assert response.data == []
@patch("sentry.quotas.backend.get_event_retention")
def test_retention(self, mock_get_event_retention: MagicMock) -> None:
self.login_as(user=self.user)
retention_days = 21
mock_get_event_retention.return_value = retention_days
UserReport.objects.all().delete() # clear reports saved in setup
UserReport.objects.create(
project_id=self.project.id,
event_id="f" * 32,
environment_id=self.environment.id,
group_id=123,
date_added=before_now(days=retention_days + 1),
)
response = self.client.get(_make_url(self.project))
assert response.status_code == 200
assert len(response.data) == 0
@patch("sentry.quotas.backend.get_event_retention")
def test_event_retention(self, mock_get_event_retention: MagicMock) -> None:
self.login_as(user=self.user)
retention_days = 21
mock_get_event_retention.return_value = retention_days
old_event = self.store_event(
data={
"event_id": "f" * 32,
"timestamp": before_now(days=retention_days + 1).isoformat(),
"environment": self.environment.name,
},
project_id=self.project.id,
)
UserReport.objects.create(
project_id=self.project.id,
event_id=old_event.event_id,
environment_id=self.environment.id,
group_id=old_event.group.id,
date_added=before_now(days=1),
)
response = self.client.get(_make_url(self.project))
# We don't care what is returned here, only that no QueryOutsideRetentionError is thrown.
assert response.status_code == 200
| ProjectUserReportListTest |
python | fastai__fastai | fastai/metrics.py | {
"start": 1430,
"end": 17345
} | class ____(Metric):
"Stores predictions and targets on CPU in accumulate to perform final calculations with `func`."
def __init__(self, func, dim_argmax=None, activation=ActivationType.No, thresh=None, to_np=False,
invert_arg=False, flatten=True, name=None, **kwargs):
store_attr('func,dim_argmax,activation,thresh,flatten')
self._name = ifnone(name, self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__)
self.to_np,self.invert_args,self.kwargs = to_np,invert_arg,kwargs
def reset(self):
"Clear all targs and preds"
self.targs,self.preds = [],[]
def accumulate(self, learn):
"Store targs and preds from `learn`, using activation function and argmax as appropriate"
pred = learn.pred
if self.activation in [ActivationType.Softmax, ActivationType.BinarySoftmax]:
pred = F.softmax(pred, dim=self.dim_argmax)
if self.activation == ActivationType.BinarySoftmax: pred = pred[:, -1]
elif self.activation == ActivationType.Sigmoid: pred = torch.sigmoid(pred)
elif self.dim_argmax: pred = pred.argmax(dim=self.dim_argmax)
if self.thresh: pred = (pred >= self.thresh)
self.accum_values(pred,learn.y,learn)
def accum_values(self, preds, targs,learn=None):
"Store targs and preds"
to_d = learn.to_detach if learn is not None else to_detach
preds,targs = to_d(preds),to_d(targs)
if self.flatten: preds,targs = flatten_check(preds,targs)
self.preds.append(preds)
self.targs.append(targs)
def __call__(self, preds, targs):
"Calculate metric on one batch of data"
self.reset()
self.accum_values(preds,targs)
return self.value
@property
def value(self):
"Value of the metric using accumulated preds and targs"
if len(self.preds) == 0: return
preds,targs = torch.cat(self.preds),torch.cat(self.targs)
if self.to_np: preds,targs = preds.numpy(),targs.numpy()
return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs)
@property
def name(self): return self._name
@name.setter
def name(self, value): self._name = value
# %% ../nbs/13b_metrics.ipynb 16
def skm_to_fastai(func, is_class=True, thresh=None, axis=-1, activation=None, **kwargs):
"Convert `func` from sklearn.metrics to a fastai metric"
dim_argmax = axis if is_class and thresh is None else None
if activation is None:
activation = ActivationType.Sigmoid if (is_class and thresh is not None) else ActivationType.No
return AccumMetric(func, dim_argmax=dim_argmax, activation=activation, thresh=thresh,
to_np=True, invert_arg=True, **kwargs)
# %% ../nbs/13b_metrics.ipynb 22
def optim_metric(f, argname, bounds, tol=0.01, do_neg=True, get_x=False):
"Replace metric `f` with a version that optimizes argument `argname`"
def _f(preds, targs):
def minfunc(x):
kwargs = {argname:x}
res = f(preds, targs, **kwargs)
return -res if do_neg else res
optres = scipy.optimize.minimize_scalar(minfunc, bounds=bounds, method='bounded',
options={'xatol':0.01})
fun = -optres.fun if do_neg else optres.fun
return (fun,optres.x) if get_x else fun
_f.__name__ = f'opt_{f.__name__}'
return _f
# %% ../nbs/13b_metrics.ipynb 26
def accuracy(inp, targ, axis=-1):
"Compute accuracy with `targ` when `pred` is bs * n_classes"
pred,targ = flatten_check(inp.argmax(dim=axis), targ)
return (pred == targ).float().mean()
# %% ../nbs/13b_metrics.ipynb 29
def error_rate(inp, targ, axis=-1):
"1 - `accuracy`"
return 1 - accuracy(inp, targ, axis=axis)
# %% ../nbs/13b_metrics.ipynb 31
def top_k_accuracy(inp, targ, k=5, axis=-1):
"Computes the Top-k accuracy (`targ` is in the top `k` predictions of `inp`)"
inp = inp.topk(k=k, dim=axis)[1]
targ = targ.unsqueeze(dim=axis).expand_as(inp)
return (inp == targ).sum(dim=-1).float().mean()
# %% ../nbs/13b_metrics.ipynb 33
def APScoreBinary(axis=-1, average='macro', pos_label=1, sample_weight=None):
"Average Precision for single-label binary classification problems"
return skm_to_fastai(skm.average_precision_score, axis=axis, activation=ActivationType.BinarySoftmax,
average=average, pos_label=pos_label, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 35
def BalancedAccuracy(axis=-1, sample_weight=None, adjusted=False):
"Balanced Accuracy for single-label binary classification problems"
return skm_to_fastai(skm.balanced_accuracy_score, axis=axis,
sample_weight=sample_weight, adjusted=adjusted)
# %% ../nbs/13b_metrics.ipynb 37
def BrierScore(axis=-1, sample_weight=None, pos_label=None):
"Brier score for single-label classification problems"
return skm_to_fastai(skm.brier_score_loss, axis=axis,
sample_weight=sample_weight, pos_label=pos_label)
# %% ../nbs/13b_metrics.ipynb 39
def CohenKappa(axis=-1, labels=None, weights=None, sample_weight=None):
"Cohen kappa for single-label classification problems"
return skm_to_fastai(skm.cohen_kappa_score, axis=axis, labels=labels, weights=weights,
sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 41
def F1Score(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"F1 score for single-label classification problems"
return skm_to_fastai(skm.f1_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 43
def FBeta(beta, axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"FBeta score with `beta` for single-label classification problems"
return skm_to_fastai(skm.fbeta_score, axis=axis,
beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 45
def HammingLoss(axis=-1, sample_weight=None):
"Hamming loss for single-label classification problems"
return skm_to_fastai(skm.hamming_loss, axis=axis,
sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 47
def Jaccard(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Jaccard score for single-label classification problems"
return skm_to_fastai(skm.jaccard_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 49
def Precision(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Precision for single-label classification problems"
return skm_to_fastai(skm.precision_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 51
def Recall(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Recall for single-label classification problems"
return skm_to_fastai(skm.recall_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 53
def RocAuc(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='ovr'):
"Area Under the Receiver Operating Characteristic Curve for single-label multiclass classification problems"
assert multi_class in ['ovr', 'ovo']
return skm_to_fastai(skm.roc_auc_score, axis=axis, activation=ActivationType.Softmax, flatten=False,
average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class)
# %% ../nbs/13b_metrics.ipynb 55
def RocAucBinary(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='raise'):
"Area Under the Receiver Operating Characteristic Curve for single-label binary classification problems"
return skm_to_fastai(skm.roc_auc_score, axis=axis, activation=ActivationType.BinarySoftmax,
average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class)
# %% ../nbs/13b_metrics.ipynb 57
def MatthewsCorrCoef(sample_weight=None, **kwargs):
"Matthews correlation coefficient for single-label classification problems"
return skm_to_fastai(skm.matthews_corrcoef, sample_weight=sample_weight, **kwargs)
# %% ../nbs/13b_metrics.ipynb 60
def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
"Compute accuracy when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
return ((inp>thresh)==targ.bool()).float().mean()
# %% ../nbs/13b_metrics.ipynb 63
def APScoreMulti(sigmoid=True, average='macro', pos_label=1, sample_weight=None):
"Average Precision for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.average_precision_score, activation=activation, flatten=False,
average=average, pos_label=pos_label, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 65
def BrierScoreMulti(thresh=0.5, sigmoid=True, sample_weight=None, pos_label=None):
"Brier score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.brier_score_loss, thresh=thresh, activation=activation, flatten=False,
sample_weight=sample_weight, pos_label=pos_label)
# %% ../nbs/13b_metrics.ipynb 67
def F1ScoreMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"F1 score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.f1_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 69
def FBetaMulti(beta, thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"FBeta score with `beta` for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.fbeta_score, thresh=thresh, activation=activation, flatten=False,
beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 71
def HammingLossMulti(thresh=0.5, sigmoid=True, labels=None, sample_weight=None):
"Hamming loss for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.hamming_loss, thresh=thresh, activation=activation, flatten=False,
sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 73
def JaccardMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Jaccard score for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.jaccard_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 75
def MatthewsCorrCoefMulti(thresh=0.5, sigmoid=True, sample_weight=None):
"Matthews correlation coefficient for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.matthews_corrcoef, thresh=thresh, activation=activation, flatten=False, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 77
def PrecisionMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Precision for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.precision_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 79
def RecallMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
"Recall for multi-label classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.recall_score, thresh=thresh, activation=activation, flatten=False,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 81
def RocAucMulti(sigmoid=True, average='macro', sample_weight=None, max_fpr=None):
"Area Under the Receiver Operating Characteristic Curve for multi-label binary classification problems"
activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
return skm_to_fastai(skm.roc_auc_score, activation=activation, flatten=False,
average=average, sample_weight=sample_weight, max_fpr=max_fpr)
# %% ../nbs/13b_metrics.ipynb 85
def mse(inp,targ):
"Mean squared error between `inp` and `targ`."
return F.mse_loss(*flatten_check(inp,targ))
# %% ../nbs/13b_metrics.ipynb 87
def _rmse(inp, targ): return torch.sqrt(F.mse_loss(inp, targ))
rmse = AccumMetric(_rmse)
rmse.__doc__ = "Root mean squared error"
# %% ../nbs/13b_metrics.ipynb 90
def mae(inp,targ):
"Mean absolute error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return torch.abs(inp - targ).mean()
# %% ../nbs/13b_metrics.ipynb 92
def msle(inp, targ):
"Mean squared logarithmic error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return F.mse_loss(torch.log(1 + inp), torch.log(1 + targ))
# %% ../nbs/13b_metrics.ipynb 94
def _exp_rmspe(inp,targ):
inp,targ = torch.exp(inp),torch.exp(targ)
return torch.sqrt(((targ - inp)/targ).pow(2).mean())
exp_rmspe = AccumMetric(_exp_rmspe)
exp_rmspe.__doc__ = "Root mean square percentage error of the exponential of predictions and targets"
# %% ../nbs/13b_metrics.ipynb 97
def ExplainedVariance(sample_weight=None):
"Explained variance between predictions and targets"
return skm_to_fastai(skm.explained_variance_score, is_class=False, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 99
def R2Score(sample_weight=None):
"R2 score between predictions and targets"
return skm_to_fastai(skm.r2_score, is_class=False, sample_weight=sample_weight)
# %% ../nbs/13b_metrics.ipynb 101
@delegates(AccumMetric)
def PearsonCorrCoef(dim_argmax=None, **kwargs):
"Pearson correlation coefficient for regression problem"
def pearsonr(x,y): return scs.pearsonr(x,y)[0]
return AccumMetric(pearsonr, invert_arg=False, dim_argmax=dim_argmax, **kwargs)
# %% ../nbs/13b_metrics.ipynb 104
@delegates(AccumMetric)
def SpearmanCorrCoef(dim_argmax=None, axis=0, nan_policy='propagate', **kwargs):
"Spearman correlation coefficient for regression problem"
def spearmanr(a,b=None,**kwargs): return scs.spearmanr(a,b,**kwargs)[0]
return AccumMetric(partial(spearmanr, axis=axis, nan_policy=nan_policy),
invert_arg=False, dim_argmax=dim_argmax, **kwargs)
# %% ../nbs/13b_metrics.ipynb 112
def foreground_acc(inp, targ, bkg_idx=0, axis=1):
"Computes non-background accuracy for multiclass segmentation"
targ = cast(targ.squeeze(1), TensorBase)
mask = targ != bkg_idx
return (inp.argmax(dim=axis)[mask]==targ[mask]).float().mean()
# %% ../nbs/13b_metrics.ipynb 114
| AccumMetric |
python | ray-project__ray | release/llm_tests/serve/probes/query_utils.py | {
"start": 5398,
"end": 6631
} | class ____(BaseProbe):
def __init__(
self,
client: openai.AsyncClient,
default_configuration=None,
retryable_error_types: Sequence[Type[APIStatusError]] = None,
):
super().__init__(client, retryable_error_types)
self.default_configuration = default_configuration or {}
async def query(
self,
model: str,
stream: bool = False,
chat: bool = True,
**chat_args,
):
args = {
**self.default_configuration,
"model": model,
"stream": stream,
**chat_args,
}
if stream:
args["stream_options"] = {
"include_usage": True,
}
if chat:
method = self.client.chat.completions.create
else:
method = self.client.completions.create
method = backoff.on_exception(
backoff.constant,
self.retryable_error_types,
max_tries=DEFAULT_MAX_ATTEMPTS,
)(method)
res = await method(**args)
wrapped_response = [v async for v in res] if stream else [res]
return TextGenerationProbeResponse(wrapped_response)
| TextGenerationProbeQuerier |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/operators/campaign_manager.py | {
"start": 1395,
"end": 4806
} | class ____(BaseOperator):
"""
Deletes a report by its ID.
.. seealso::
Check official API docs:
`https://developers.google.com/doubleclick-advertisers/rest/v4/reports/delete`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleCampaignManagerDeleteReportOperator`
:param profile_id: The DFA user profile ID.
:param report_name: The name of the report to delete.
:param report_id: The ID of the report.
:param api_version: The version of the api that will be requested, for example 'v4'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"profile_id",
"report_id",
"report_name",
"api_version",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
profile_id: str,
report_name: str | None = None,
report_id: str | None = None,
api_version: str = "v4",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if not (report_name or report_id):
raise AirflowException("Please provide `report_name` or `report_id`.")
if report_name and report_id:
raise AirflowException("Please provide only one parameter `report_name` or `report_id`.")
self.profile_id = profile_id
self.report_name = report_name
self.report_id = report_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = GoogleCampaignManagerHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if self.report_name:
reports = hook.list_reports(profile_id=self.profile_id)
reports_with_name = [r for r in reports if r["name"] == self.report_name]
for report in reports_with_name:
report_id = report["id"]
self.log.info("Deleting Campaign Manager report: %s", report_id)
hook.delete_report(profile_id=self.profile_id, report_id=report_id)
self.log.info("Report deleted.")
elif self.report_id:
self.log.info("Deleting Campaign Manager report: %s", self.report_id)
hook.delete_report(profile_id=self.profile_id, report_id=self.report_id)
self.log.info("Report deleted.")
| GoogleCampaignManagerDeleteReportOperator |
python | pydantic__pydantic | pydantic/v1/mypy.py | {
"start": 2718,
"end": 8249
} | class ____(Plugin):
def __init__(self, options: Options) -> None:
self.plugin_config = PydanticPluginConfig(options)
self._plugin_data = self.plugin_config.to_data()
super().__init__(options)
def get_base_class_hook(self, fullname: str) -> 'Optional[Callable[[ClassDefContext], None]]':
sym = self.lookup_fully_qualified(fullname)
if sym and isinstance(sym.node, TypeInfo): # pragma: no branch
# No branching may occur if the mypy cache has not been cleared
if any(get_fullname(base) == BASEMODEL_FULLNAME for base in sym.node.mro):
return self._pydantic_model_class_maker_callback
return None
def get_metaclass_hook(self, fullname: str) -> Optional[Callable[[ClassDefContext], None]]:
if fullname == MODEL_METACLASS_FULLNAME:
return self._pydantic_model_metaclass_marker_callback
return None
def get_function_hook(self, fullname: str) -> 'Optional[Callable[[FunctionContext], Type]]':
sym = self.lookup_fully_qualified(fullname)
if sym and sym.fullname == FIELD_FULLNAME:
return self._pydantic_field_callback
return None
def get_method_hook(self, fullname: str) -> Optional[Callable[[MethodContext], Type]]:
if fullname.endswith('.from_orm'):
return from_orm_callback
return None
def get_class_decorator_hook(self, fullname: str) -> Optional[Callable[[ClassDefContext], None]]:
"""Mark pydantic.dataclasses as dataclass.
Mypy version 1.1.1 added support for `@dataclass_transform` decorator.
"""
if fullname == DATACLASS_FULLNAME and MYPY_VERSION_TUPLE < (1, 1):
return dataclasses.dataclass_class_maker_callback # type: ignore[return-value]
return None
def report_config_data(self, ctx: ReportConfigContext) -> Dict[str, Any]:
"""Return all plugin config data.
Used by mypy to determine if cache needs to be discarded.
"""
return self._plugin_data
def _pydantic_model_class_maker_callback(self, ctx: ClassDefContext) -> None:
transformer = PydanticModelTransformer(ctx, self.plugin_config)
transformer.transform()
def _pydantic_model_metaclass_marker_callback(self, ctx: ClassDefContext) -> None:
"""Reset dataclass_transform_spec attribute of ModelMetaclass.
Let the plugin handle it. This behavior can be disabled
if 'debug_dataclass_transform' is set to True', for testing purposes.
"""
if self.plugin_config.debug_dataclass_transform:
return
info_metaclass = ctx.cls.info.declared_metaclass
assert info_metaclass, "callback not passed from 'get_metaclass_hook'"
if getattr(info_metaclass.type, 'dataclass_transform_spec', None):
info_metaclass.type.dataclass_transform_spec = None # type: ignore[attr-defined]
def _pydantic_field_callback(self, ctx: FunctionContext) -> 'Type':
"""
Extract the type of the `default` argument from the Field function, and use it as the return type.
In particular:
* Check whether the default and default_factory argument is specified.
* Output an error if both are specified.
* Retrieve the type of the argument which is specified, and use it as return type for the function.
"""
default_any_type = ctx.default_return_type
assert ctx.callee_arg_names[0] == 'default', '"default" is no longer first argument in Field()'
assert ctx.callee_arg_names[1] == 'default_factory', '"default_factory" is no longer second argument in Field()'
default_args = ctx.args[0]
default_factory_args = ctx.args[1]
if default_args and default_factory_args:
error_default_and_default_factory_specified(ctx.api, ctx.context)
return default_any_type
if default_args:
default_type = ctx.arg_types[0][0]
default_arg = default_args[0]
# Fallback to default Any type if the field is required
if not isinstance(default_arg, EllipsisExpr):
return default_type
elif default_factory_args:
default_factory_type = ctx.arg_types[1][0]
# Functions which use `ParamSpec` can be overloaded, exposing the callable's types as a parameter
# Pydantic calls the default factory without any argument, so we retrieve the first item
if isinstance(default_factory_type, Overloaded):
if MYPY_VERSION_TUPLE > (0, 910):
default_factory_type = default_factory_type.items[0]
else:
# Mypy0.910 exposes the items of overloaded types in a function
default_factory_type = default_factory_type.items()[0] # type: ignore[operator]
if isinstance(default_factory_type, CallableType):
ret_type = get_proper_type(default_factory_type.ret_type)
if (
isinstance(ret_type, Instance)
and ret_type.args
and all(isinstance(arg, TypeVarType) for arg in ret_type.args)
):
# Looks like the default factory is a type like `list` or `dict`, replace all args with `Any`
ret_type = ret_type.copy_modified(args=[default_any_type] * len(ret_type.args))
return ret_type
return default_any_type
| PydanticPlugin |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 6900,
"end": 7866
} | class ____(object):
"""*
jina gRPC service for DataRequests.
This is used to send requests to Executors when a list of requests is not needed
"""
@staticmethod
def stream_doc(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_stream(
request,
target,
'/jina.JinaSingleDocumentRequestRPC/stream_doc',
jina__pb2.SingleDocumentRequestProto.SerializeToString,
jina__pb2.SingleDocumentRequestProto.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| JinaSingleDocumentRequestRPC |
python | kamyu104__LeetCode-Solutions | Python/most-frequent-even-element.py | {
"start": 63,
"end": 340
} | class ____(object):
def mostFrequentEven(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
cnt = collections.Counter(x for x in nums if x%2 == 0)
return max(cnt.iterkeys(), key=lambda x: (cnt[x], -x)) if cnt else -1
| Solution |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 3069,
"end": 4279
} | class ____(TreeTestCase):
def test_run_doctest(self):
import doctest
class DummyStream:
content = ""
encoding = "utf8"
def write(self, text):
self.content += text
def flush(self):
pass
dummy_stream = DummyStream()
before = sys.stdout
sys.stdout = dummy_stream
doctest.testfile(
os.path.join(os.path.dirname(__file__), "doctests.txt"),
module_relative=False,
optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS,
encoding="utf-8",
)
sys.stdout = before
content = dummy_stream.content
if content:
before.write(content + "\n")
self.fail()
# genres.json defines the following tree structure
#
# 1 - 1 0 1 16 action
# 2 1 1 1 2 9 +-- platformer
# 3 2 1 2 3 4 | |-- platformer_2d
# 4 2 1 2 5 6 | |-- platformer_3d
# 5 2 1 2 7 8 | +-- platformer_4d
# 6 1 1 1 10 15 +-- shmup
# 7 6 1 2 11 12 |-- shmup_vertical
# 8 6 1 2 13 14 +-- shmup_horizontal
# 9 - 2 0 1 6 rpg
# 10 9 2 1 2 3 |-- arpg
# 11 9 2 1 4 5 +-- trpg
| DocTestTestCase |
python | falconry__falcon | tests/test_headers.py | {
"start": 568,
"end": 3364
} | class ____:
def __init__(self, last_modified=None):
if last_modified is not None:
self.last_modified = last_modified
else:
self.last_modified = _utcnow()
def _overwrite_headers(self, req, resp):
resp.content_type = 'x-falcon/peregrine'
resp.cache_control = ['no-store']
def on_get(self, req, resp):
resp.text = '{}'
resp.content_type = 'x-falcon/peregrine'
resp.cache_control = [
'public',
'private',
'no-cache',
'no-store',
'must-revalidate',
'proxy-revalidate',
'max-age=3600',
's-maxage=60',
'no-transform',
]
resp.etag = None # Header not set yet, so should be a noop
resp.etag = 'fa0d1a60ef6616bb28038515c8ea4cb2'
resp.last_modified = self.last_modified
resp.retry_after = 3601
# Relative URI's are OK per
# https://datatracker.ietf.org/doc/html/rfc7231#section-7.1.2
resp.location = '/things/87'
resp.content_location = '/things/78'
resp.downloadable_as = None # Header not set yet, so should be a noop
resp.downloadable_as = 'Some File.zip'
if req.range_unit is None or req.range_unit == 'bytes':
# bytes 0-499/10240
resp.content_range = (0, 499, 10 * 1024)
else:
resp.content_range = (0, 25, 100, req.range_unit)
resp.accept_ranges = None # Header not set yet, so should be a noop
resp.accept_ranges = 'bytes'
# Test the removal of custom headers
resp.set_header('X-Client-Should-Never-See-This', 'abc')
assert resp.get_header('x-client-should-never-see-this') == 'abc'
resp.delete_header('x-client-should-never-see-this')
self.req = req
self.resp = resp
def on_head(self, req, resp):
resp.set_header('Content-Type', 'x-swallow/unladen')
resp.set_header('X-Auth-Token', 'setecastronomy')
resp.set_header('X-AUTH-TOKEN', 'toomanysecrets')
resp.location = '/things/87'
del resp.location
self._overwrite_headers(req, resp)
self.resp = resp
def on_post(self, req, resp):
resp.set_headers(
[
('CONTENT-TYPE', 'x-swallow/unladen'),
('X-Auth-Token', 'setecastronomy'),
('X-AUTH-TOKEN', 'toomanysecrets'),
]
)
self._overwrite_headers(req, resp)
self.resp = resp
def on_put(self, req, resp):
resp.set_headers(
{'CONTENT-TYPE': 'x-swallow/unladen', 'X-aUTH-tOKEN': 'toomanysecrets'}
)
self._overwrite_headers(req, resp)
self.resp = resp
| HeaderHelpersResource |
python | PyCQA__pylint | tests/functional/b/bad_reversed_sequence.py | {
"start": 327,
"end": 497
} | class ____:
""" Implements __len__ and __getitem__ """
def __len__(self):
return 3
def __getitem__(self, index):
return index
| SecondGoodReversed |
python | sympy__sympy | sympy/core/numbers.py | {
"start": 102320,
"end": 106940
} | class ____(Number, metaclass=Singleton):
"""Negative infinite quantity.
NegativeInfinity is a singleton, and can be accessed
by ``S.NegativeInfinity``.
See Also
========
Infinity
"""
is_extended_real = True
is_complex = False
is_commutative = True
is_infinite = True
is_comparable = True
is_extended_negative = True
is_number = True
is_prime = False
__slots__ = ()
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"-\infty"
def _eval_subs(self, old, new):
if self == old:
return new
def _eval_evalf(self, prec=None):
return Float('-inf')
def evalf(self, prec=None, **options):
return self._eval_evalf(prec)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (S.Infinity, S.NaN):
return S.NaN
return self
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other in (S.NegativeInfinity, S.NaN):
return S.NaN
return self
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
return (-self).__add__(other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other.is_zero or other is S.NaN:
return S.NaN
if other.is_extended_positive:
return self
return S.Infinity
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if isinstance(other, Number) and global_parameters.evaluate:
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
if other.is_extended_nonnegative:
return self
return S.Infinity
return Number.__truediv__(self, other)
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.Infinity
def _eval_power(self, expt):
"""
``expt`` is symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression Result Notes
================ ======= ==============================
``(-oo) ** nan`` ``nan``
``(-oo) ** oo`` ``nan``
``(-oo) ** -oo`` ``nan``
``(-oo) ** e`` ``oo`` ``e`` is positive even integer
``(-oo) ** o`` ``-oo`` ``o`` is positive odd integer
================ ======= ==============================
See Also
========
Infinity
Pow
NaN
"""
if expt.is_number:
if expt is S.NaN or \
expt is S.Infinity or \
expt is S.NegativeInfinity:
return S.NaN
if isinstance(expt, Integer) and expt.is_extended_positive:
if expt.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
inf_part = S.Infinity**expt
s_part = S.NegativeOne**expt
if inf_part == 0 and s_part.is_finite:
return inf_part
if (inf_part is S.ComplexInfinity and
s_part.is_finite and not s_part.is_zero):
return S.ComplexInfinity
return s_part*inf_part
def _as_mpf_val(self, prec):
return mlib.fninf
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
return other is S.NegativeInfinity or other == float('-inf')
def __ne__(self, other):
return other is not S.NegativeInfinity and other != float('-inf')
__gt__ = Expr.__gt__
__ge__ = Expr.__ge__
__lt__ = Expr.__lt__
__le__ = Expr.__le__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if not isinstance(other, Expr):
return NotImplemented
return S.NaN
__rmod__ = __mod__
def floor(self):
return self
def ceiling(self):
return self
def as_powers_dict(self):
return {S.NegativeOne: 1, S.Infinity: 1}
| NegativeInfinity |
python | openai__openai-python | tests/api_resources/vector_stores/test_files.py | {
"start": 588,
"end": 12655
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
file = client.vector_stores.files.create(
vector_store_id="vs_abc123",
file_id="file_id",
)
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
file = client.vector_stores.files.create(
vector_store_id="vs_abc123",
file_id="file_id",
attributes={"foo": "string"},
chunking_strategy={"type": "auto"},
)
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.vector_stores.files.with_raw_response.create(
vector_store_id="vs_abc123",
file_id="file_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.vector_stores.files.with_streaming_response.create(
vector_store_id="vs_abc123",
file_id="file_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(VectorStoreFile, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.create(
vector_store_id="",
file_id="file_id",
)
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
file = client.vector_stores.files.retrieve(
file_id="file-abc123",
vector_store_id="vs_abc123",
)
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.vector_stores.files.with_raw_response.retrieve(
file_id="file-abc123",
vector_store_id="vs_abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.vector_stores.files.with_streaming_response.retrieve(
file_id="file-abc123",
vector_store_id="vs_abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(VectorStoreFile, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.retrieve(
file_id="file-abc123",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.vector_stores.files.with_raw_response.retrieve(
file_id="",
vector_store_id="vs_abc123",
)
@parametrize
def test_method_update(self, client: OpenAI) -> None:
file = client.vector_stores.files.update(
file_id="file-abc123",
vector_store_id="vs_abc123",
attributes={"foo": "string"},
)
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_raw_response_update(self, client: OpenAI) -> None:
response = client.vector_stores.files.with_raw_response.update(
file_id="file-abc123",
vector_store_id="vs_abc123",
attributes={"foo": "string"},
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
def test_streaming_response_update(self, client: OpenAI) -> None:
with client.vector_stores.files.with_streaming_response.update(
file_id="file-abc123",
vector_store_id="vs_abc123",
attributes={"foo": "string"},
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(VectorStoreFile, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_update(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.update(
file_id="file-abc123",
vector_store_id="",
attributes={"foo": "string"},
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.vector_stores.files.with_raw_response.update(
file_id="",
vector_store_id="vs_abc123",
attributes={"foo": "string"},
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
file = client.vector_stores.files.list(
vector_store_id="vector_store_id",
)
assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
file = client.vector_stores.files.list(
vector_store_id="vector_store_id",
after="after",
before="before",
filter="in_progress",
limit=0,
order="asc",
)
assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.vector_stores.files.with_raw_response.list(
vector_store_id="vector_store_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.vector_stores.files.with_streaming_response.list(
vector_store_id="vector_store_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(SyncCursorPage[VectorStoreFile], file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.list(
vector_store_id="",
)
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
file = client.vector_stores.files.delete(
file_id="file_id",
vector_store_id="vector_store_id",
)
assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.vector_stores.files.with_raw_response.delete(
file_id="file_id",
vector_store_id="vector_store_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.vector_stores.files.with_streaming_response.delete(
file_id="file_id",
vector_store_id="vector_store_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(VectorStoreFileDeleted, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.delete(
file_id="file_id",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.vector_stores.files.with_raw_response.delete(
file_id="",
vector_store_id="vector_store_id",
)
@parametrize
def test_method_content(self, client: OpenAI) -> None:
file = client.vector_stores.files.content(
file_id="file-abc123",
vector_store_id="vs_abc123",
)
assert_matches_type(SyncPage[FileContentResponse], file, path=["response"])
@parametrize
def test_raw_response_content(self, client: OpenAI) -> None:
response = client.vector_stores.files.with_raw_response.content(
file_id="file-abc123",
vector_store_id="vs_abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(SyncPage[FileContentResponse], file, path=["response"])
@parametrize
def test_streaming_response_content(self, client: OpenAI) -> None:
with client.vector_stores.files.with_streaming_response.content(
file_id="file-abc123",
vector_store_id="vs_abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(SyncPage[FileContentResponse], file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_content(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.content(
file_id="file-abc123",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.vector_stores.files.with_raw_response.content(
file_id="",
vector_store_id="vs_abc123",
)
| TestFiles |
python | plotly__plotly.py | plotly/graph_objs/table/hoverlabel/_font.py | {
"start": 233,
"end": 17133
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "table.hoverlabel"
_path_str = "table.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.table.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | numpy__numpy | numpy/fft/tests/test_pocketfft.py | {
"start": 353,
"end": 462
} | class ____:
def test_fft_n(self):
assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
| TestFFTShift |
python | openai__openai-python | src/openai/types/responses/response_function_shell_tool_call_output.py | {
"start": 815,
"end": 1071
} | class ____(BaseModel):
outcome: OutputOutcome
"""
Represents either an exit outcome (with an exit code) or a timeout outcome for a
shell call output chunk.
"""
stderr: str
stdout: str
created_by: Optional[str] = None
| Output |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/declarative_automation_tests/scenario_utils/scenario_state.py | {
"start": 3424,
"end": 3608
} | class ____(NamedTuple):
specs: Sequence[dg.AssetSpec]
partitions_def: Optional[dg.PartitionsDefinition] = None
can_subset: bool = False
@dataclass(frozen=True)
| MultiAssetSpec |
python | gevent__gevent | src/greentest/3.13/test_ssl.py | {
"start": 92849,
"end": 106232
} | class ____(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
# bpo-44229, bpo-43855, bpo-44237, and bpo-33450:
# Ignore spurious EPROTOTYPE returned by write() on macOS.
# See also http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE and sys.platform != "darwin":
self.running = False
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
if cert_binary is None:
sys.stdout.write(" client did not provide a cert\n")
else:
sys.stdout.write(f" cert binary is {len(cert_binary)}b\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
elif stripped == b'VERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_verified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
elif stripped == b'UNVERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_unverified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError as e:
# handles SSLError and socket errors
if isinstance(e, ConnectionError):
# OpenSSL 1.1.1 sometimes raises
# ConnectionResetError when connection is not
# shut down gracefully.
if self.server.chatty and support.verbose:
print(f" Connection reset by peer: {self.addr}")
self.close()
self.running = False
return
if self.server.chatty and support.verbose:
handle_error("Test server failure:\n")
try:
self.write(b"ERROR\n")
except OSError:
pass
self.close()
self.running = False
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
self._in_context = False
def __enter__(self):
if self._in_context:
raise ValueError('Re-entering ThreadedEchoServer context')
self._in_context = True
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
assert self._in_context
self._in_context = False
self.stop()
self.join()
def start(self, flag=None):
if not self._in_context:
raise ValueError(
'ThreadedEchoServer must be used as a context manager')
self.flag = flag
threading.Thread.start(self)
def run(self):
if not self._in_context:
raise ValueError(
'ThreadedEchoServer must be used as a context manager')
self.sock.settimeout(1.0)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError as e:
if support.verbose:
sys.stdout.write(f' connection timeout {e!r}\n')
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.close()
def close(self):
if self.sock is not None:
self.sock.close()
self.sock = None
def stop(self):
self.active = False
| ThreadedEchoServer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/custom_job.py | {
"start": 91105,
"end": 95059
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a CustomTrainingJob, CustomPythonTrainingJob, or CustomContainerTrainingJob.
:param training_pipeline_id: Required. The name of the TrainingPipeline resource to be deleted.
:param custom_job_id: Required. The name of the CustomJob to delete.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("training_pipeline_id", "custom_job_id", "region", "project_id", "impersonation_chain")
def __init__(
self,
*,
training_pipeline_id: str,
custom_job_id: str,
region: str,
project_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.training_pipeline_id = training_pipeline_id
self.custom_job_id = custom_job_id
self.region = region
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CustomJobHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting custom training pipeline: %s", self.training_pipeline_id)
training_pipeline_operation = hook.delete_training_pipeline(
training_pipeline=self.training_pipeline_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=training_pipeline_operation)
self.log.info("Training pipeline was deleted.")
except NotFound:
self.log.info("The Training Pipeline ID %s does not exist.", self.training_pipeline_id)
try:
self.log.info("Deleting custom job: %s", self.custom_job_id)
custom_job_operation = hook.delete_custom_job(
custom_job=self.custom_job_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=custom_job_operation)
self.log.info("Custom job was deleted.")
except NotFound:
self.log.info("The Custom Job ID %s does not exist.", self.custom_job_id)
| DeleteCustomTrainingJobOperator |
python | apache__airflow | airflow-core/tests/unit/serialization/test_serialized_objects.py | {
"start": 18033,
"end": 23323
} | class ____(BaseTrigger):
def __init__(self, hi):
self.hi = hi
def serialize(self):
return "unit.serialization.test_serialized_objects.MyTrigger", {"hi": self.hi}
async def run(self):
yield
def test_roundtrip_exceptions():
"""
This is for AIP-44 when we need to send certain non-error exceptions
as part of an RPC call e.g. TaskDeferred or AirflowRescheduleException.
"""
some_date = pendulum.now()
resched_exc = AirflowRescheduleException(reschedule_date=some_date)
ser = BaseSerialization.serialize(resched_exc)
deser = BaseSerialization.deserialize(ser)
assert isinstance(deser, AirflowRescheduleException)
assert deser.reschedule_date == some_date
del ser
del deser
exc = TaskDeferred(
trigger=MyTrigger(hi="yo"),
method_name="meth_name",
kwargs={"have": "pie"},
timeout=timedelta(seconds=30),
)
ser = BaseSerialization.serialize(exc)
deser = BaseSerialization.deserialize(ser)
assert deser.trigger.hi == "yo"
assert deser.method_name == "meth_name"
assert deser.kwargs == {"have": "pie"}
assert deser.timeout == timedelta(seconds=30)
@pytest.mark.parametrize(
"concurrency_parameter",
[
"max_active_tis_per_dag",
"max_active_tis_per_dagrun",
],
)
@pytest.mark.db_test
def test_serialized_dag_has_task_concurrency_limits(dag_maker, concurrency_parameter):
with dag_maker() as dag:
BashOperator(task_id="task1", bash_command="echo 1", **{concurrency_parameter: 1})
ser_dict = SerializedDAG.to_dict(dag)
lazy_serialized_dag = LazyDeserializedDAG(data=ser_dict)
assert lazy_serialized_dag.has_task_concurrency_limits
@pytest.mark.parametrize(
"concurrency_parameter",
[
"max_active_tis_per_dag",
"max_active_tis_per_dagrun",
],
)
@pytest.mark.db_test
def test_serialized_dag_mapped_task_has_task_concurrency_limits(dag_maker, concurrency_parameter):
with dag_maker() as dag:
@task
def my_task():
return [1, 2, 3, 4, 5, 6, 7]
@task(**{concurrency_parameter: 1})
def map_me_but_slowly(a):
pass
map_me_but_slowly.expand(a=my_task())
ser_dict = SerializedDAG.to_dict(dag)
lazy_serialized_dag = LazyDeserializedDAG(data=ser_dict)
assert lazy_serialized_dag.has_task_concurrency_limits
def test_hash_property():
from airflow.models.serialized_dag import SerializedDagModel
data = {"dag": {"dag_id": "dag1"}}
lazy_serialized_dag = LazyDeserializedDAG(data=data)
assert lazy_serialized_dag.hash == SerializedDagModel.hash(data)
@pytest.mark.parametrize(
("payload", "expected_cls"),
[
pytest.param(
{
"__type": DAT.ASSET,
"name": "test_asset",
"uri": "test://asset-uri",
"group": "test-group",
"extra": {},
},
Asset,
id="asset",
),
pytest.param(
{
"__type": DAT.ASSET_ALL,
"objects": [
{
"__type": DAT.ASSET,
"name": "x",
"uri": "test://x",
"group": "g",
"extra": {},
},
{
"__type": DAT.ASSET,
"name": "x",
"uri": "test://x",
"group": "g",
"extra": {},
},
],
},
AssetAll,
id="asset_all",
),
pytest.param(
{
"__type": DAT.ASSET_ANY,
"objects": [
{
"__type": DAT.ASSET,
"name": "y",
"uri": "test://y",
"group": "g",
"extra": {},
}
],
},
AssetAny,
id="asset_any",
),
pytest.param(
{"__type": DAT.ASSET_ALIAS, "name": "alias", "group": "g"},
AssetAlias,
id="asset_alias",
),
pytest.param(
{"__type": DAT.ASSET_REF, "name": "ref"},
AssetRef,
id="asset_ref",
),
],
)
def test_serde_decode_asset_condition_success(payload, expected_cls):
from airflow.serialization.serialized_objects import decode_asset_condition
assert isinstance(decode_asset_condition(payload), expected_cls)
def test_serde_decode_asset_condition_unknown_type():
from airflow.serialization.serialized_objects import decode_asset_condition
with pytest.raises(
ValueError,
match="deserialization not implemented for DAT 'UNKNOWN_TYPE'",
):
decode_asset_condition({"__type": "UNKNOWN_TYPE"})
def test_encode_timezone():
from airflow.serialization.serialized_objects import encode_timezone
assert encode_timezone(FixedTimezone(0)) == "UTC"
with pytest.raises(ValueError, match="DAG timezone should be a pendulum.tz.Timezone"):
encode_timezone(object())
| MyTrigger |
python | huggingface__transformers | src/transformers/models/lfm2_vl/modular_lfm2_vl.py | {
"start": 9030,
"end": 14162
} | class ____(LlavaForConditionalGeneration):
_checkpoint_conversion_mapping = {}
def get_image_features(
self,
pixel_values: torch.FloatTensor,
spatial_shapes: torch.Tensor,
pixel_attention_mask: torch.Tensor,
**kwargs,
):
return self.model.get_image_features(
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
**kwargs,
)
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
pixel_attention_mask: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Lfm2VlCausalLMOutputWithPast]:
r"""
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`, *optional*):
The input image tensors.
spatial_shapes (`torch.Tensor` of shape `(batch_size, 2)`, *optional*):
The spatial shapes of the input images.
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, height, width)`, *optional*):
The pixel attention mask of the input images.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AutoModelForImageTextToText
>>> from transformers.image_utils import load_image
>>> model = AutoModelForImageTextToText.from_pretrained(
... "LiquidAI/LFM2-VL-1.6B",
... )
>>> processor = AutoProcessor.from_pretrained(
... "LiquidAI/LFM2-VL-1.6B",
... )
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = load_image(url)
>>> conversation = [
... {
... "role": "user",
... "content": [
... {"type": "image", "image": image},
... {"type": "text", "text": "What is in this image?"},
... ],
... },
... ]
>>> inputs = processor.apply_chat_template(
... conversation,
... add_generation_prompt=True,
... tokenize=True,
... return_dict=True,
... return_tensors="pt"
... )
>>> # Generate
>>> outputs = model.generate(**inputs, max_new_tokens=45)
>>> processor.batch_decode(outputs, skip_special_tokens=True)[0]
'This image depicts a vibrant street scene in what appears to be a Chinatown or similar cultural area. The focal point is a large red stop sign with white lettering, mounted on a pole.'
```"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
spatial_shapes=spatial_shapes,
pixel_attention_mask=pixel_attention_mask,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits,
labels=labels,
vocab_size=self.config.text_config.vocab_size,
**kwargs,
)
return Lfm2VlCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
__all__ = ["Lfm2VlForConditionalGeneration", "Lfm2VlPreTrainedModel", "Lfm2VlModel"]
| Lfm2VlForConditionalGeneration |
python | ray-project__ray | python/ray/air/util/tensor_extensions/pandas.py | {
"start": 19103,
"end": 20751
} | class ____(_TensorOpsMixin, _TensorScalarCastMixin):
"""
Single element of a TensorArray, wrapping an underlying ndarray.
"""
def __init__(self, values: np.ndarray):
"""
Construct a TensorArrayElement from a NumPy ndarray.
Args:
values: ndarray that underlies this TensorArray element.
"""
self._tensor = values
def __repr__(self):
return self._tensor.__repr__()
def __str__(self):
return self._tensor.__str__()
@property
def numpy_dtype(self):
"""
Get the dtype of the tensor.
:return: The numpy dtype of the backing ndarray
"""
return self._tensor.dtype
@property
def numpy_ndim(self):
"""
Get the number of tensor dimensions.
:return: integer for the number of dimensions
"""
return self._tensor.ndim
@property
def numpy_shape(self):
"""
Get the shape of the tensor.
:return: A tuple of integers for the numpy shape of the backing ndarray
"""
return self._tensor.shape
@property
def numpy_size(self):
"""
Get the size of the tensor.
:return: integer for the number of elements in the tensor
"""
return self._tensor.size
def to_numpy(self):
"""
Return the values of this element as a NumPy ndarray.
"""
return np.asarray(self._tensor)
def __array__(self, dtype: np.dtype = None, **kwargs) -> np.ndarray:
return np.asarray(self._tensor, dtype=dtype, **kwargs)
@PublicAPI(stability="beta")
| TensorArrayElement |
python | pytorch__pytorch | test/dynamo/test_utils.py | {
"start": 388,
"end": 6712
} | class ____(TestCase):
def test_nan(self):
a = torch.Tensor([float("nan")])
b = torch.Tensor([float("nan")])
fp64_ref = torch.DoubleTensor([5.0])
res = utils.same(a, b, fp64_ref=fp64_ref, equal_nan=True)
self.assertTrue(res)
def test_larger_multiplier_for_smaller_tensor(self):
"""
Tensor numel between (10, 500]
"""
N = 100
fp64_ref = torch.full([N], 0.0, dtype=torch.double)
a = torch.full([N], 1.0)
tol = 4 * 1e-2
self.assertTrue(utils.same(a, a * 2, fp64_ref=fp64_ref, tol=tol))
self.assertFalse(utils.same(a, a * 4, fp64_ref=fp64_ref, tol=tol))
self.assertTrue(
utils.same(
a,
a * 4,
fp64_ref=fp64_ref,
use_larger_multiplier_for_smaller_tensor=True,
tol=tol,
)
)
self.assertFalse(
utils.same(
a,
a * 9,
fp64_ref=fp64_ref,
use_larger_multiplier_for_smaller_tensor=True,
tol=tol,
)
)
def test_larger_multiplier_for_even_smaller_tensor(self):
"""
Tesnor numel <=10
"""
fp64_ref = torch.DoubleTensor([0.0])
a = torch.Tensor([1.0])
tol = 4 * 1e-2
self.assertTrue(utils.same(a, a * 2, fp64_ref=fp64_ref, tol=tol))
self.assertFalse(utils.same(a, a * 7, fp64_ref=fp64_ref, tol=tol))
self.assertTrue(
utils.same(
a,
a * 7,
fp64_ref=fp64_ref,
use_larger_multiplier_for_smaller_tensor=True,
tol=tol,
)
)
self.assertFalse(
utils.same(
a,
a * 20,
fp64_ref=fp64_ref,
use_larger_multiplier_for_smaller_tensor=True,
tol=tol,
)
)
@dynamo_config.patch(
{
"log_compilation_metrics": True,
"inline_inbuilt_nn_modules": False,
}
)
def test_graph_break_counting(self):
"""
Run a compilation that includes a graph break and validate that the
graph break counter is incremented.
"""
def run_forward_backward():
model = torch.compile(TestModel())
x = torch.rand([3], requires_grad=True)
output = model(x)
loss_fn = torch.nn.MSELoss()
target = torch.tensor([1.0])
loss = loss_fn(output, target)
loss.backward()
@torch.compile
def add(x, y):
return x + y
@torch.compile
def break_it(x):
y = x.sum()
if y > 0:
return x + y.item()
return x - y.item()
@torch.compile
def break_it2(x):
y = x.sum()
if y > 0:
if y > 1:
return x * y.item()
return x + y.item()
return x - y.item()
add(torch.rand([10]), torch.rand([10]))
utils.reset_frame_count()
compilation_events = []
with mock.patch("torch._dynamo.utils.log_compilation_event") as log_event:
run_forward_backward()
compilation_events = [arg[0][0] for arg in log_event.call_args_list]
self.assertEqual(compilation_events[-1].num_graph_breaks, 0)
# We should fallback to normal mode and increment the graph break counter
torch.compile(break_it, backend="inductor")(torch.ones(3, 3))
compilation_events = [arg[0][0] for arg in log_event.call_args_list]
self.assertEqual(compilation_events[-1].num_graph_breaks, 1)
# Graph break counter should be incremented by 1 (after a reset), not 2
torch.compile(break_it, backend="inductor")(torch.ones(3, 3))
compilation_events = [arg[0][0] for arg in log_event.call_args_list]
self.assertEqual(compilation_events[-1].num_graph_breaks, 1)
# Graph break counter should be incremented by 2
torch.compile(break_it2, backend="inductor")(torch.ones(3, 3))
compilation_events = [arg[0][0] for arg in log_event.call_args_list]
self.assertEqual(compilation_events[-1].num_graph_breaks, 2)
def test_traced_code_query(self):
try:
from .utils import add, break_it
except ImportError:
from utils import add, break_it
traced_code_lists = []
def get_filenames(traced_code_lists):
return [
[code.co_filename for code in code_list]
for code_list in traced_code_lists
]
def my_backend(gm, example_inputs):
from torch._dynamo.utils import get_traced_code
nonlocal traced_code_lists
traced_code_lists.append(get_traced_code())
return gm.forward
utils_path = os.path.join(os.path.dirname(__file__), "utils.py")
# === no inlining ===
@torch.compile(backend=my_backend)
def fn(x):
return x * 2
x = torch.randn(3)
traced_code_lists = []
fn(x)
self.assertEqual(get_filenames(traced_code_lists), [[__file__]])
# === successful inlining ===
@torch.compile(backend=my_backend)
def fn(x):
return add(x) * 2
x = torch.randn(3)
traced_code_lists = []
fn(x)
utils_path = os.path.join(os.path.dirname(__file__), "utils.py")
self.assertEqual(get_filenames(traced_code_lists), [[__file__, utils_path]])
# === graph break occurs during inlining ===
@torch.compile(backend=my_backend)
def fn(x):
z = x + 1
y = break_it(z)
return y * 2
x = torch.randn(3)
traced_code_lists = []
fn(x)
self.assertEqual(get_filenames(traced_code_lists), [[__file__], [utils_path]])
# === empty graph ===
@torch.compile(backend=my_backend)
def fn(x):
return x
x = torch.randn(3)
traced_code_lists = []
fn(x)
self.assertEqual(traced_code_lists, [])
| TestUtils |
python | tox-dev__tox | src/tox/tox_env/python/runner.py | {
"start": 764,
"end": 5753
} | class ____(Python, RunToxEnv, ABC):
def __init__(self, create_args: ToxEnvCreateArgs) -> None:
super().__init__(create_args)
def register_config(self) -> None:
super().register_config()
root = self.core["toxinidir"]
self.conf.add_config(
keys=["deps"],
of_type=PythonDeps,
factory=partial(PythonDeps.factory, root),
default=PythonDeps("", root),
desc="python dependencies with optional version specifiers, as specified by PEP-440",
)
self.conf.add_config(
keys=["dependency_groups"],
of_type=set[str],
default=set(),
desc="dependency groups to install of the target package",
post_process=_normalize_extras,
)
add_skip_missing_interpreters_to_core(self.core, self.options)
@property
def _package_types(self) -> tuple[str, ...]:
return "wheel", "sdist", "editable", "editable-legacy", "skip", "external"
def _register_package_conf(self) -> bool:
# provision package type
desc = f"package installation mode - {' | '.join(i for i in self._package_types)} "
if not super()._register_package_conf():
self.conf.add_constant(["package"], desc, "skip")
return False
if getattr(self.options, "install_pkg", None) is not None:
self.conf.add_constant(["package"], desc, "external")
else:
self.conf.add_config(
keys=["use_develop", "usedevelop"],
desc="use develop mode",
default=False,
of_type=bool,
)
develop_mode = self.conf["use_develop"] or getattr(self.options, "develop", False)
if develop_mode:
self.conf.add_constant(["package"], desc, "editable")
else:
self.conf.add_config(keys="package", of_type=str, default=self.default_pkg_type, desc=desc)
pkg_type = self.pkg_type
if pkg_type == "skip":
return False
add_extras_to_env(self.conf)
return True
@property
def default_pkg_type(self) -> str:
return "sdist"
@property
def pkg_type(self) -> str:
pkg_type: str = self.conf["package"]
if pkg_type not in self._package_types:
values = ", ".join(self._package_types)
msg = f"invalid package config type {pkg_type} requested, must be one of {values}"
raise HandledError(msg)
return pkg_type
def _setup_env(self) -> None:
super()._setup_env()
self._install_deps()
self._install_dependency_groups()
def _install_deps(self) -> None:
requirements_file: PythonDeps = self.conf["deps"]
self._install(requirements_file, PythonRun.__name__, "deps")
def _install_dependency_groups(self) -> None:
groups: set[str] = self.conf["dependency_groups"]
if not groups:
return
try:
root: Path = self.core["package_root"]
except KeyError:
root = self.core["tox_root"]
requirements = resolve(root, groups)
self._install(list(requirements), PythonRun.__name__, "dependency-groups")
def _build_packages(self) -> list[Package]:
package_env = self.package_env
assert package_env is not None # noqa: S101
with package_env.display_context(self._has_display_suspended):
try:
packages = package_env.perform_packaging(self.conf)
except Skip as exception:
msg = f"{exception.args[0]} for package environment {package_env.conf['env_name']}"
raise Skip(msg) from exception
return packages
def add_skip_missing_interpreters_to_core(core: CoreConfigSet, options: Parsed) -> None:
def skip_missing_interpreters_post_process(value: bool) -> bool: # noqa: FBT001
if getattr(options, "skip_missing_interpreters", "config") != "config":
return StrConvert().to_bool(options.skip_missing_interpreters)
return value
core.add_config(
keys=["skip_missing_interpreters"],
default=True,
of_type=bool,
post_process=skip_missing_interpreters_post_process,
desc="skip running missing interpreters",
)
def add_extras_to_env(conf: EnvConfigSet) -> None:
conf.add_config(
keys=["extras"],
of_type=set[str],
default=set(),
desc="extras to install of the target package",
post_process=_normalize_extras,
)
def _normalize_extras(values: set[str]) -> set[str]:
# although _ and . is allowed this will be normalized during packaging to -
# https://packaging.python.org/en/latest/specifications/dependency-specifiers/#grammar
return {canonicalize_name(v) for v in values}
__all__ = [
"PythonRun",
"add_extras_to_env",
"add_skip_missing_interpreters_to_core",
]
| PythonRun |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/LegendItem.py | {
"start": 13287,
"end": 15390
} | class ____(GraphicsWidget):
"""Class responsible for drawing a single item in a LegendItem (sans label)
"""
sigClicked = QtCore.Signal(object)
def __init__(self, item):
GraphicsWidget.__init__(self)
self.item = item
self.setFixedWidth(20)
self.setFixedHeight(20)
def boundingRect(self):
return QtCore.QRectF(0, 0, 20, 20)
def paint(self, p, *args):
opts = self.item.opts
if opts.get('antialias'):
p.setRenderHint(p.RenderHint.Antialiasing)
visible = self.item.isVisible()
if not visible:
icon = invisibleEye.qicon
p.drawPixmap(QtCore.QPoint(1, 1), icon.pixmap(18, 18))
return
if not isinstance(self.item, ScatterPlotItem):
p.setPen(fn.mkPen(opts['pen']))
p.drawLine(0, 11, 20, 11)
if (opts.get('fillLevel', None) is not None and
opts.get('fillBrush', None) is not None):
p.setBrush(fn.mkBrush(opts['fillBrush']))
p.setPen(fn.mkPen(opts['pen']))
p.drawPolygon(QtGui.QPolygonF(
[QtCore.QPointF(2, 18), QtCore.QPointF(18, 2),
QtCore.QPointF(18, 18)]))
symbol = opts.get('symbol', None)
if symbol is not None:
if isinstance(self.item, PlotDataItem):
opts = self.item.scatter.opts
p.translate(10, 10)
drawSymbol(p, symbol, opts['size'], fn.mkPen(opts['pen']),
fn.mkBrush(opts['brush']))
if isinstance(self.item, BarGraphItem):
p.setBrush(fn.mkBrush(opts['brush']))
p.drawRect(QtCore.QRectF(2, 2, 18, 18))
def mouseClickEvent(self, event):
"""Use the mouseClick event to toggle the visibility of the plotItem
"""
if event.button() == QtCore.Qt.MouseButton.LeftButton:
visible = self.item.isVisible()
self.item.setVisible(not visible)
event.accept()
self.update()
self.sigClicked.emit(self.item)
| ItemSample |
python | bokeh__bokeh | tests/unit/bokeh/document/_util_document.py | {
"start": 1399,
"end": 1467
} | class ____(Model):
name = String(default="")
| ModelThatOverridesName |
python | dask__dask | dask/dataframe/dask_expr/_repartition.py | {
"start": 7518,
"end": 8628
} | class ____(Repartition):
"""Increase the partition count"""
_parameters = ["frame", "new_partitions"]
def _divisions(self):
return (None,) * (1 + sum(self._nsplits))
@functools.cached_property
def _nsplits(self):
df = self.frame
div, mod = divmod(self.new_partitions, df.npartitions)
nsplits = [div] * df.npartitions
nsplits[-1] += mod
if len(nsplits) != df.npartitions:
raise ValueError(f"nsplits should have len={df.npartitions}")
return nsplits
def _layer(self):
dsk = {}
nsplits = self._nsplits
df = self.frame
new_name = self._name
split_name = f"split-{new_name}"
j = 0
for i, k in enumerate(nsplits):
if k == 1:
dsk[new_name, j] = (df._name, i)
j += 1
else:
dsk[split_name, i] = (split_evenly, (df._name, i), k)
for jj in range(k):
dsk[new_name, j] = (getitem, (split_name, i), jj)
j += 1
return dsk
| RepartitionToMore |
python | Textualize__textual | tests/option_list/test_option_removal.py | {
"start": 288,
"end": 4429
} | class ____(App[None]):
"""Test option list application."""
def compose(self) -> ComposeResult:
yield OptionList(
Option("0", id="0"),
Option("1", id="1"),
)
async def test_remove_first_option_via_index() -> None:
"""It should be possible to remove the first option of an option list, via index."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.option_count == 2
assert option_list.highlighted == 0
option_list.remove_option_at_index(0)
assert option_list.option_count == 1
assert option_list.highlighted == 0
async def test_remove_first_option_via_id() -> None:
"""It should be possible to remove the first option of an option list, via ID."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.option_count == 2
assert option_list.highlighted == 0
option_list.remove_option("0")
assert option_list.option_count == 1
assert option_list.highlighted == 0
async def test_remove_last_option_via_index() -> None:
"""It should be possible to remove the last option of an option list, via index."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.option_count == 2
assert option_list.highlighted == 0
option_list.remove_option_at_index(1)
assert option_list.option_count == 1
assert option_list.highlighted == 0
async def test_remove_last_option_via_id() -> None:
"""It should be possible to remove the last option of an option list, via ID."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.option_count == 2
assert option_list.highlighted == 0
option_list.remove_option("1")
assert option_list.option_count == 1
assert option_list.highlighted == 0
async def test_remove_all_options_via_index() -> None:
"""It should be possible to remove all options via index."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.option_count == 2
assert option_list.highlighted == 0
option_list.remove_option_at_index(0)
option_list.remove_option_at_index(0)
assert option_list.option_count == 0
assert option_list.highlighted is None
async def test_remove_all_options_via_id() -> None:
"""It should be possible to remove all options via ID."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
assert option_list.option_count == 2
assert option_list.highlighted == 0
option_list.remove_option("0")
option_list.remove_option("1")
assert option_list.option_count == 0
assert option_list.highlighted is None
async def test_remove_invalid_id() -> None:
"""Attempting to remove an option ID that doesn't exist should raise an exception."""
async with OptionListApp().run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).remove_option("does-not-exist")
async def test_remove_invalid_index() -> None:
"""Attempting to remove an option index that doesn't exist should raise an exception."""
async with OptionListApp().run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).remove_option_at_index(23)
async def test_remove_with_hover_on_last_option():
"""https://github.com/Textualize/textual/issues/3270"""
async with OptionListApp().run_test() as pilot:
await pilot.hover(OptionList, Offset(1, 1) + Offset(2, 1))
option_list = pilot.app.query_one(OptionList)
assert option_list._mouse_hovering_over == 1
option_list.remove_option_at_index(0)
assert option_list._mouse_hovering_over == None
| OptionListApp |
python | pytorch__pytorch | test/distributed/test_c10d_common.py | {
"start": 60572,
"end": 71122
} | class ____(MultiProcessTestCase):
def setUp(self):
super().setUp()
self._spawn_processes()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_get_backend_name(self):
dpg = DummyProcessGroup(0, 1)
self.assertEqual("Dummy", dpg.name())
# dist.Backend.register_backend(
# "dummy", PythonProcessGroupExtensionTest.create_dummy
# )
# # os.environ["MASTER_ADDR"] = "localhost"
# # os.environ["MASTER_PORT"] = "6789"
# # dist.init_process_group(
# # "cpu:dummy", rank=0, world_size=1,
# # )
# dpg = DummyProcessGroup(0, 1)
# from torch.distributed.distributed_c10d import _canonicalize_group_rank
# self.assertEqual(123, _canonicalize_group_rank(dpg, group_rank=123, return_global=False))
# with self.assertRaises(RuntimeError):
# _canonicalize_group_rank(dpg, group_rank=123, return_global=True)
def test_canonicalize_helper(self):
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6789"
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
dpg = DummyProcessGroup(0, 124)
from torch.distributed.distributed_c10d import _canonicalize_group_rank
# we ensure that a process group with more ranks than the 'default' group can still be used.
# e.g. if the dpg had 124 ranks and the world had only 2 ranks.
self.assertEqual(
123, _canonicalize_group_rank(dpg, group_rank=123, return_global=False)
)
self.assertEqual(
0, _canonicalize_group_rank(dpg, global_rank=0, return_global=True)
)
with self.assertRaises(ValueError):
# TODO(whc) this is actually catching the wrong error:
# ValueError: Group <__mp_main__.DummyProcessGroup object at 0x7faa0a844540> is not registered,
# please create group with torch.distributed.new_group API
# It should be catching a different error where the rank doesn't exist in the global mapping.
# But it's still testing the same part of the _canonicalize_group_rank helper so maybe this is fine
_canonicalize_group_rank(dpg, group_rank=123, return_global=True)
dist.destroy_process_group()
def test_backend_class_attr(self):
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
self.assertEqual(dist.Backend.DUMMY, "dummy")
self.assertEqual(
dist.Backend._plugins["DUMMY"].creator_fn,
PythonProcessGroupExtensionTest.create_dummy,
)
def test_is_backend_available(self):
self.assertEqual(dist.is_ucc_available(), dist.is_backend_available("ucc"))
self.assertFalse(dist.is_backend_available("dummy"))
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
self.assertTrue(dist.is_backend_available("dummy"))
def test_backend_config(self):
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
# Ensure backend config can be created with the following arguments
backend_config_strings_and_expected_values = [
(dist.Backend.GLOO, "cpu:gloo,cuda:gloo"),
(dist.Backend.NCCL, "cuda:nccl"),
(dist.Backend.MPI, "cpu:mpi,cuda:mpi"),
(dist.Backend.UCC, "cpu:ucc,cuda:ucc"),
(dist.Backend.DUMMY, "cpu:dummy,cuda:dummy"),
("DUMMY", "cpu:dummy,cuda:dummy"),
("dummy", "cpu:dummy,cuda:dummy"),
("cpu:dummy,cuda:dummy", "cpu:dummy,cuda:dummy"),
("cpu:dummy,cuda:nccl", "cpu:dummy,cuda:nccl"),
("cpu:gloo,cuda:dummy", "cpu:gloo,cuda:dummy"),
("cpu:gloo,cuda:nccl", "cpu:gloo,cuda:nccl"),
]
if TEST_XPU:
# Override backend_config_strings_and_expected_values for Intel GPU.
backend_config_strings_and_expected_values[4:10] = [
(dist.Backend.DUMMY, "cpu:dummy,cuda:dummy,xpu:dummy"),
("DUMMY", "cpu:dummy,cuda:dummy,xpu:dummy"),
("dummy", "cpu:dummy,cuda:dummy,xpu:dummy"),
("cpu:dummy,xpu:dummy", "cpu:dummy,xpu:dummy"),
("cpu:dummy,xpu:xccl", "cpu:dummy,xpu:xccl"),
("cpu:gloo,xpu:dummy", "cpu:gloo,xpu:dummy"),
("cpu:gloo,xpu:xccl", "cpu:gloo,xpu:xccl"),
]
for config_str, expected_value in backend_config_strings_and_expected_values:
with self.subTest(config_str):
# ensures these configs strings are valid and no ValueError is raised
config = dist.BackendConfig(config_str)
self.assertEqual(str(config), expected_value)
# Ensure backend config will raise ValueError with the following arguments
invalid_backend_config_strings = [
"cpu:gloo,cuda:nccl,", # trailing comma
"cpu:gloo,cuda:nccl,cpu:dummy", # duplicate device
"cpu:gloo,xpu:xccl,", # trailing comma
"cpu:gloo,xpu:xccl,cpu:dummy", # duplicate device
]
for config_str in invalid_backend_config_strings:
with self.subTest(config_str):
with self.assertRaises(ValueError):
dist.BackendConfig(config_str)
def test_init_process_group_with_multiple_backends(self):
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6789"
dist.init_process_group(
"cpu:dummy,cuda:dummy,xpu:dummy", rank=self.rank, world_size=self.world_size
)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
dist.all_gather(output_tensor_list, input_tensor)
dist.barrier()
dist.destroy_process_group()
class Options:
group_name = None
split_from = None
split_color = None
global_ranks_in_group = None
def __init__(self) -> None:
pass
def create(self):
pass
@staticmethod
def create_dummy(store, group_rank, group_size, timeout):
return DummyProcessGroup(group_rank, group_size)
@staticmethod
def create_dummy_ext(dist_opts, pg_options=None):
return DummyProcessGroup(dist_opts.group_rank, dist_opts.group_size)
def test_collectives(self):
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6789"
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
dist.all_gather(output_tensor_list, input_tensor)
for tensor in output_tensor_list:
self.assertEqual(tensor, input_tensor)
# test all_reduce
input_tensor = torch.ones(2, 2) * 7
dist.all_reduce(input_tensor)
self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)
# test broadcast
input_tensor = torch.zeros(2, 2)
dist.broadcast(input_tensor, 0, async_op=True).wait()
self.assertEqual(torch.ones(2, 2), input_tensor)
# test reduce_scatter
output_tensor = torch.zeros(2, 2)
input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
dist.reduce_scatter(output_tensor, input_tensor_list)
self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)
dist.barrier()
dist.destroy_process_group()
def test_send_recv(self):
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6789"
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
input_tensor = torch.zeros(2, 2)
dist.send(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)
with self.assertRaises(ValueError):
dist.send(input_tensor, dist.get_rank())
with self.assertRaises(ValueError):
dist.send(input_tensor, group_dst=dist.get_rank())
with self.assertRaises(ValueError):
dist.send(input_tensor, dist.get_rank(), group_dst=dist.get_rank())
with self.assertRaises(ValueError):
dist.send(input_tensor)
# test recv
input_tensor = torch.zeros(2, 2)
dist.recv(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)
with self.assertRaises(ValueError):
dist.recv(input_tensor, src=0, group_src=0)
dist.barrier()
# intentionally not calling into `destroy_process_group` as not all
# user applications would explicitly that.
def test_shutdown(self) -> None:
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6789"
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
pg = c10d._get_default_group()
dist.destroy_process_group()
self.assertTrue(pg._shutdown)
def test_abort(self) -> None:
dist.Backend.register_backend(
"dummy", PythonProcessGroupExtensionTest.create_dummy
)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6789"
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
pg = c10d._get_default_group()
c10d._abort_process_group()
self.assertTrue(pg._aborted)
instantiate_parametrized_tests(CommonDistributedDataParallelTest)
| PythonProcessGroupExtensionTest |
python | Textualize__textual | tests/animations/test_progress_bar_animation.py | {
"start": 282,
"end": 1611
} | class ____(App[None]):
def compose(self) -> ComposeResult:
yield ProgressBar()
async def test_progress_bar_animates_on_full() -> None:
"""An indeterminate progress bar is not fully highlighted when animating."""
app = ProgressBarApp()
app.animation_level = "full"
async with app.run_test():
bar_renderable = app.query_one(Bar).render()
start, end = bar_renderable.highlight_range
assert start != 0 or end != app.query_one(Bar).size.width
async def test_progress_bar_animates_on_basic() -> None:
"""An indeterminate progress bar is not fully highlighted when animating."""
app = ProgressBarApp()
app.animation_level = "basic"
async with app.run_test():
bar_renderable = app.query_one(Bar).render()
start, end = bar_renderable.highlight_range
assert start != 0 or end != app.query_one(Bar).size.width
async def test_progress_bar_does_not_animate_on_none() -> None:
"""An indeterminate progress bar is fully highlighted when not animating."""
app = ProgressBarApp()
app.animation_level = "none"
async with app.run_test():
bar_renderable = app.query_one(Bar).render()
start, end = bar_renderable.highlight_range
assert start == 0
assert end == app.query_one(Bar).size.width
| ProgressBarApp |
python | ray-project__ray | python/ray/serve/_private/benchmarks/streaming/common.py | {
"start": 572,
"end": 1535
} | class ____:
def __init__(self, tokens_per_request: int):
self._tokens_per_request = tokens_per_request
# Switch off logging to minimize its impact
logging.getLogger("ray").setLevel(logging.WARNING)
logging.getLogger("ray.serve").setLevel(logging.WARNING)
def stream(self):
payload = PayloadPydantic(
text="Test output",
floats=[float(f) for f in range(1, 100)],
ints=list(range(1, 100)),
ts=time.time(),
reason="Success!",
)
for i in range(self._tokens_per_request):
yield payload
async def aio_stream(self):
payload = PayloadPydantic(
text="Test output",
floats=[float(f) for f in range(1, 100)],
ints=list(range(1, 100)),
ts=time.time(),
reason="Success!",
)
for i in range(self._tokens_per_request):
yield payload
| Endpoint |
python | pandas-dev__pandas | pandas/core/indexers/objects.py | {
"start": 14107,
"end": 17065
} | class ____(BaseIndexer):
"""
Creates window boundaries for fixed-length windows that include the current row.
Parameters
----------
index_array : np.ndarray, default None
Array-like structure representing the indices for the data points.
If None, the default indices are assumed. This can be useful for
handling non-uniform indices in data, such as in time series
with irregular timestamps.
window_size : int, default 0
Size of the moving window. This is the number of observations used
for calculating the statistic. The default is to consider all
observations within the window.
**kwargs
Additional keyword arguments passed to the subclass's methods.
See Also
--------
DataFrame.rolling : Provides rolling window calculations.
api.indexers.VariableWindowIndexer : Calculate window bounds based on
variable-sized windows.
Examples
--------
>>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
>>> df.rolling(window=indexer, min_periods=1).sum()
B
0 1.0
1 3.0
2 2.0
3 4.0
4 4.0
"""
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
"""
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
step : int, default None
step passed from the top level rolling API
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
if center:
raise ValueError("Forward-looking windows can't have center=True")
if closed is not None:
raise ValueError(
"Forward-looking windows don't support setting the closed argument"
)
if step is None:
step = 1
start = np.arange(0, num_values, step, dtype="int64")
end = start + self.window_size
if self.window_size:
end = np.clip(end, 0, num_values)
return start, end
| FixedForwardWindowIndexer |
python | sympy__sympy | sympy/geometry/point.py | {
"start": 29721,
"end": 36661
} | class ____(Point):
"""A point in a 3-dimensional Euclidean space.
Parameters
==========
coords
A sequence of 3 coordinate values.
Attributes
==========
x
y
z
length
Raises
======
TypeError
When trying to add or subtract points with different dimensions.
When `intersection` is called with object other than a Point.
Examples
========
>>> from sympy import Point3D
>>> from sympy.abc import x
>>> Point3D(1, 2, 3)
Point3D(1, 2, 3)
>>> Point3D([1, 2, 3])
Point3D(1, 2, 3)
>>> Point3D(0, x, 3)
Point3D(0, x, 3)
Floats are automatically converted to Rational unless the
evaluate flag is False:
>>> Point3D(0.5, 0.25, 2)
Point3D(1/2, 1/4, 2)
>>> Point3D(0.5, 0.25, 3, evaluate=False)
Point3D(0.5, 0.25, 3)
"""
_ambient_dimension = 3
def __new__(cls, *args, _nocheck=False, **kwargs):
if not _nocheck:
kwargs['dim'] = 3
args = Point(*args, **kwargs)
return GeometryEntity.__new__(cls, *args)
def __contains__(self, item):
return item == self
@staticmethod
def are_collinear(*points):
"""Is a sequence of points collinear?
Test whether or not a set of points are collinear. Returns True if
the set of points are collinear, or False otherwise.
Parameters
==========
points : sequence of Point
Returns
=======
are_collinear : boolean
See Also
========
sympy.geometry.line.Line3D
Examples
========
>>> from sympy import Point3D
>>> from sympy.abc import x
>>> p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 1)
>>> p3, p4, p5 = Point3D(2, 2, 2), Point3D(x, x, x), Point3D(1, 2, 6)
>>> Point3D.are_collinear(p1, p2, p3, p4)
True
>>> Point3D.are_collinear(p1, p2, p3, p5)
False
"""
return Point.is_collinear(*points)
def direction_cosine(self, point):
"""
Gives the direction cosine between 2 points
Parameters
==========
p : Point3D
Returns
=======
list
Examples
========
>>> from sympy import Point3D
>>> p1 = Point3D(1, 2, 3)
>>> p1.direction_cosine(Point3D(2, 3, 5))
[sqrt(6)/6, sqrt(6)/6, sqrt(6)/3]
"""
a = self.direction_ratio(point)
b = sqrt(Add(*(i**2 for i in a)))
return [(point.x - self.x) / b,(point.y - self.y) / b,
(point.z - self.z) / b]
def direction_ratio(self, point):
"""
Gives the direction ratio between 2 points
Parameters
==========
p : Point3D
Returns
=======
list
Examples
========
>>> from sympy import Point3D
>>> p1 = Point3D(1, 2, 3)
>>> p1.direction_ratio(Point3D(2, 3, 5))
[1, 1, 2]
"""
return [(point.x - self.x),(point.y - self.y),(point.z - self.z)]
def intersection(self, other):
"""The intersection between this point and another GeometryEntity.
Parameters
==========
other : GeometryEntity or sequence of coordinates
Returns
=======
intersection : list of Points
Notes
=====
The return value will either be an empty list if there is no
intersection, otherwise it will contain this point.
Examples
========
>>> from sympy import Point3D
>>> p1, p2, p3 = Point3D(0, 0, 0), Point3D(1, 1, 1), Point3D(0, 0, 0)
>>> p1.intersection(p2)
[]
>>> p1.intersection(p3)
[Point3D(0, 0, 0)]
"""
if not isinstance(other, GeometryEntity):
other = Point(other, dim=3)
if isinstance(other, Point3D):
if self == other:
return [self]
return []
return other.intersection(self)
def scale(self, x=1, y=1, z=1, pt=None):
"""Scale the coordinates of the Point by multiplying by
``x`` and ``y`` after subtracting ``pt`` -- default is (0, 0) --
and then adding ``pt`` back again (i.e. ``pt`` is the point of
reference for the scaling).
See Also
========
translate
Examples
========
>>> from sympy import Point3D
>>> t = Point3D(1, 1, 1)
>>> t.scale(2)
Point3D(2, 1, 1)
>>> t.scale(2, 2)
Point3D(2, 2, 1)
"""
if pt:
pt = Point3D(pt)
return self.translate(*(-pt).args).scale(x, y, z).translate(*pt.args)
return Point3D(self.x*x, self.y*y, self.z*z)
def transform(self, matrix):
"""Return the point after applying the transformation described
by the 4x4 Matrix, ``matrix``.
See Also
========
sympy.geometry.point.Point3D.scale
sympy.geometry.point.Point3D.translate
"""
if not (matrix.is_Matrix and matrix.shape == (4, 4)):
raise ValueError("matrix must be a 4x4 matrix")
x, y, z = self.args
m = Transpose(matrix)
return Point3D(*(Matrix(1, 4, [x, y, z, 1])*m).tolist()[0][:3])
def translate(self, x=0, y=0, z=0):
"""Shift the Point by adding x and y to the coordinates of the Point.
See Also
========
scale
Examples
========
>>> from sympy import Point3D
>>> t = Point3D(0, 1, 1)
>>> t.translate(2)
Point3D(2, 1, 1)
>>> t.translate(2, 2)
Point3D(2, 3, 1)
>>> t + Point3D(2, 2, 2)
Point3D(2, 3, 3)
"""
return Point3D(self.x + x, self.y + y, self.z + z)
@property
def coordinates(self):
"""
Returns the three coordinates of the Point.
Examples
========
>>> from sympy import Point3D
>>> p = Point3D(0, 1, 2)
>>> p.coordinates
(0, 1, 2)
"""
return self.args
@property
def x(self):
"""
Returns the X coordinate of the Point.
Examples
========
>>> from sympy import Point3D
>>> p = Point3D(0, 1, 3)
>>> p.x
0
"""
return self.args[0]
@property
def y(self):
"""
Returns the Y coordinate of the Point.
Examples
========
>>> from sympy import Point3D
>>> p = Point3D(0, 1, 2)
>>> p.y
1
"""
return self.args[1]
@property
def z(self):
"""
Returns the Z coordinate of the Point.
Examples
========
>>> from sympy import Point3D
>>> p = Point3D(0, 1, 1)
>>> p.z
1
"""
return self.args[2]
| Point3D |
python | euske__pdfminer | pdfminer/pdfparser.py | {
"start": 399,
"end": 463
} | class ____(PDFException):
pass
## PDFParser
##
| PDFSyntaxError |
python | pytorch__pytorch | benchmarks/transformer/sdp.py | {
"start": 415,
"end": 1462
} | class ____:
batch_size: int
num_heads: int
max_sequence_len: int
embed_dimension: int
dtype: torch.dtype
pad_percentage: Optional[float]
enable_math: bool
enable_flash: bool
enable_mem_efficient: bool
enable_cudnn: bool
def get_entries(self) -> list:
return [
self.batch_size,
self.num_heads,
self.max_sequence_len,
self.embed_dimension,
self.dtype,
self.pad_percentage,
self.enable_math,
self.enable_flash,
self.enable_mem_efficient,
self.enable_cudnn,
]
@classmethod
def get_entry_names(cls) -> list[str]:
return [
"batch_size",
"num_heads",
"max_sequence_len",
"embed_dimension",
"dtype",
"pad_percentage",
"enable_math",
"enable_flash",
"enable_mem_efficient",
"enable_cudnn",
]
@dataclass(frozen=True)
| ExperimentConfig |
python | huggingface__transformers | tests/models/clap/test_modeling_clap.py | {
"start": 16248,
"end": 18362
} | class ____:
def __init__(self, parent, text_kwargs=None, audio_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if audio_kwargs is None:
audio_kwargs = {}
self.parent = parent
self.text_model_tester = ClapTextModelTester(parent, **text_kwargs)
self.audio_model_tester = ClapAudioModelTester(parent, **audio_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.is_training = is_training
def prepare_config_and_inputs(self):
_, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
_, input_features = self.audio_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, input_features
def get_config(self):
return ClapConfig(
text_config=self.text_model_tester.get_config().to_dict(),
audio_config=self.audio_model_tester.get_config().to_dict(),
projection_dim=64,
)
def create_and_check_model(self, config, input_ids, attention_mask, input_features):
model = ClapModel(config).to(torch_device).eval()
with torch.no_grad():
result = model(input_ids, input_features, attention_mask)
self.parent.assertEqual(
result.logits_per_audio.shape, (self.audio_model_tester.batch_size, self.text_model_tester.batch_size)
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.audio_model_tester.batch_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, input_features = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"input_features": input_features,
"return_loss": True,
}
return config, inputs_dict
@require_torch
| ClapModelTester |
python | pydantic__pydantic | pydantic/v1/networks.py | {
"start": 16018,
"end": 17098
} | class ____(AnyUrl):
allowed_schemes = {'kafka'}
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {
'domain': 'localhost',
'port': '9092',
}
def stricturl(
*,
strip_whitespace: bool = True,
min_length: int = 1,
max_length: int = 2**16,
tld_required: bool = True,
host_required: bool = True,
allowed_schemes: Optional[Collection[str]] = None,
) -> Type[AnyUrl]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
min_length=min_length,
max_length=max_length,
tld_required=tld_required,
host_required=host_required,
allowed_schemes=allowed_schemes,
)
return type('UrlValue', (AnyUrl,), namespace)
def import_email_validator() -> None:
global email_validator
try:
import email_validator
except ImportError as e:
raise ImportError('email-validator is not installed, run `pip install pydantic[email]`') from e
| KafkaDsn |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType42.py | {
"start": 638,
"end": 1395
} | class ____(Protocol[T, P]):
def __init__(self, *args: P.args, **kwds: P.kwargs): ...
def make_a(x: Callable[P, R]) -> Type[A[R, P]]: ...
@overload
def func2(x: Type[A[R, P]]) -> Type[A[R, P]]: ...
@overload
def func2(x: Callable[P, R]) -> Type[A[R, P]]: ...
def func2(x: Union[Type[A[R, P]], Callable[P, R]]) -> Type[A[R, P]]: ...
def func3():
def foo(x: int) -> str: ...
x = make_a(foo)
y = func2(x)
z = func2(make_a(foo))
reveal_type(y, expected_text="type[A[str, (x: int)]]")
reveal_type(z, expected_text="type[A[str, (x: int)]]")
def func4(my_dict: dict[str, str]):
reveal_type(my_dict.get("item1", ""), expected_text="str")
reveal_type(my_dict.get("item1", my_dict.get("item2", "")), expected_text="str")
| A |
python | PyCQA__pylint | tests/functional/g/generic_alias/generic_alias_mixed_py39.py | {
"start": 737,
"end": 873
} | class ____(typing.List[typing.Iterable[int]]):
pass
# Missing implementation for 'collections.abc' derived classes
| DerivedListIterable |
python | doocs__leetcode | solution/3700-3799/3738.Longest Non-Decreasing Subarray After Replacing at Most One Element/Solution.py | {
"start": 0,
"end": 717
} | class ____:
def longestSubarray(self, nums: List[int]) -> int:
n = len(nums)
left = [1] * n
right = [1] * n
for i in range(1, n):
if nums[i] >= nums[i - 1]:
left[i] = left[i - 1] + 1
for i in range(n - 2, -1, -1):
if nums[i] <= nums[i + 1]:
right[i] = right[i + 1] + 1
ans = max(left)
for i in range(n):
a = 0 if i - 1 < 0 else left[i - 1]
b = 0 if i + 1 >= n else right[i + 1]
if i - 1 >= 0 and i + 1 < n and nums[i - 1] > nums[i + 1]:
ans = max(ans, a + 1, b + 1)
else:
ans = max(ans, a + b + 1)
return ans
| Solution |
python | astropy__astropy | astropy/samp/errors.py | {
"start": 475,
"end": 555
} | class ____(Exception):
"""
SAMP Client exceptions.
"""
| SAMPClientError |
python | jschneier__django-storages | tests/test_s3.py | {
"start": 748,
"end": 869
} | class ____(s3.S3ManifestStaticStorage):
def read_manifest(self):
return None
| S3ManifestStaticStorageTestStorage |
python | langchain-ai__langchain | libs/core/langchain_core/chat_history.py | {
"start": 468,
"end": 7159
} | class ____(ABC):
"""Abstract base class for storing chat message history.
Implementations guidelines:
Implementations are expected to over-ride all or some of the following methods:
* add_messages: sync variant for bulk addition of messages
* aadd_messages: async variant for bulk addition of messages
* messages: sync variant for getting messages
* aget_messages: async variant for getting messages
* clear: sync variant for clearing messages
* aclear: async variant for clearing messages
add_messages contains a default implementation that calls add_message
for each message in the sequence. This is provided for backwards compatibility
with existing implementations which only had add_message.
Async variants all have default implementations that call the sync variants.
Implementers can choose to over-ride the async implementations to provide
truly async implementations.
Usage guidelines:
When used for updating history, users should favor usage of `add_messages`
over `add_message` or other variants like `add_user_message` and `add_ai_message`
to avoid unnecessary round-trips to the underlying persistence layer.
Example: Shows a default implementation.
```python
import json
import os
from langchain_core.messages import messages_from_dict, message_to_dict
class FileChatMessageHistory(BaseChatMessageHistory):
storage_path: str
session_id: str
@property
def messages(self) -> list[BaseMessage]:
try:
with open(
os.path.join(self.storage_path, self.session_id),
"r",
encoding="utf-8",
) as f:
messages_data = json.load(f)
return messages_from_dict(messages_data)
except FileNotFoundError:
return []
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
all_messages = list(self.messages) # Existing messages
all_messages.extend(messages) # Add new messages
serialized = [message_to_dict(message) for message in all_messages]
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump(serialized, f)
def clear(self) -> None:
file_path = os.path.join(self.storage_path, self.session_id)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
json.dump([], f)
```
"""
messages: list[BaseMessage]
"""A property or attribute that returns a list of messages.
In general, getting the messages may involve IO to the underlying
persistence layer, so this operation is expected to incur some
latency.
"""
async def aget_messages(self) -> list[BaseMessage]:
"""Async version of getting messages.
Can over-ride this method to provide an efficient async implementation.
In general, fetching messages may involve IO to the underlying
persistence layer.
Returns:
The messages.
"""
return await run_in_executor(None, lambda: self.messages)
def add_user_message(self, message: HumanMessage | str) -> None:
"""Convenience method for adding a human message string to the store.
!!! note
This is a convenience method. Code should favor the bulk `add_messages`
interface instead to save on round-trips to the persistence layer.
This method may be deprecated in a future release.
Args:
message: The `HumanMessage` to add to the store.
"""
if isinstance(message, HumanMessage):
self.add_message(message)
else:
self.add_message(HumanMessage(content=message))
def add_ai_message(self, message: AIMessage | str) -> None:
"""Convenience method for adding an `AIMessage` string to the store.
!!! note
This is a convenience method. Code should favor the bulk `add_messages`
interface instead to save on round-trips to the persistence layer.
This method may be deprecated in a future release.
Args:
message: The `AIMessage` to add.
"""
if isinstance(message, AIMessage):
self.add_message(message)
else:
self.add_message(AIMessage(content=message))
def add_message(self, message: BaseMessage) -> None:
"""Add a Message object to the store.
Args:
message: A BaseMessage object to store.
Raises:
NotImplementedError: If the sub-class has not implemented an efficient
`add_messages` method.
"""
if type(self).add_messages != BaseChatMessageHistory.add_messages:
# This means that the sub-class has implemented an efficient add_messages
# method, so we should use it.
self.add_messages([message])
else:
msg = (
"add_message is not implemented for this class. "
"Please implement add_message or add_messages."
)
raise NotImplementedError(msg)
def add_messages(self, messages: Sequence[BaseMessage]) -> None:
"""Add a list of messages.
Implementations should over-ride this method to handle bulk addition of messages
in an efficient manner to avoid unnecessary round-trips to the underlying store.
Args:
messages: A sequence of `BaseMessage` objects to store.
"""
for message in messages:
self.add_message(message)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
"""Async add a list of messages.
Args:
messages: A sequence of `BaseMessage` objects to store.
"""
await run_in_executor(None, self.add_messages, messages)
@abstractmethod
def clear(self) -> None:
"""Remove all messages from the store."""
async def aclear(self) -> None:
"""Async remove all messages from the store."""
await run_in_executor(None, self.clear)
def __str__(self) -> str:
"""Return a string representation of the chat history."""
return get_buffer_string(self.messages)
| BaseChatMessageHistory |
python | ray-project__ray | python/ray/experimental/channel/shared_memory_channel.py | {
"start": 3059,
"end": 5180
} | class ____(ChannelOutputType):
def __init__(
self,
*,
buffer_size_bytes: Optional[int] = None,
num_shm_buffers: Optional[int] = None,
):
"""
Args:
buffer_size_bytes: The initial buffer size in bytes for messages
that can be passed between tasks in the DAG. The buffers will
be automatically resized if larger messages are written to the
channel.
num_shm_buffers: The number of shared memory buffers per channel.
Note: In the case of multiple nodes, we only support 1 shared
memory buffer.
"""
super().__init__()
from ray.dag import DAGContext
ctx = DAGContext.get_current()
if buffer_size_bytes is None:
buffer_size_bytes = ctx.buffer_size_bytes
self.buffer_size_bytes = buffer_size_bytes
if num_shm_buffers is None:
num_shm_buffers = 1
self._num_shm_buffers = num_shm_buffers
def create_channel(
self,
writer: Optional["ray.actor.ActorHandle"],
reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]],
driver_actor_id: Optional[str] = None,
) -> "Channel":
"""
Instantiate a ChannelInterface class that can be used
to pass data of this type.
Args:
writer: The actor that may write to the channel. None signifies the driver.
reader_and_node_list: A list of tuples, where each tuple contains a reader
actor handle and the node ID where the actor is located.
driver_actor_id: If this channel is read by a driver and that driver is an
actual actor, this will be the actor ID of that driver actor.
Returns:
A ChannelInterface that can be used to pass data
of this type.
"""
return CompositeChannel(
writer,
reader_and_node_list,
self._num_shm_buffers,
driver_actor_id,
)
@PublicAPI(stability="alpha")
| SharedMemoryType |
python | ray-project__ray | python/ray/dag/tests/test_py_obj_scanner.py | {
"start": 102,
"end": 2390
} | class ____:
pass
def test_simple_replace():
scanner = _PyObjScanner(source_type=Source)
my_objs = [Source(), [Source(), {"key": Source()}]]
found = scanner.find_nodes(my_objs)
assert len(found) == 3
replaced = scanner.replace_nodes({obj: 1 for obj in found})
assert replaced == [1, [1, {"key": 1}]]
def test_replace_multiple_types():
class OtherSource:
pass
scanner = _PyObjScanner(source_type=(Source, OtherSource))
my_objs = [Source(), [Source(), {"key": Source(), "key2": OtherSource()}]]
found = scanner.find_nodes(my_objs)
assert len(found) == 4
replaced = scanner.replace_nodes(
{obj: 1 if isinstance(obj, Source) else 2 for obj in found}
)
assert replaced == [1, [1, {"key": 1, "key2": 2}]]
def test_replace_nested_in_obj():
"""Test that the source can be nested in arbitrary objects."""
scanner = _PyObjScanner(source_type=Source)
class Outer:
def __init__(self, inner: Any):
self._inner = inner
def __eq__(self, other):
return self._inner == other._inner
my_objs = [Outer(Source()), Outer(Outer(Source())), Outer((Source(),))]
found = scanner.find_nodes(my_objs)
assert len(found) == 3
replaced = scanner.replace_nodes({obj: 1 for obj in found})
assert replaced == [Outer(1), Outer(Outer(1)), Outer((1,))]
def test_scanner_clear():
"""Test scanner clear to make the scanner GCable"""
prev_len = len(_instances)
def call_find_nodes():
scanner = _PyObjScanner(source_type=Source)
my_objs = [Source(), [Source(), {"key": Source()}]]
scanner.find_nodes(my_objs)
scanner.clear()
assert id(scanner) not in _instances
call_find_nodes()
assert prev_len == len(_instances)
def call_find_and_replace_nodes():
scanner = _PyObjScanner(source_type=Source)
my_objs = [Source(), [Source(), {"key": Source()}]]
found = scanner.find_nodes(my_objs)
scanner.replace_nodes({obj: 1 for obj in found})
scanner.clear()
assert id(scanner) not in _instances
call_find_and_replace_nodes()
assert prev_len == len(_instances)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| Source |
python | numpy__numpy | numpy/lib/tests/test_packbits.py | {
"start": 13573,
"end": 17543
} | class ____:
x = np.array([
[1, 0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
], dtype=np.uint8)
padded1 = np.zeros(57, dtype=np.uint8)
padded1[:49] = x.ravel()
padded1b = np.zeros(57, dtype=np.uint8)
padded1b[:49] = x[::-1].copy().ravel()
padded2 = np.zeros((9, 9), dtype=np.uint8)
padded2[:7, :7] = x
@pytest.mark.parametrize('bitorder', ('little', 'big'))
@pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
def test_roundtrip(self, bitorder, count):
if count < 0:
# one extra zero of padding
cutoff = count - 1
else:
cutoff = count
# test complete invertibility of packbits and unpackbits with count
packed = np.packbits(self.x, bitorder=bitorder)
unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
assert_equal(unpacked.dtype, np.uint8)
assert_array_equal(unpacked, self.padded1[:cutoff])
@pytest.mark.parametrize('kwargs', [
{}, {'count': None},
])
def test_count(self, kwargs):
packed = np.packbits(self.x)
unpacked = np.unpackbits(packed, **kwargs)
assert_equal(unpacked.dtype, np.uint8)
assert_array_equal(unpacked, self.padded1[:-1])
@pytest.mark.parametrize('bitorder', ('little', 'big'))
# delta==-1 when count<0 because one extra zero of padding
@pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
def test_roundtrip_axis(self, bitorder, count):
if count < 0:
# one extra zero of padding
cutoff = count - 1
else:
cutoff = count
packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
unpacked0 = np.unpackbits(packed0, axis=0, count=count,
bitorder=bitorder)
assert_equal(unpacked0.dtype, np.uint8)
assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])
packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
unpacked1 = np.unpackbits(packed1, axis=1, count=count,
bitorder=bitorder)
assert_equal(unpacked1.dtype, np.uint8)
assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])
@pytest.mark.parametrize('kwargs', [
{}, {'count': None},
{'bitorder': 'little'},
{'bitorder': 'little', 'count': None},
{'bitorder': 'big'},
{'bitorder': 'big', 'count': None},
])
def test_axis_count(self, kwargs):
packed0 = np.packbits(self.x, axis=0)
unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
assert_equal(unpacked0.dtype, np.uint8)
if kwargs.get('bitorder', 'big') == 'big':
assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])
else:
assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]])
packed1 = np.packbits(self.x, axis=1)
unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
assert_equal(unpacked1.dtype, np.uint8)
if kwargs.get('bitorder', 'big') == 'big':
assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])
else:
assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1])
def test_bad_count(self):
packed0 = np.packbits(self.x, axis=0)
assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
packed1 = np.packbits(self.x, axis=1)
assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
packed = np.packbits(self.x)
assert_raises(ValueError, np.unpackbits, packed, count=-57)
| TestCount |
python | arrow-py__arrow | arrow/locales.py | {
"start": 60670,
"end": 61241
} | class ____(ArabicLocale):
names = ["ar-tn", "ar-dz"]
month_names = [
"",
"جانفي",
"فيفري",
"مارس",
"أفريل",
"ماي",
"جوان",
"جويلية",
"أوت",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
month_abbreviations = [
"",
"جانفي",
"فيفري",
"مارس",
"أفريل",
"ماي",
"جوان",
"جويلية",
"أوت",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
| AlgeriaTunisiaArabicLocale |
python | psf__requests | tests/test_utils.py | {
"start": 8688,
"end": 8910
} | class ____:
def test_valid(self):
assert address_in_network("192.168.1.1", "192.168.1.0/24")
def test_invalid(self):
assert not address_in_network("172.16.0.1", "192.168.1.0/24")
| TestAddressInNetwork |
python | getsentry__sentry | tests/apidocs/endpoints/events/test_group_tagkey_values.py | {
"start": 104,
"end": 598
} | class ____(APIDocsTestCase):
def setUp(self) -> None:
key, value = "foo", "bar"
event = self.create_event("a", tags={key: value})
self.login_as(user=self.user)
self.url = f"/api/0/organizations/{self.organization.slug}/issues/{event.group_id}/tags/{key}/values/"
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
| GroupTagKeyValuesDocs |
python | getsentry__sentry | src/sentry/search/eap/types.py | {
"start": 2112,
"end": 2309
} | class ____(str, Enum):
LOGS = "logs"
SPANS = "spans"
UPTIME_RESULTS = "uptime_results"
TRACEMETRICS = "tracemetrics"
PROFILE_FUNCTIONS = "profile_functions"
| SupportedTraceItemType |
python | huggingface__transformers | src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | {
"start": 5826,
"end": 7804
} | class ____(PreTrainedModel):
config: InstructBlipVideoConfig
base_model_prefix = "blip"
input_modalities = ("video", "text")
supports_gradient_checkpointing = True
_supports_attention_backend = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_no_split_modules = [
"InstructBlipVideoQFormerEmbeddings",
"InstructBlipVideoAttention",
"InstructBlipVideoQFormerMultiHeadAttention",
"InstructBlipVideoQFormerSelfOutput",
]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
factor = self.config.initializer_range
if isinstance(module, InstructBlipVideoVisionEmbeddings):
init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
elif isinstance(module, (InstructBlipVideoForConditionalGeneration, InstructBlipVideoModel)):
init.zeros_(module.query_tokens)
# Adapted from transformers.models.siglip.modeling_siglip.eager_attention_forward -> InstructBlipVideo doesn't cast attn weights to fp32
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| InstructBlipVideoPreTrainedModel |
python | getsentry__sentry | tests/sentry/preprod/api/endpoints/pull_request/test_organization_pullrequest_comments.py | {
"start": 424,
"end": 17690
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.factory = APIRequestFactory()
self.integration = self.create_integration(
organization=self.organization,
provider="github",
name="Test GitHub Integration",
external_id="12345",
metadata={
"access_token": "test-token",
"expires_at": None,
"installation": {"id": 12345, "account": {"login": "getsentry"}},
},
)
self.repository = Repository.objects.create(
organization_id=self.organization.id,
name="getsentry/sentry",
provider="integrations:github",
integration_id=self.integration.id,
)
self.mock_general_comments = [
{
"id": 1,
"node_id": "IC_test1",
"url": "https://api.github.com/repos/getsentry/sentry/issues/comments/1",
"html_url": "https://github.com/getsentry/sentry/pull/100#issuecomment-1",
"body": "This looks great!",
"user": {
"login": "testuser1",
"id": 123,
"node_id": "U_test1",
"avatar_url": "https://avatars.githubusercontent.com/u/123",
"gravatar_id": "",
"url": "https://api.github.com/users/testuser1",
"html_url": "https://github.com/testuser1",
"followers_url": "https://api.github.com/users/testuser1/followers",
"following_url": "https://api.github.com/users/testuser1/following{/other_user}",
"gists_url": "https://api.github.com/users/testuser1/gists{/gist_id}",
"starred_url": "https://api.github.com/users/testuser1/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/testuser1/subscriptions",
"organizations_url": "https://api.github.com/users/testuser1/orgs",
"repos_url": "https://api.github.com/users/testuser1/repos",
"events_url": "https://api.github.com/users/testuser1/events{/privacy}",
"received_events_url": "https://api.github.com/users/testuser1/received_events",
"type": "User",
"site_admin": False,
},
"created_at": "2023-01-01T12:00:00Z",
"updated_at": "2023-01-01T12:00:00Z",
"issue_url": "https://api.github.com/repos/getsentry/sentry/issues/100",
"author_association": "CONTRIBUTOR",
},
{
"id": 2,
"node_id": "IC_test2",
"url": "https://api.github.com/repos/getsentry/sentry/issues/comments/2",
"html_url": "https://github.com/getsentry/sentry/pull/100#issuecomment-2",
"body": "Can you add tests?",
"user": {
"login": "testuser2",
"id": 456,
"node_id": "U_test2",
"avatar_url": "https://avatars.githubusercontent.com/u/456",
"gravatar_id": "",
"url": "https://api.github.com/users/testuser2",
"html_url": "https://github.com/testuser2",
"followers_url": "https://api.github.com/users/testuser2/followers",
"following_url": "https://api.github.com/users/testuser2/following{/other_user}",
"gists_url": "https://api.github.com/users/testuser2/gists{/gist_id}",
"starred_url": "https://api.github.com/users/testuser2/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/testuser2/subscriptions",
"organizations_url": "https://api.github.com/users/testuser2/orgs",
"repos_url": "https://api.github.com/users/testuser2/repos",
"events_url": "https://api.github.com/users/testuser2/events{/privacy}",
"received_events_url": "https://api.github.com/users/testuser2/received_events",
"type": "User",
"site_admin": False,
},
"created_at": "2023-01-02T10:30:00Z",
"updated_at": "2023-01-02T10:30:00Z",
"issue_url": "https://api.github.com/repos/getsentry/sentry/issues/100",
"author_association": "MEMBER",
},
]
self.mock_review_comments = [
{
"id": 10,
"node_id": "RC_test10",
"url": "https://api.github.com/repos/getsentry/sentry/pulls/comments/10",
"html_url": "https://github.com/getsentry/sentry/pull/100#discussion_r10",
"path": "src/components/Button.tsx",
"line": 25,
"body": "Consider using a const here",
"user": {
"login": "reviewer1",
"id": 789,
"node_id": "U_test789",
"avatar_url": "https://avatars.githubusercontent.com/u/789",
"gravatar_id": "",
"url": "https://api.github.com/users/reviewer1",
"html_url": "https://github.com/reviewer1",
"followers_url": "https://api.github.com/users/reviewer1/followers",
"following_url": "https://api.github.com/users/reviewer1/following{/other_user}",
"gists_url": "https://api.github.com/users/reviewer1/gists{/gist_id}",
"starred_url": "https://api.github.com/users/reviewer1/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/reviewer1/subscriptions",
"organizations_url": "https://api.github.com/users/reviewer1/orgs",
"repos_url": "https://api.github.com/users/reviewer1/repos",
"events_url": "https://api.github.com/users/reviewer1/events{/privacy}",
"received_events_url": "https://api.github.com/users/reviewer1/received_events",
"type": "User",
"site_admin": False,
},
"created_at": "2023-01-01T14:00:00Z",
"updated_at": "2023-01-01T14:00:00Z",
"author_association": "MEMBER",
"commit_id": "abc123def456789",
"original_commit_id": "abc123def456789",
"diff_hunk": "@@ -20,6 +20,8 @@ function Button() {",
"pull_request_url": "https://api.github.com/repos/getsentry/sentry/pulls/100",
"pull_request_review_id": 1,
"_links": {
"self": {
"href": "https://api.github.com/repos/getsentry/sentry/pulls/comments/10"
},
"html": {"href": "https://github.com/getsentry/sentry/pull/100#discussion_r10"},
"pull_request": {
"href": "https://api.github.com/repos/getsentry/sentry/pulls/100"
},
},
},
{
"id": 11,
"node_id": "RC_test11",
"url": "https://api.github.com/repos/getsentry/sentry/pulls/comments/11",
"html_url": "https://github.com/getsentry/sentry/pull/100#discussion_r11",
"path": "src/components/Button.tsx",
"line": 30,
"body": "Good catch!",
"user": {
"login": "reviewer2",
"id": 101,
"node_id": "U_test101",
"avatar_url": "https://avatars.githubusercontent.com/u/101",
"gravatar_id": "",
"url": "https://api.github.com/users/reviewer2",
"html_url": "https://github.com/reviewer2",
"followers_url": "https://api.github.com/users/reviewer2/followers",
"following_url": "https://api.github.com/users/reviewer2/following{/other_user}",
"gists_url": "https://api.github.com/users/reviewer2/gists{/gist_id}",
"starred_url": "https://api.github.com/users/reviewer2/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/reviewer2/subscriptions",
"organizations_url": "https://api.github.com/users/reviewer2/orgs",
"repos_url": "https://api.github.com/users/reviewer2/repos",
"events_url": "https://api.github.com/users/reviewer2/events{/privacy}",
"received_events_url": "https://api.github.com/users/reviewer2/received_events",
"type": "User",
"site_admin": False,
},
"created_at": "2023-01-01T15:00:00Z",
"updated_at": "2023-01-01T15:00:00Z",
"author_association": "CONTRIBUTOR",
"commit_id": "abc123def456789",
"original_commit_id": "abc123def456789",
"diff_hunk": "@@ -25,6 +25,8 @@ function Button() {",
"pull_request_url": "https://api.github.com/repos/getsentry/sentry/pulls/100",
"pull_request_review_id": 1,
"_links": {
"self": {
"href": "https://api.github.com/repos/getsentry/sentry/pulls/comments/11"
},
"html": {"href": "https://github.com/getsentry/sentry/pull/100#discussion_r11"},
"pull_request": {
"href": "https://api.github.com/repos/getsentry/sentry/pulls/100"
},
},
},
{
"id": 12,
"node_id": "RC_test12",
"url": "https://api.github.com/repos/getsentry/sentry/pulls/comments/12",
"html_url": "https://github.com/getsentry/sentry/pull/100#discussion_r12",
"path": "src/utils/helper.ts",
"line": 10,
"body": "This could be simplified",
"user": {
"login": "reviewer1",
"id": 789,
"node_id": "U_test789",
"avatar_url": "https://avatars.githubusercontent.com/u/789",
"gravatar_id": "",
"url": "https://api.github.com/users/reviewer1",
"html_url": "https://github.com/reviewer1",
"followers_url": "https://api.github.com/users/reviewer1/followers",
"following_url": "https://api.github.com/users/reviewer1/following{/other_user}",
"gists_url": "https://api.github.com/users/reviewer1/gists{/gist_id}",
"starred_url": "https://api.github.com/users/reviewer1/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/reviewer1/subscriptions",
"organizations_url": "https://api.github.com/users/reviewer1/orgs",
"repos_url": "https://api.github.com/users/reviewer1/repos",
"events_url": "https://api.github.com/users/reviewer1/events{/privacy}",
"received_events_url": "https://api.github.com/users/reviewer1/received_events",
"type": "User",
"site_admin": False,
},
"created_at": "2023-01-02T09:00:00Z",
"updated_at": "2023-01-02T09:00:00Z",
"author_association": "MEMBER",
"commit_id": "abc123def456789",
"original_commit_id": "abc123def456789",
"diff_hunk": "@@ -8,6 +8,8 @@ function helper() {",
"pull_request_url": "https://api.github.com/repos/getsentry/sentry/pulls/100",
"pull_request_review_id": 2,
"_links": {
"self": {
"href": "https://api.github.com/repos/getsentry/sentry/pulls/comments/12"
},
"html": {"href": "https://github.com/getsentry/sentry/pull/100#discussion_r12"},
"pull_request": {
"href": "https://api.github.com/repos/getsentry/sentry/pulls/100"
},
},
},
]
def _make_request(self, repo_name="getsentry/sentry", pr_number="100"):
"""Helper to make API request."""
request = self.factory.get("/")
request.user = self.user
endpoint = OrganizationPrCommentsEndpoint()
return endpoint.get(
request=request,
organization=self.organization,
repo_name=repo_name,
pr_number=pr_number,
)
@with_feature("organizations:pr-page")
@patch("sentry.integrations.github.client.GitHubApiClient.get")
def test_successful_pr_comments_fetch(self, mock_get):
"""Test successful fetch of both general and review comments."""
def mock_get_side_effect(url):
if "issues" in url:
return self.mock_general_comments
elif "pulls" in url:
return self.mock_review_comments
return []
mock_get.side_effect = mock_get_side_effect
response = self._make_request()
assert response.status_code == 200
# Verify both API calls were made
assert mock_get.call_count == 2
mock_get.assert_any_call("/repos/getsentry/sentry/issues/100/comments")
mock_get.assert_any_call("/repos/getsentry/sentry/pulls/100/comments")
# Verify response structure
assert "general_comments" in response.data
assert "file_comments" in response.data
# Verify general comments
general_comments = response.data["general_comments"]
assert len(general_comments) == 2
assert general_comments[0]["body"] == "This looks great!"
assert general_comments[1]["body"] == "Can you add tests?"
# Verify file comments are organized by file
file_comments = response.data["file_comments"]
assert len(file_comments) == 2 # Two files have comments
assert "src/components/Button.tsx" in file_comments
assert "src/utils/helper.ts" in file_comments
# Verify Button.tsx has 2 comments
button_comments = file_comments["src/components/Button.tsx"]
assert len(button_comments) == 2
assert button_comments[0]["body"] == "Consider using a const here"
assert button_comments[1]["body"] == "Good catch!"
# Verify helper.ts has 1 comment
helper_comments = file_comments["src/utils/helper.ts"]
assert len(helper_comments) == 1
assert helper_comments[0]["body"] == "This could be simplified"
@with_feature("organizations:pr-page")
def test_no_github_client(self):
"""Test when no GitHub client is available (no integration set up)."""
Repository.objects.create(
organization_id=self.organization.id,
name="nonexistent/repo",
provider="integrations:github",
integration_id=None, # No integration
)
response = self._make_request(repo_name="nonexistent/repo")
assert response.status_code == 404
assert response.data["error"] == "integration_not_found"
assert "No GitHub integration found" in response.data["message"]
@with_feature("organizations:pr-page")
@patch("sentry.integrations.github.client.GitHubApiClient.get")
def test_github_api_error(self, mock_get):
"""Test GitHub API error handling."""
# Simulate GitHub API error
mock_get.side_effect = ApiError("API rate limit exceeded")
response = self._make_request()
assert response.status_code == 502
assert response.data["error"] == "api_error"
assert "Failed to fetch pull request comments from GitHub" in response.data["message"]
@with_feature("organizations:pr-page")
def test_repository_not_found(self):
"""Test when repository doesn't exist in the database."""
response = self._make_request(repo_name="does-not/exist")
assert response.status_code == 404
assert response.data["error"] == "integration_not_found"
assert "No GitHub integration found" in response.data["message"]
@with_feature("organizations:pr-page")
@patch("sentry.integrations.github.client.GitHubApiClient.get")
def test_unexpected_error(self, mock_get):
"""Test handling of unexpected errors."""
# Simulate unexpected error (not ApiError)
mock_get.side_effect = ValueError("Unexpected error")
response = self._make_request()
assert response.status_code == 500
assert response.data["error"] == "internal_error"
assert "An unexpected error occurred" in response.data["message"]
| OrganizationPrCommentsEndpointTest |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/dag_runs.py | {
"start": 991,
"end": 1457
} | class ____(BaseModel):
"""DAG Run serializer for responses."""
id: int
dag_id: str
run_id: str
logical_date: datetime | None
run_after: datetime
start_date: datetime | None
end_date: datetime | None
state: DagRunState
@computed_field
def duration(self) -> float | None:
if self.end_date and self.start_date:
return (self.end_date - self.start_date).total_seconds()
return None
| DAGRunLightResponse |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/version.py | {
"start": 691,
"end": 2076
} | class ____(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
| Version |
python | Lightning-AI__lightning | tests/tests_pytorch/strategies/test_model_parallel_integration.py | {
"start": 2954,
"end": 3479
} | class ____(LightningModule):
def __init__(self, compile=False):
super().__init__()
self.model = FeedForward()
self._compile = compile
def training_step(self, batch):
output = self.model(batch)
return output.sum()
def train_dataloader(self):
dataset_size = 8
dataset = RandomDataset(32, dataset_size)
return DataLoader(dataset, batch_size=2)
def configure_optimizers(self):
return torch.optim.AdamW(self.model.parameters())
| TemplateModel |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/endpoint_service.py | {
"start": 5193,
"end": 8078
} | class ____(GoogleCloudBaseOperator):
"""
Deletes an Endpoint.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param endpoint_id: Required. The Endpoint ID to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "endpoint_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
endpoint_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.endpoint_id = endpoint_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = EndpointServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
self.log.info("Deleting endpoint: %s", self.endpoint_id)
operation = hook.delete_endpoint(
project_id=self.project_id,
region=self.region,
endpoint=self.endpoint_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Endpoint was deleted.")
except NotFound:
self.log.info("The Endpoint ID %s does not exist.", self.endpoint_id)
| DeleteEndpointOperator |
python | django__django | django/core/management/commands/compilemessages.py | {
"start": 601,
"end": 7006
} | class ____(BaseCommand):
help = "Compiles .po files to .mo files for use with builtin gettext support."
requires_system_checks = []
program = "msgfmt"
program_options = ["--check-format"]
def add_arguments(self, parser):
parser.add_argument(
"--locale",
"-l",
action="append",
default=[],
help="Locale(s) to process (e.g. de_AT). Default is to process all. "
"Can be used multiple times.",
)
parser.add_argument(
"--exclude",
"-x",
action="append",
default=[],
help="Locales to exclude. Default is none. Can be used multiple times.",
)
parser.add_argument(
"--use-fuzzy",
"-f",
dest="fuzzy",
action="store_true",
help="Use fuzzy translations.",
)
parser.add_argument(
"--ignore",
"-i",
action="append",
dest="ignore_patterns",
default=[],
metavar="PATTERN",
help="Ignore directories matching this glob-style pattern. "
"Use multiple times to ignore more.",
)
def handle(self, **options):
locale = options["locale"]
exclude = options["exclude"]
ignore_patterns = set(options["ignore_patterns"])
self.verbosity = options["verbosity"]
if options["fuzzy"]:
self.program_options = [*self.program_options, "-f"]
if find_command(self.program) is None:
raise CommandError(
f"Can't find {self.program}. Make sure you have GNU gettext "
"tools 0.19 or newer installed."
)
basedirs = [os.path.join("conf", "locale"), "locale"]
if os.environ.get("DJANGO_SETTINGS_MODULE"):
from django.conf import settings
basedirs.extend(settings.LOCALE_PATHS)
# Walk entire tree, looking for locale directories
for dirpath, dirnames, filenames in os.walk(".", topdown=True):
# As we may modify dirnames, iterate through a copy of it instead
for dirname in list(dirnames):
if is_ignored_path(
os.path.normpath(os.path.join(dirpath, dirname)), ignore_patterns
):
dirnames.remove(dirname)
elif dirname == "locale":
basedirs.append(os.path.join(dirpath, dirname))
# Gather existing directories.
basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))
if not basedirs:
raise CommandError(
"This script should be run from the Django Git "
"checkout or your project or app tree, or with "
"the settings module specified."
)
# Build locale list
all_locales = []
for basedir in basedirs:
locale_dirs = filter(os.path.isdir, glob.glob("%s/*" % basedir))
all_locales.extend(map(os.path.basename, locale_dirs))
# Account for excluded locales
locales = locale or all_locales
locales = set(locales).difference(exclude)
self.has_errors = False
for basedir in basedirs:
if locales:
dirs = [
os.path.join(basedir, locale, "LC_MESSAGES") for locale in locales
]
else:
dirs = [basedir]
locations = []
for ldir in dirs:
for dirpath, dirnames, filenames in os.walk(ldir):
locations.extend(
(dirpath, f) for f in filenames if f.endswith(".po")
)
if locations:
self.compile_messages(locations)
if self.has_errors:
raise CommandError("compilemessages generated one or more errors.")
def compile_messages(self, locations):
"""
Locations is a list of tuples: [(directory, file), ...]
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for i, (dirpath, f) in enumerate(locations):
po_path = Path(dirpath) / f
mo_path = po_path.with_suffix(".mo")
try:
if mo_path.stat().st_mtime >= po_path.stat().st_mtime:
if self.verbosity > 0:
self.stdout.write(
"File “%s” is already compiled and up to date."
% po_path
)
continue
except FileNotFoundError:
pass
if self.verbosity > 0:
self.stdout.write("processing file %s in %s" % (f, dirpath))
if has_bom(po_path):
self.stderr.write(
"The %s file has a BOM (Byte Order Mark). Django only "
"supports .po files encoded in UTF-8 and without any BOM."
% po_path
)
self.has_errors = True
continue
# Check writability on first location
if i == 0 and not is_dir_writable(mo_path.parent):
self.stderr.write(
"The po files under %s are in a seemingly not writable "
"location. mo files will not be updated/created." % dirpath
)
self.has_errors = True
return
args = [self.program, *self.program_options, "-o", mo_path, po_path]
futures.append(executor.submit(popen_wrapper, args))
for future in concurrent.futures.as_completed(futures):
output, errors, status = future.result()
if status:
if self.verbosity > 0:
if errors:
self.stderr.write(
"Execution of %s failed: %s" % (self.program, errors)
)
else:
self.stderr.write("Execution of %s failed" % self.program)
self.has_errors = True
| Command |
python | getsentry__sentry | src/sentry/integrations/vercel/integration.py | {
"start": 15338,
"end": 18518
} | class ____(IntegrationProvider):
key = "vercel"
name = "Vercel"
can_add = False
can_disable = False
metadata = metadata
integration_cls = VercelIntegration
features = frozenset([IntegrationFeatures.DEPLOYMENT])
oauth_redirect_url = "/extensions/vercel/configure/"
# feature flag handler is in getsentry
requires_feature_flag = True
def _identity_pipeline_view(self) -> PipelineView[IntegrationPipeline]:
return NestedPipelineView(
bind_key="identity",
provider_key=self.key,
pipeline_cls=IdentityPipeline,
config={"redirect_url": absolute_uri(self.oauth_redirect_url)},
)
def get_pipeline_views(self) -> Sequence[PipelineView[IntegrationPipeline]]:
return [self._identity_pipeline_view()]
def build_integration(self, state: Mapping[str, Any]) -> IntegrationData:
data = state["identity"]["data"]
access_token = data["access_token"]
team_id = data.get("team_id")
client = VercelClient(access_token, team_id)
if team_id:
external_id = team_id
installation_type = "team"
team = client.get_team()
name = team["name"]
else:
external_id = data["user_id"]
installation_type = "user"
user = client.get_user()
name = user.get("name") or user["username"]
return {
"name": name,
"external_id": external_id,
"metadata": {
"access_token": access_token,
"installation_id": data["installation_id"],
"installation_type": installation_type,
},
"post_install_data": {"user_id": state["user_id"]},
}
def post_install(
self,
integration: Integration,
organization: RpcOrganization,
*,
extra: dict[str, Any],
) -> None:
# check if we have an Vercel internal installation already
if SentryAppInstallationForProvider.objects.filter(
organization_id=organization.id, provider="vercel"
).exists():
logger.info(
"vercel.post_install.installation_exists",
extra={"organization_id": organization.id},
)
return
user = User.objects.get(id=extra.get("user_id"))
# create the internal integration and link it to the join table
sentry_app = SentryAppCreator(
name="Vercel Internal Integration",
author="Auto-generated by Sentry",
organization_id=organization.id,
is_internal=True,
verify_install=False,
overview=internal_integration_overview.strip(),
scopes=["project:releases", "project:read", "project:write"],
).run(user=user)
sentry_app_installation = SentryAppInstallation.objects.get(sentry_app=sentry_app)
SentryAppInstallationForProvider.objects.create(
sentry_app_installation=sentry_app_installation,
organization_id=organization.id,
provider="vercel",
)
| VercelIntegrationProvider |
python | sympy__sympy | sympy/physics/mechanics/linearize.py | {
"start": 342,
"end": 17242
} | class ____:
"""This object holds the general model form for a dynamic system. This
model is used for computing the linearized form of the system, while
properly dealing with constraints leading to dependent coordinates and
speeds. The notation and method is described in [1]_.
Attributes
==========
f_0, f_1, f_2, f_3, f_4, f_c, f_v, f_a : Matrix
Matrices holding the general system form.
q, u, r : Matrix
Matrices holding the generalized coordinates, speeds, and
input vectors.
q_i, u_i : Matrix
Matrices of the independent generalized coordinates and speeds.
q_d, u_d : Matrix
Matrices of the dependent generalized coordinates and speeds.
perm_mat : Matrix
Permutation matrix such that [q_ind, u_ind]^T = perm_mat*[q, u]^T
References
==========
.. [1] D. L. Peterson, G. Gede, and M. Hubbard, "Symbolic linearization of
equations of motion of constrained multibody systems," Multibody
Syst Dyn, vol. 33, no. 2, pp. 143-161, Feb. 2015, doi:
10.1007/s11044-014-9436-5.
"""
def __init__(self, f_0, f_1, f_2, f_3, f_4, f_c, f_v, f_a, q, u, q_i=None,
q_d=None, u_i=None, u_d=None, r=None, lams=None,
linear_solver='LU'):
"""
Parameters
==========
f_0, f_1, f_2, f_3, f_4, f_c, f_v, f_a : array_like
System of equations holding the general system form.
Supply empty array or Matrix if the parameter
does not exist.
q : array_like
The generalized coordinates.
u : array_like
The generalized speeds
q_i, u_i : array_like, optional
The independent generalized coordinates and speeds.
q_d, u_d : array_like, optional
The dependent generalized coordinates and speeds.
r : array_like, optional
The input variables.
lams : array_like, optional
The lagrange multipliers
linear_solver : str, callable
Method used to solve the several symbolic linear systems of the
form ``A*x=b`` in the linearization process. If a string is
supplied, it should be a valid method that can be used with the
:meth:`sympy.matrices.matrixbase.MatrixBase.solve`. If a callable is
supplied, it should have the format ``x = f(A, b)``, where it
solves the equations and returns the solution. The default is
``'LU'`` which corresponds to SymPy's ``A.LUsolve(b)``.
``LUsolve()`` is fast to compute but will often result in
divide-by-zero and thus ``nan`` results.
"""
self.linear_solver = _parse_linear_solver(linear_solver)
# Generalized equation form
self.f_0 = Matrix(f_0)
self.f_1 = Matrix(f_1)
self.f_2 = Matrix(f_2)
self.f_3 = Matrix(f_3)
self.f_4 = Matrix(f_4)
self.f_c = Matrix(f_c)
self.f_v = Matrix(f_v)
self.f_a = Matrix(f_a)
# Generalized equation variables
self.q = Matrix(q)
self.u = Matrix(u)
none_handler = lambda x: Matrix(x) if x else Matrix()
self.q_i = none_handler(q_i)
self.q_d = none_handler(q_d)
self.u_i = none_handler(u_i)
self.u_d = none_handler(u_d)
self.r = none_handler(r)
self.lams = none_handler(lams)
# Derivatives of generalized equation variables
self._qd = self.q.diff(dynamicsymbols._t)
self._ud = self.u.diff(dynamicsymbols._t)
# If the user doesn't actually use generalized variables, and the
# qd and u vectors have any intersecting variables, this can cause
# problems. We'll fix this with some hackery, and Dummy variables
dup_vars = set(self._qd).intersection(self.u)
self._qd_dup = Matrix([var if var not in dup_vars else Dummy() for var
in self._qd])
# Derive dimension terms
l = len(self.f_c)
m = len(self.f_v)
n = len(self.q)
o = len(self.u)
s = len(self.r)
k = len(self.lams)
dims = namedtuple('dims', ['l', 'm', 'n', 'o', 's', 'k'])
self._dims = dims(l, m, n, o, s, k)
self._Pq = None
self._Pqi = None
self._Pqd = None
self._Pu = None
self._Pui = None
self._Pud = None
self._C_0 = None
self._C_1 = None
self._C_2 = None
self.perm_mat = None
self._setup_done = False
def _setup(self):
# Calculations here only need to be run once. They are moved out of
# the __init__ method to increase the speed of Linearizer creation.
self._form_permutation_matrices()
self._form_block_matrices()
self._form_coefficient_matrices()
self._setup_done = True
def _form_permutation_matrices(self):
"""Form the permutation matrices Pq and Pu."""
# Extract dimension variables
l, m, n, o, s, k = self._dims
# Compute permutation matrices
if n != 0:
self._Pq = permutation_matrix(self.q, Matrix([self.q_i, self.q_d]))
if l > 0:
self._Pqi = self._Pq[:, :-l]
self._Pqd = self._Pq[:, -l:]
else:
self._Pqi = self._Pq
self._Pqd = Matrix()
if o != 0:
self._Pu = permutation_matrix(self.u, Matrix([self.u_i, self.u_d]))
if m > 0:
self._Pui = self._Pu[:, :-m]
self._Pud = self._Pu[:, -m:]
else:
self._Pui = self._Pu
self._Pud = Matrix()
# Compute combination permutation matrix for computing A and B
P_col1 = Matrix([self._Pqi, zeros(o + k, n - l)])
P_col2 = Matrix([zeros(n, o - m), self._Pui, zeros(k, o - m)])
if P_col1:
if P_col2:
self.perm_mat = P_col1.row_join(P_col2)
else:
self.perm_mat = P_col1
else:
self.perm_mat = P_col2
def _form_coefficient_matrices(self):
"""Form the coefficient matrices C_0, C_1, and C_2."""
# Extract dimension variables
l, m, n, o, s, k = self._dims
# Build up the coefficient matrices C_0, C_1, and C_2
# If there are configuration constraints (l > 0), form C_0 as normal.
# If not, C_0 is I_(nxn). Note that this works even if n=0
if l > 0:
f_c_jac_q = self.f_c.jacobian(self.q)
self._C_0 = (eye(n) - self._Pqd *
self.linear_solver(f_c_jac_q*self._Pqd,
f_c_jac_q))*self._Pqi
else:
self._C_0 = eye(n)
# If there are motion constraints (m > 0), form C_1 and C_2 as normal.
# If not, C_1 is 0, and C_2 is I_(oxo). Note that this works even if
# o = 0.
if m > 0:
f_v_jac_u = self.f_v.jacobian(self.u)
temp = f_v_jac_u * self._Pud
if n != 0:
f_v_jac_q = self.f_v.jacobian(self.q)
self._C_1 = -self._Pud * self.linear_solver(temp, f_v_jac_q)
else:
self._C_1 = zeros(o, n)
self._C_2 = (eye(o) - self._Pud *
self.linear_solver(temp, f_v_jac_u))*self._Pui
else:
self._C_1 = zeros(o, n)
self._C_2 = eye(o)
def _form_block_matrices(self):
"""Form the block matrices for composing M, A, and B."""
# Extract dimension variables
l, m, n, o, s, k = self._dims
# Block Matrix Definitions. These are only defined if under certain
# conditions. If undefined, an empty matrix is used instead
if n != 0:
self._M_qq = self.f_0.jacobian(self._qd)
self._A_qq = -(self.f_0 + self.f_1).jacobian(self.q)
else:
self._M_qq = Matrix()
self._A_qq = Matrix()
if n != 0 and m != 0:
self._M_uqc = self.f_a.jacobian(self._qd_dup)
self._A_uqc = -self.f_a.jacobian(self.q)
else:
self._M_uqc = Matrix()
self._A_uqc = Matrix()
if n != 0 and o - m + k != 0:
self._M_uqd = self.f_3.jacobian(self._qd_dup)
self._A_uqd = -(self.f_2 + self.f_3 + self.f_4).jacobian(self.q)
else:
self._M_uqd = Matrix()
self._A_uqd = Matrix()
if o != 0 and m != 0:
self._M_uuc = self.f_a.jacobian(self._ud)
self._A_uuc = -self.f_a.jacobian(self.u)
else:
self._M_uuc = Matrix()
self._A_uuc = Matrix()
if o != 0 and o - m + k != 0:
self._M_uud = self.f_2.jacobian(self._ud)
self._A_uud = -(self.f_2 + self.f_3).jacobian(self.u)
else:
self._M_uud = Matrix()
self._A_uud = Matrix()
if o != 0 and n != 0:
self._A_qu = -self.f_1.jacobian(self.u)
else:
self._A_qu = Matrix()
if k != 0 and o - m + k != 0:
self._M_uld = self.f_4.jacobian(self.lams)
else:
self._M_uld = Matrix()
if s != 0 and o - m + k != 0:
self._B_u = -self.f_3.jacobian(self.r)
else:
self._B_u = Matrix()
def linearize(self, op_point=None, A_and_B=False, simplify=False):
"""Linearize the system about the operating point. Note that
q_op, u_op, qd_op, ud_op must satisfy the equations of motion.
These may be either symbolic or numeric.
Parameters
==========
op_point : dict or iterable of dicts, optional
Dictionary or iterable of dictionaries containing the operating
point conditions for all or a subset of the generalized
coordinates, generalized speeds, and time derivatives of the
generalized speeds. These will be substituted into the linearized
system before the linearization is complete. Leave set to ``None``
if you want the operating point to be an arbitrary set of symbols.
Note that any reduction in symbols (whether substituted for numbers
or expressions with a common parameter) will result in faster
runtime.
A_and_B : bool, optional
If A_and_B=False (default), (M, A, B) is returned and of
A_and_B=True, (A, B) is returned. See below.
simplify : bool, optional
Determines if returned values are simplified before return.
For large expressions this may be time consuming. Default is False.
Returns
=======
M, A, B : Matrices, ``A_and_B=False``
Matrices from the implicit form:
``[M]*[q', u']^T = [A]*[q_ind, u_ind]^T + [B]*r``
A, B : Matrices, ``A_and_B=True``
Matrices from the explicit form:
``[q_ind', u_ind']^T = [A]*[q_ind, u_ind]^T + [B]*r``
Notes
=====
Note that the process of solving with A_and_B=True is computationally
intensive if there are many symbolic parameters. For this reason, it
may be more desirable to use the default A_and_B=False, returning M, A,
and B. More values may then be substituted in to these matrices later
on. The state space form can then be found as A = P.T*M.LUsolve(A), B =
P.T*M.LUsolve(B), where P = Linearizer.perm_mat.
"""
# Run the setup if needed:
if not self._setup_done:
self._setup()
# Compose dict of operating conditions
if isinstance(op_point, dict):
op_point_dict = op_point
elif isinstance(op_point, Iterable):
op_point_dict = {}
for op in op_point:
op_point_dict.update(op)
else:
op_point_dict = {}
# Extract dimension variables
l, m, n, o, s, k = self._dims
# Rename terms to shorten expressions
M_qq = self._M_qq
M_uqc = self._M_uqc
M_uqd = self._M_uqd
M_uuc = self._M_uuc
M_uud = self._M_uud
M_uld = self._M_uld
A_qq = self._A_qq
A_uqc = self._A_uqc
A_uqd = self._A_uqd
A_qu = self._A_qu
A_uuc = self._A_uuc
A_uud = self._A_uud
B_u = self._B_u
C_0 = self._C_0
C_1 = self._C_1
C_2 = self._C_2
# Build up Mass Matrix
# |M_qq 0_nxo 0_nxk|
# M = |M_uqc M_uuc 0_mxk|
# |M_uqd M_uud M_uld|
if o != 0:
col2 = Matrix([zeros(n, o), M_uuc, M_uud])
if k != 0:
col3 = Matrix([zeros(n + m, k), M_uld])
if n != 0:
col1 = Matrix([M_qq, M_uqc, M_uqd])
if o != 0 and k != 0:
M = col1.row_join(col2).row_join(col3)
elif o != 0:
M = col1.row_join(col2)
else:
M = col1
elif k != 0:
M = col2.row_join(col3)
else:
M = col2
M_eq = msubs(M, op_point_dict)
# Build up state coefficient matrix A
# |(A_qq + A_qu*C_1)*C_0 A_qu*C_2|
# A = |(A_uqc + A_uuc*C_1)*C_0 A_uuc*C_2|
# |(A_uqd + A_uud*C_1)*C_0 A_uud*C_2|
# Col 1 is only defined if n != 0
if n != 0:
r1c1 = A_qq
if o != 0:
r1c1 += (A_qu * C_1)
r1c1 = r1c1 * C_0
if m != 0:
r2c1 = A_uqc
if o != 0:
r2c1 += (A_uuc * C_1)
r2c1 = r2c1 * C_0
else:
r2c1 = Matrix()
if o - m + k != 0:
r3c1 = A_uqd
if o != 0:
r3c1 += (A_uud * C_1)
r3c1 = r3c1 * C_0
else:
r3c1 = Matrix()
col1 = Matrix([r1c1, r2c1, r3c1])
else:
col1 = Matrix()
# Col 2 is only defined if o != 0
if o != 0:
if n != 0:
r1c2 = A_qu * C_2
else:
r1c2 = Matrix()
if m != 0:
r2c2 = A_uuc * C_2
else:
r2c2 = Matrix()
if o - m + k != 0:
r3c2 = A_uud * C_2
else:
r3c2 = Matrix()
col2 = Matrix([r1c2, r2c2, r3c2])
else:
col2 = Matrix()
if col1:
if col2:
Amat = col1.row_join(col2)
else:
Amat = col1
else:
Amat = col2
Amat_eq = msubs(Amat, op_point_dict)
# Build up the B matrix if there are forcing variables
# |0_(n + m)xs|
# B = |B_u |
if s != 0 and o - m + k != 0:
Bmat = zeros(n + m, s).col_join(B_u)
Bmat_eq = msubs(Bmat, op_point_dict)
else:
Bmat_eq = Matrix()
# kwarg A_and_B indicates to return A, B for forming the equation
# dx = [A]x + [B]r, where x = [q_indnd, u_indnd]^T,
if A_and_B:
A_cont = self.perm_mat.T * self.linear_solver(M_eq, Amat_eq)
if Bmat_eq:
B_cont = self.perm_mat.T * self.linear_solver(M_eq, Bmat_eq)
else:
# Bmat = Matrix([]), so no need to sub
B_cont = Bmat_eq
if simplify:
A_cont.simplify()
B_cont.simplify()
return A_cont, B_cont
# Otherwise return M, A, B for forming the equation
# [M]dx = [A]x + [B]r, where x = [q, u]^T
else:
if simplify:
M_eq.simplify()
Amat_eq.simplify()
Bmat_eq.simplify()
return M_eq, Amat_eq, Bmat_eq
def permutation_matrix(orig_vec, per_vec):
"""Compute the permutation matrix to change order of
orig_vec into order of per_vec.
Parameters
==========
orig_vec : array_like
Symbols in original ordering.
per_vec : array_like
Symbols in new ordering.
Returns
=======
p_matrix : Matrix
Permutation matrix such that orig_vec == (p_matrix * per_vec).
"""
if not isinstance(orig_vec, (list, tuple)):
orig_vec = flatten(orig_vec)
if not isinstance(per_vec, (list, tuple)):
per_vec = flatten(per_vec)
if set(orig_vec) != set(per_vec):
raise ValueError("orig_vec and per_vec must be the same length, "
"and contain the same symbols.")
ind_list = [orig_vec.index(i) for i in per_vec]
p_matrix = zeros(len(orig_vec))
for i, j in enumerate(ind_list):
p_matrix[i, j] = 1
return p_matrix
| Linearizer |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 66100,
"end": 68295
} | class ____(ASTTrailingTypeSpec):
def __init__(
self, prefix: str, nestedName: ASTNestedName, placeholderType: str | None
) -> None:
self.prefix = prefix
self.nestedName = nestedName
self.placeholderType = placeholderType
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTrailingTypeSpecName):
return NotImplemented
return (
self.prefix == other.prefix
and self.nestedName == other.nestedName
and self.placeholderType == other.placeholderType
)
def __hash__(self) -> int:
return hash((self.prefix, self.nestedName, self.placeholderType))
@property
def name(self) -> ASTNestedName:
return self.nestedName
def get_id(self, version: int) -> str:
return self.nestedName.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
if self.prefix:
res.extend((self.prefix, ' '))
res.append(transform(self.nestedName))
if self.placeholderType is not None:
res.extend((' ', self.placeholderType))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
if self.prefix:
signode += addnodes.desc_sig_keyword(self.prefix, self.prefix)
signode += addnodes.desc_sig_space()
self.nestedName.describe_signature(signode, mode, env, symbol=symbol)
if self.placeholderType is not None:
signode += addnodes.desc_sig_space()
if self.placeholderType == 'auto':
signode += addnodes.desc_sig_keyword('auto', 'auto')
elif self.placeholderType == 'decltype(auto)':
signode += addnodes.desc_sig_keyword('decltype', 'decltype')
signode += addnodes.desc_sig_punctuation('(', '(')
signode += addnodes.desc_sig_keyword('auto', 'auto')
signode += addnodes.desc_sig_punctuation(')', ')')
else:
raise AssertionError(self.placeholderType)
| ASTTrailingTypeSpecName |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 231536,
"end": 233371
} | class ____(fixtures.TestBase):
"""test pg-specific types for insertmanyvalues"""
__only_on__ = "postgresql"
__backend__ = True
@testing.combinations(
("BYTEA", BYTEA(), b"7\xe7\x9f"),
("BIT", BIT(3), BitString("011")),
argnames="type_,value",
id_="iaa",
)
@testing.variation("sort_by_parameter_order", [True, False])
@testing.variation("multiple_rows", [True, False])
@testing.requires.insert_returning
def test_imv_returning_datatypes(
self,
connection,
metadata,
sort_by_parameter_order,
type_,
value,
multiple_rows,
):
"""test #9739, #9808 (similar to #9701) for PG specific types
this tests insertmanyvalues in conjunction with various datatypes.
These tests are particularly for the asyncpg driver which needs
most types to be explicitly cast for the new IMV format
"""
t = Table(
"d_t",
metadata,
Column("id", Integer, primary_key=True),
Column("value", type_),
)
t.create(connection)
result = connection.execute(
t.insert().returning(
t.c.id,
t.c.value,
sort_by_parameter_order=bool(sort_by_parameter_order),
),
(
[{"value": value} for i in range(10)]
if multiple_rows
else {"value": value}
),
)
if multiple_rows:
i_range = range(1, 11)
else:
i_range = range(1, 2)
eq_(
set(result),
{(id_, value) for id_ in i_range},
)
eq_(
set(connection.scalars(select(t.c.value))),
{value},
)
| PGInsertManyValuesTest |
python | pypa__warehouse | tests/unit/oidc/models/test_google.py | {
"start": 8014,
"end": 9615
} | class ____:
@pytest.mark.parametrize("sub", ["fakesubject", None])
def test_reify_does_not_exist_yet(self, db_request, sub):
pending_publisher = PendingGooglePublisherFactory.create(sub=sub)
assert (
db_request.db.query(google.GooglePublisher)
.filter_by(
email=pending_publisher.email,
sub=pending_publisher.sub,
)
.one_or_none()
is None
)
publisher = pending_publisher.reify(db_request.db)
# If an OIDC publisher for this pending publisher does not already exist,
# a new one is created and the pending publisher is marked for deletion.
assert isinstance(publisher, google.GooglePublisher)
assert pending_publisher in db_request.db.deleted
assert publisher.email == pending_publisher.email
assert publisher.sub == pending_publisher.sub
@pytest.mark.parametrize("sub", ["fakesubject", None])
def test_reify_already_exists(self, db_request, sub):
existing_publisher = GooglePublisherFactory.create(sub=sub)
pending_publisher = PendingGooglePublisherFactory.create(
email=existing_publisher.email,
sub=existing_publisher.sub,
)
publisher = pending_publisher.reify(db_request.db)
# If an OIDC publisher for this pending publisher already exists,
# it is returned and the pending publisher is marked for deletion.
assert existing_publisher == publisher
assert pending_publisher in db_request.db.deleted
| TestPendingGooglePublisher |
python | PyCQA__pylint | tests/checkers/unittest_typecheck.py | {
"start": 683,
"end": 1431
} | class ____(CheckerTestCase):
"""Tests for pylint.checkers.typecheck."""
CHECKER_CLASS = typecheck.TypeChecker
@needs_c_extension
def test_nomember_on_c_extension_info_msg(self) -> None:
node = astroid.extract_node(
"""
from coverage import tracer
tracer.CTracer #@
"""
)
message = MessageTest(
"c-extension-no-member",
node=node,
args=("Module", "coverage.tracer", "CTracer", ""),
confidence=INFERENCE,
line=3,
col_offset=0,
end_line=3,
end_col_offset=14,
)
with self.assertAddsMessages(message):
self.checker.visit_attribute(node)
| TestTypeChecker |
python | huggingface__transformers | src/transformers/models/markuplm/modeling_markuplm.py | {
"start": 9535,
"end": 10214
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertPooler
| MarkupLMOutput |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 63433,
"end": 63979
} | class ____(torch.nn.Module):
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.dequant(x)
return x
def get_example_inputs(self) -> tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
| AnnotatedConvModel |
python | pytorch__pytorch | test/test_fx_passes.py | {
"start": 15627,
"end": 16221
} | class ____:
@staticmethod
def forward(x):
a = torch.neg(x)
a = a.relu()
left = a.sigmoid()
right = a.relu()
out = left + right
return out
@staticmethod
def pattern(a):
a = a.relu()
left = a.sigmoid()
right = a.relu()
out = left + right
return out
test_cases = [
# match_output, match_placeholder, num_matches
TestCase(False, False, 1),
TestCase(True, False, 1),
TestCase(False, True, 0),
TestCase(True, True, 0)
]
| DiamondShapePatternTestCase |
python | google__pytype | pytype/tests/test_protocols2.py | {
"start": 21570,
"end": 24730
} | class ____(test_base.BaseTest):
"""Tests for protocol implementation on a target using a Python 3 feature."""
def test_check_iterator(self):
self.Check("""
from typing import Iterator
def f(x: Iterator):
return None
class Foo:
def __next__(self):
return None
def __iter__(self):
return None
foo = Foo()
f(foo)
""")
def test_check_parameterized_iterator(self):
self.Check("""
from typing import Iterator
def f(x: Iterator[int]):
return None
class Foo:
def __next__(self):
return 42
def __iter__(self):
return self
f(Foo())
""")
def test_inherited_abstract_method(self):
self.Check("""
from typing import Iterator
class Foo:
def __iter__(self) -> Iterator[int]:
return __any_object__
def __next__(self):
return __any_object__
def f(x: Iterator[int]):
pass
f(Foo())
""")
def test_check_supports_bytes_protocol(self):
self.Check("""
import protocols
from typing import SupportsBytes
def f(x: protocols.SupportsBytes):
return None
def g(x: SupportsBytes):
return None
class Foo:
def __bytes__(self):
return b"foo"
foo = Foo()
f(foo)
g(foo)
""")
def test_metaclass_abstractness(self):
self.Check("""
import abc
from typing import Protocol
class Meta1(type(Protocol)):
pass
class Meta2(Protocol.__class__):
pass
class Foo(metaclass=Meta1):
@abc.abstractmethod
def foo(self):
pass
class Bar(metaclass=Meta2):
@abc.abstractmethod
def bar(self):
pass
""")
def test_module(self):
foo_ty = self.Infer("""
x: int
def f() -> str:
return 'hello world'
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo_ty))
errors = self.CheckWithErrors(
"""
import foo
from typing import Protocol
class ShouldMatch(Protocol):
x: int
def f(self) -> str: ...
class ExtraAttribute(Protocol):
x: int
y: str
class ExtraMethod(Protocol):
def f(self) -> str: ...
def g(self) -> int: ...
class WrongType(Protocol):
x: str
def should_match(x: ShouldMatch):
pass
def extra_attribute(x: ExtraAttribute):
pass
def extra_method(x: ExtraMethod):
pass
def wrong_type(x: WrongType):
pass
should_match(foo)
extra_attribute(foo) # wrong-arg-types[e1]
extra_method(foo) # wrong-arg-types[e2]
wrong_type(foo) # wrong-arg-types[e3]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(
errors,
{
"e1": r"not implemented on module: y",
"e2": r"not implemented on module: g",
"e3": r"x.*expected str, got int",
},
)
| ProtocolsTestPython3Feature |
python | allegroai__clearml | examples/frameworks/jsonargparse/pytorch_lightning_cli_old.py | {
"start": 1972,
"end": 3930
} | class ____(LightningModule):
def __init__(self, model=None, lr=1.0, gamma=0.7, batch_size=32):
super().__init__()
self.save_hyperparameters(ignore="model")
self.model = model or Net()
try:
self.test_acc = Accuracy()
except TypeError:
self.test_acc = Accuracy("binary")
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.nll_loss(logits, y.long())
self.log("train_loss", loss)
return loss
def test_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.nll_loss(logits, y.long())
self.test_acc(logits, y)
self.log("test_acc", self.test_acc)
self.log("test_loss", loss)
def configure_optimizers(self):
optimizer = torch.optim.Adadelta(self.model.parameters(), lr=self.hparams.lr)
return [optimizer], [torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=self.hparams.gamma)]
@property
def transform(self):
return T.Compose([T.ToTensor(), T.Normalize((0.1307,), (0.3081,))])
def prepare_data(self) -> None:
MNIST("./data", download=True)
def train_dataloader(self):
train_dataset = MNIST("./data", train=True, download=False, transform=self.transform)
return torch.utils.data.DataLoader(train_dataset, batch_size=self.hparams.batch_size)
def test_dataloader(self):
test_dataset = MNIST("./data", train=False, download=False, transform=self.transform)
return torch.utils.data.DataLoader(test_dataset, batch_size=self.hparams.batch_size)
if __name__ == "__main__":
Task.add_requirements("requirements.txt")
Task.init(project_name="example", task_name="pytorch_lightning_jsonargparse")
LightningCLI(ImageClassifier, seed_everything_default=42, run=True)
| ImageClassifier |
python | huggingface__transformers | src/transformers/models/gpt_oss/modular_gpt_oss.py | {
"start": 8088,
"end": 10911
} | class ____(Qwen2RotaryEmbedding):
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = freqs
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(x.dtype), sin.to(x.dtype)
def _apply_rotary_emb(
x: torch.Tensor,
cos: torch.Tensor,
sin: torch.Tensor,
) -> torch.Tensor:
first_half, second_half = torch.chunk(x, 2, dim=-1)
first_ = first_half * cos - second_half * sin
second_ = second_half * cos + first_half * sin
return torch.cat((first_, second_), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = _apply_rotary_emb(q, cos, sin)
k_embed = _apply_rotary_emb(k, cos, sin)
return q_embed, k_embed
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
sinks = module.sinks.reshape(1, -1, 1, 1).expand(query.shape[0], -1, query.shape[-2], -1)
combined_logits = torch.cat([attn_weights, sinks], dim=-1)
# This was not in the original implementation and slightly affect results; it prevents overflow in BF16/FP16
# when training with bsz>1 we clamp max values.
combined_logits = combined_logits - combined_logits.max(dim=-1, keepdim=True).values
probs = F.softmax(combined_logits, dim=-1, dtype=combined_logits.dtype)
scores = probs[..., :-1] # we drop the sink here
attn_weights = nn.functional.dropout(scores, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| GptOssRotaryEmbedding |
python | tornadoweb__tornado | tornado/test/locks_test.py | {
"start": 758,
"end": 6333
} | class ____(AsyncTestCase):
def setUp(self):
super().setUp()
self.history = [] # type: typing.List[typing.Union[int, str]]
def record_done(self, future, key):
"""Record the resolution of a Future returned by Condition.wait."""
def callback(_):
if not future.result():
# wait() resolved to False, meaning it timed out.
self.history.append("timeout")
else:
self.history.append(key)
future.add_done_callback(callback)
def loop_briefly(self):
"""Run all queued callbacks on the IOLoop.
In these tests, this method is used after calling notify() to
preserve the pre-5.0 behavior in which callbacks ran
synchronously.
"""
self.io_loop.add_callback(self.stop)
self.wait()
def test_repr(self):
c = locks.Condition()
self.assertIn("Condition", repr(c))
self.assertNotIn("waiters", repr(c))
c.wait()
self.assertIn("waiters", repr(c))
@gen_test
def test_notify(self):
c = locks.Condition()
self.io_loop.call_later(0.01, c.notify)
yield c.wait()
def test_notify_1(self):
c = locks.Condition()
self.record_done(c.wait(), "wait1")
self.record_done(c.wait(), "wait2")
c.notify(1)
self.loop_briefly()
self.history.append("notify1")
c.notify(1)
self.loop_briefly()
self.history.append("notify2")
self.assertEqual(["wait1", "notify1", "wait2", "notify2"], self.history)
def test_notify_n(self):
c = locks.Condition()
for i in range(6):
self.record_done(c.wait(), i)
c.notify(3)
self.loop_briefly()
# Callbacks execute in the order they were registered.
self.assertEqual(list(range(3)), self.history)
c.notify(1)
self.loop_briefly()
self.assertEqual(list(range(4)), self.history)
c.notify(2)
self.loop_briefly()
self.assertEqual(list(range(6)), self.history)
def test_notify_all(self):
c = locks.Condition()
for i in range(4):
self.record_done(c.wait(), i)
c.notify_all()
self.loop_briefly()
self.history.append("notify_all")
# Callbacks execute in the order they were registered.
self.assertEqual(list(range(4)) + ["notify_all"], self.history) # type: ignore
@gen_test
def test_wait_timeout(self):
c = locks.Condition()
wait = c.wait(timedelta(seconds=0.01))
self.io_loop.call_later(0.02, c.notify) # Too late.
yield gen.sleep(0.03)
self.assertFalse((yield wait))
@gen_test
def test_wait_timeout_preempted(self):
c = locks.Condition()
# This fires before the wait times out.
self.io_loop.call_later(0.01, c.notify)
wait = c.wait(timedelta(seconds=0.02))
yield gen.sleep(0.03)
yield wait # No TimeoutError.
@gen_test
def test_notify_n_with_timeout(self):
# Register callbacks 0, 1, 2, and 3. Callback 1 has a timeout.
# Wait for that timeout to expire, then do notify(2) and make
# sure everyone runs. Verifies that a timed-out callback does
# not count against the 'n' argument to notify().
c = locks.Condition()
self.record_done(c.wait(), 0)
self.record_done(c.wait(timedelta(seconds=0.01)), 1)
self.record_done(c.wait(), 2)
self.record_done(c.wait(), 3)
# Wait for callback 1 to time out.
yield gen.sleep(0.02)
self.assertEqual(["timeout"], self.history)
c.notify(2)
yield gen.sleep(0.01)
self.assertEqual(["timeout", 0, 2], self.history)
self.assertEqual(["timeout", 0, 2], self.history)
c.notify()
yield
self.assertEqual(["timeout", 0, 2, 3], self.history)
@gen_test
def test_notify_all_with_timeout(self):
c = locks.Condition()
self.record_done(c.wait(), 0)
self.record_done(c.wait(timedelta(seconds=0.01)), 1)
self.record_done(c.wait(), 2)
# Wait for callback 1 to time out.
yield gen.sleep(0.02)
self.assertEqual(["timeout"], self.history)
c.notify_all()
yield
self.assertEqual(["timeout", 0, 2], self.history)
@gen_test
def test_nested_notify(self):
# Ensure no notifications lost, even if notify() is reentered by a
# waiter calling notify().
c = locks.Condition()
# Three waiters.
futures = [asyncio.ensure_future(c.wait()) for _ in range(3)]
# First and second futures resolved. Second future reenters notify(),
# resolving third future.
futures[1].add_done_callback(lambda _: c.notify())
c.notify(2)
yield
self.assertTrue(all(f.done() for f in futures))
@gen_test
def test_garbage_collection(self):
# Test that timed-out waiters are occasionally cleaned from the queue.
c = locks.Condition()
for _ in range(101):
c.wait(timedelta(seconds=0.01))
future = asyncio.ensure_future(c.wait())
self.assertEqual(102, len(c._waiters))
# Let first 101 waiters time out, triggering a collection.
yield gen.sleep(0.02)
self.assertEqual(1, len(c._waiters))
# Final waiter is still active.
self.assertFalse(future.done())
c.notify()
self.assertTrue(future.done())
| ConditionTest |
python | google__pytype | pytype/tests/test_basic1.py | {
"start": 57,
"end": 10826
} | class ____(test_base.BaseTest):
"""Basic tests."""
def test_constant(self):
self.Check("17")
def test_for_loop(self):
self.Check("""
out = ""
for i in range(5):
out = out + str(i)
print(out)
""")
def test_inplace_operators(self):
self.assertNoCrash(
self.Check,
"""
x, y = 2, 3
x **= y
assert x == 8 and y == 3
x *= y
assert x == 24 and y == 3
x //= y
assert x == 8 and y == 3
x %= y
assert x == 2 and y == 3
x += y
assert x == 5 and y == 3
x -= y
assert x == 2 and y == 3
x <<= y
assert x == 16 and y == 3
x >>= y
assert x == 2 and y == 3
x = 0x8F
x &= 0xA5
assert x == 0x85
x |= 0x10
assert x == 0x95
x ^= 0x33
assert x == 0xA6
""",
)
def test_inplace_division(self):
self.assertNoCrash(
self.Check,
"""
x, y = 24, 3
x /= y
assert x == 8 and y == 3
assert isinstance(x, int)
x /= y
assert x == 2 and y == 3
assert isinstance(x, int)
""",
)
def test_slice(self):
ty = self.Infer("""
s = "hello, world"
def f1():
return s[3:8]
def f2():
return s[:8]
def f3():
return s[3:]
def f4():
return s[:]
def f5():
return s[::-1]
def f6():
return s[3:8:2]
""")
self.assertTypesMatchPytd(
ty,
"""
s = ... # type: str
def f1() -> str: ...
def f2() -> str: ...
def f3() -> str: ...
def f4() -> str: ...
def f5() -> str: ...
def f6() -> str: ...
""",
)
def test_slice_assignment(self):
self.Check("""
l = list(range(10))
l[3:8] = ["x"]
print(l)
""")
self.Check("""
l = list(range(10))
l[:8] = ["x"]
print(l)
""")
self.Check("""
l = list(range(10))
l[3:] = ["x"]
print(l)
""")
self.Check("""
l = list(range(10))
l[:] = ["x"]
print(l)
""")
def test_slice_deletion(self):
self.Check("""
l = list(range(10))
del l[3:8]
print(l)
""")
self.Check("""
l = list(range(10))
del l[:8]
print(l)
""")
self.Check("""
l = list(range(10))
del l[3:]
print(l)
""")
self.Check("""
l = list(range(10))
del l[:]
print(l)
""")
self.Check("""
l = list(range(10))
del l[::2]
print(l)
""")
def test_building_stuff(self):
self.Check("""
print((1+1, 2+2, 3+3))
""")
self.Check("""
print([1+1, 2+2, 3+3])
""")
self.Check("""
print({1:1+1, 2:2+2, 3:3+3})
""")
def test_subscripting(self):
self.Check("""
l = list(range(10))
print("%s %s %s" % (l[0], l[3], l[9]))
""")
self.Check("""
l = list(range(10))
l[5] = 17
print(l)
""")
self.Check("""
l = list(range(10))
del l[5]
print(l)
""")
def test_generator_expression(self):
self.Check("""
x = "-".join(str(z) for z in range(5))
assert x == "0-1-2-3-4"
""")
def test_generator_expression2(self):
# From test_regr.py
# This failed a different way than the previous join when genexps were
# broken:
self.Check("""
from textwrap import fill
x = set(['test_str'])
width = 70
indent = 4
blanks = ' ' * indent
res = fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks)
print(res)
""")
def test_list_comprehension(self):
self.Check("""
x = [z*z for z in range(5)]
assert x == [0, 1, 4, 9, 16]
""")
def test_dict_comprehension(self):
self.Check("""
x = {z:z*z for z in range(5)}
assert x == {0:0, 1:1, 2:4, 3:9, 4:16}
""")
def test_set_comprehension(self):
self.Check("""
x = {z*z for z in range(5)}
assert x == {0, 1, 4, 9, 16}
""")
def test_list_slice(self):
self.Check("""
[1, 2, 3][1:2]
""")
def test_strange_sequence_ops(self):
# from stdlib: test/test_augassign.py
self.assertNoCrash(
self.Check,
"""
x = [1,2]
x += [3,4]
x *= 2
assert x == [1, 2, 3, 4, 1, 2, 3, 4]
x = [1, 2, 3]
y = x
x[1:2] *= 2
y[1:2] += [1]
assert x == [1, 2, 1, 2, 3]
assert x is y
""",
)
def test_unary_operators(self):
self.Check("""
x = 8
print(-x, ~x, not x)
""")
def test_attributes(self):
self.Check("""
l = lambda: 1 # Just to have an object...
l.foo = 17
print(hasattr(l, "foo"), l.foo)
del l.foo
print(hasattr(l, "foo"))
""")
def test_attribute_inplace_ops(self):
self.assertNoCrash(
self.Check,
"""
l = lambda: 1 # Just to have an object...
l.foo = 17
l.foo -= 3
print(l.foo)
""",
)
def test_deleting_names(self):
err = self.CheckWithErrors("""
g = 17
assert g == 17
del g
g # name-error[e]
""")
self.assertErrorSequences(err, {"e": ["Variable g", "deleted", "line 3"]})
def test_deleting_local_names(self):
self.InferWithErrors("""
def f():
l = 23
assert l == 23
del l
l # name-error
f()
""")
def test_import(self):
self.Check("""
import math
print(math.pi, math.e)
from math import sqrt
print(sqrt(2))
from math import *
print(sin(2))
""")
def test_classes(self):
self.Check("""
class Thing:
def __init__(self, x):
self.x = x
def meth(self, y):
return self.x * y
thing1 = Thing(2)
thing2 = Thing(3)
print(thing1.x, thing2.x)
print(thing1.meth(4), thing2.meth(5))
""")
def test_class_mros(self):
self.Check("""
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(C, B): pass
print([c.__name__ for c in D.__mro__])
print([c.__name__ for c in E.__mro__])
""")
def test_class_mro_method_calls(self):
self.Check("""
class A:
def f(self): return 'A'
class B(A): pass
class C(A):
def f(self): return 'C'
class D(B, C): pass
print(D().f())
""")
def test_calling_methods_wrong(self):
errors = self.CheckWithErrors("""
class Thing:
def __init__(self, x):
self.x = x
def meth(self, y):
return self.x * y
thing1 = Thing(2)
print(Thing.meth(14)) # missing-parameter[e]
""")
self.assertErrorRegexes(errors, {"e": r"self"})
def test_calling_subclass_methods(self):
self.Check("""
class Thing:
def foo(self):
return 17
class SubThing(Thing):
pass
st = SubThing()
print(st.foo())
""")
def test_other_class_methods(self):
errors = self.CheckWithErrors("""
class Thing:
def foo(self):
return 17
class SubThing:
def bar(self):
return 9
st = SubThing()
print(st.foo()) # attribute-error[e]
""")
self.assertErrorRegexes(errors, {"e": r"foo.*SubThing"})
def test_attribute_access(self):
self.Check("""
class Thing:
z = 17
def __init__(self):
self.x = 23
t = Thing()
print(Thing.z)
print(t.z)
print(t.x)
""")
def test_attribute_access_error(self):
errors = self.CheckWithErrors("""
class Thing:
z = 17
def __init__(self):
self.x = 23
t = Thing()
print(t.xyzzy) # attribute-error[e]
""")
self.assertErrorRegexes(errors, {"e": r"xyzzy.*Thing"})
def test_staticmethods(self):
self.Check("""
class Thing:
@staticmethod
def smeth(x):
print(x)
@classmethod
def cmeth(cls, x):
print(x)
Thing.smeth(1492)
Thing.cmeth(1776)
""")
def test_unbound_methods(self):
self.Check("""
class Thing:
def meth(self, x):
print(x)
m = Thing.meth
m(Thing(), 1815)
""")
def test_callback(self):
self.Check("""
def lcase(s):
return s.lower()
l = ["xyz", "ABC"]
l.sort(key=lcase)
print(l)
assert l == ["ABC", "xyz"]
""")
def test_unpacking(self):
self.Check("""
a, b, c = (1, 2, 3)
assert a == 1
assert b == 2
assert c == 3
""")
def test_jump_if_true_or_pop(self):
self.Check("""
def f(a, b):
return a or b
assert f(17, 0) == 17
assert f(0, 23) == 23
assert f(0, "") == ""
""")
def test_jump_if_false_or_pop(self):
self.Check("""
def f(a, b):
return not(a and b)
assert f(17, 0) is True
assert f(0, 23) is True
assert f(0, "") is True
assert f(17, 23) is False
""")
def test_pop_jump_if_true(self):
self.Check("""
def f(a):
if not a:
return 'foo'
else:
return 'bar'
assert f(0) == 'foo'
assert f(1) == 'bar'
""")
def test_decorator(self):
self.Check("""
def verbose(func):
def _wrapper(*args, **kwargs):
return func(*args, **kwargs)
return _wrapper
@verbose
def add(x, y):
return x+y
add(7, 3)
""")
def test_multiple_classes(self):
# Making classes used to mix together all the class-scoped values
# across classes. This test would fail because A.__init__ would be
# over-written with B.__init__, and A(1, 2, 3) would complain about
# too many arguments.
self.Check("""
class A:
def __init__(self, a, b, c):
self.sum = a + b + c
class B:
def __init__(self, x):
self.x = x
a = A(1, 2, 3)
b = B(7)
print(a.sum)
print(b.x)
""")
def test_global(self):
self.Check("""
foobar = False
def baz():
global foobar
foobar = True
baz()
assert(foobar)
""")
def test_delete_global(self):
self.InferWithErrors("""
a = 3
def f():
global a
del a
f()
x = a # name-error
""")
def test_string(self):
self.Check("v = '\\xff'")
def test_string2(self):
self.Check("v = '\\uD800'")
def test_del_after_listcomp(self):
self.Check("""
def foo(x):
num = 1
nums = [num for _ in range(2)]
del num
""")
| TestBasic |
python | jazzband__django-polymorphic | src/polymorphic/contrib/extra_views.py | {
"start": 585,
"end": 1946
} | class ____:
"""
Internal Mixin, that provides polymorphic integration with the ``extra_views`` package.
"""
formset_class = BasePolymorphicModelFormSet
#: Default 0 extra forms
factory_kwargs = {"extra": 0}
#: Define the children
# :type: list[PolymorphicFormSetChild]
formset_children = None
def get_formset_children(self):
"""
:rtype: list[PolymorphicFormSetChild]
"""
if not self.formset_children:
raise ImproperlyConfigured(
"Define 'formset_children' as list of `PolymorphicFormSetChild`"
)
return self.formset_children
def get_formset_child_kwargs(self):
return {}
def get_formset(self):
"""
Returns the formset class from the inline formset factory
"""
# Implementation detail:
# Since `polymorphic_modelformset_factory` and `polymorphic_inlineformset_factory` mainly
# reuse the standard factories, and then add `child_forms`, the same can be done here.
# This makes sure the base class construction is completely honored.
FormSet = super().get_formset()
FormSet.child_forms = polymorphic_child_forms_factory(
self.get_formset_children(), **self.get_formset_child_kwargs()
)
return FormSet
| PolymorphicFormSetMixin |
python | django__django | tests/template_tests/syntax_tests/test_for.py | {
"start": 164,
"end": 13096
} | class ____(SimpleTestCase):
libraries = {"custom": "template_tests.templatetags.custom"}
@setup({"for-tag01": "{% for val in values %}{{ val }}{% endfor %}"})
def test_for_tag01(self):
output = self.engine.render_to_string("for-tag01", {"values": [1, 2, 3]})
self.assertEqual(output, "123")
@setup({"for-tag02": "{% for val in values reversed %}{{ val }}{% endfor %}"})
def test_for_tag02(self):
output = self.engine.render_to_string("for-tag02", {"values": [1, 2, 3]})
self.assertEqual(output, "321")
@setup(
{"for-tag-vars01": "{% for val in values %}{{ forloop.counter }}{% endfor %}"}
)
def test_for_tag_vars01(self):
output = self.engine.render_to_string("for-tag-vars01", {"values": [6, 6, 6]})
self.assertEqual(output, "123")
@setup(
{"for-tag-vars02": "{% for val in values %}{{ forloop.counter0 }}{% endfor %}"}
)
def test_for_tag_vars02(self):
output = self.engine.render_to_string("for-tag-vars02", {"values": [6, 6, 6]})
self.assertEqual(output, "012")
@setup(
{
"for-tag-vars03": (
"{% for val in values %}{{ forloop.revcounter }}{% endfor %}"
)
}
)
def test_for_tag_vars03(self):
output = self.engine.render_to_string("for-tag-vars03", {"values": [6, 6, 6]})
self.assertEqual(output, "321")
@setup(
{
"for-tag-vars04": (
"{% for val in values %}{{ forloop.revcounter0 }}{% endfor %}"
)
}
)
def test_for_tag_vars04(self):
output = self.engine.render_to_string("for-tag-vars04", {"values": [6, 6, 6]})
self.assertEqual(output, "210")
@setup(
{
"for-tag-vars05": "{% for val in values %}"
"{% if forloop.first %}f{% else %}x{% endif %}{% endfor %}"
}
)
def test_for_tag_vars05(self):
output = self.engine.render_to_string("for-tag-vars05", {"values": [6, 6, 6]})
self.assertEqual(output, "fxx")
@setup(
{
"for-tag-vars06": "{% for val in values %}"
"{% if forloop.last %}l{% else %}x{% endif %}{% endfor %}"
}
)
def test_for_tag_vars06(self):
output = self.engine.render_to_string("for-tag-vars06", {"values": [6, 6, 6]})
self.assertEqual(output, "xxl")
@setup(
{
"for-tag-unpack01": (
"{% for key,value in items %}{{ key }}:{{ value }}/{% endfor %}"
)
}
)
def test_for_tag_unpack01(self):
output = self.engine.render_to_string(
"for-tag-unpack01", {"items": (("one", 1), ("two", 2))}
)
self.assertEqual(output, "one:1/two:2/")
@setup(
{
"for-tag-unpack03": (
"{% for key, value in items %}{{ key }}:{{ value }}/{% endfor %}"
)
}
)
def test_for_tag_unpack03(self):
output = self.engine.render_to_string(
"for-tag-unpack03", {"items": (("one", 1), ("two", 2))}
)
self.assertEqual(output, "one:1/two:2/")
@setup(
{
"for-tag-unpack04": (
"{% for key , value in items %}{{ key }}:{{ value }}/{% endfor %}"
)
}
)
def test_for_tag_unpack04(self):
output = self.engine.render_to_string(
"for-tag-unpack04", {"items": (("one", 1), ("two", 2))}
)
self.assertEqual(output, "one:1/two:2/")
@setup(
{
"for-tag-unpack05": (
"{% for key ,value in items %}{{ key }}:{{ value }}/{% endfor %}"
)
}
)
def test_for_tag_unpack05(self):
output = self.engine.render_to_string(
"for-tag-unpack05", {"items": (("one", 1), ("two", 2))}
)
self.assertEqual(output, "one:1/two:2/")
@setup(
{
"for-tag-unpack06": (
"{% for key value in items %}{{ key }}:{{ value }}/{% endfor %}"
)
}
)
def test_for_tag_unpack06(self):
msg = "'for' tag received an invalid argument: for key value in items"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string(
"for-tag-unpack06", {"items": (("one", 1), ("two", 2))}
)
@setup(
{
"for-tag-unpack07": (
"{% for key,,value in items %}{{ key }}:{{ value }}/{% endfor %}"
)
}
)
def test_for_tag_unpack07(self):
msg = "'for' tag received an invalid argument: for key,,value in items"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string(
"for-tag-unpack07", {"items": (("one", 1), ("two", 2))}
)
@setup(
{
"for-tag-unpack08": (
"{% for key,value, in items %}{{ key }}:{{ value }}/{% endfor %}"
)
}
)
def test_for_tag_unpack08(self):
msg = "'for' tag received an invalid argument: for key,value, in items"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string(
"for-tag-unpack08", {"items": (("one", 1), ("two", 2))}
)
@setup({"double-quote": '{% for "k" in items %}{{ "k" }}/{% endfor %}'})
def test_unpack_double_quote(self):
msg = """'for' tag received an invalid argument: for "k" in items"""
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("double-quote", {"items": (1, 2)})
@setup({"single-quote": "{% for 'k' in items %}{{ k }}/{% endfor %}"})
def test_unpack_single_quote(self):
msg = """'for' tag received an invalid argument: for 'k' in items"""
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("single-quote", {"items": (1, 2)})
@setup({"vertical-bar": "{% for k|upper in items %}{{ k|upper }}/{% endfor %}"})
def test_unpack_vertical_bar(self):
msg = "'for' tag received an invalid argument: for k|upper in items"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("vertical-bar", {"items": (1, 2)})
@setup(
{
"for-tag-unpack09": (
"{% for val in items %}{{ val.0 }}:{{ val.1 }}/{% endfor %}"
)
}
)
def test_for_tag_unpack09(self):
"""
A single loopvar doesn't truncate the list in val.
"""
output = self.engine.render_to_string(
"for-tag-unpack09", {"items": (("one", 1), ("two", 2))}
)
self.assertEqual(output, "one:1/two:2/")
@setup(
{
"for-tag-unpack13": (
"{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}"
)
}
)
def test_for_tag_unpack13(self):
output = self.engine.render_to_string(
"for-tag-unpack13", {"items": (("one", 1, "carrot"), ("two", 2, "cheese"))}
)
if self.engine.string_if_invalid:
self.assertEqual(output, "one:1,carrot/two:2,cheese/")
else:
self.assertEqual(output, "one:1,carrot/two:2,cheese/")
@setup(
{
"for-tag-empty01": (
"{% for val in values %}{{ val }}{% empty %}empty text{% endfor %}"
)
}
)
def test_for_tag_empty01(self):
output = self.engine.render_to_string("for-tag-empty01", {"values": [1, 2, 3]})
self.assertEqual(output, "123")
@setup(
{
"for-tag-empty02": (
"{% for val in values %}{{ val }}{% empty %}values array empty"
"{% endfor %}"
)
}
)
def test_for_tag_empty02(self):
output = self.engine.render_to_string("for-tag-empty02", {"values": []})
self.assertEqual(output, "values array empty")
@setup(
{
"for-tag-empty03": "{% for val in values %}"
"{{ val }}{% empty %}values array not found{% endfor %}"
}
)
def test_for_tag_empty03(self):
output = self.engine.render_to_string("for-tag-empty03")
self.assertEqual(output, "values array not found")
@setup(
{
"for-tag-filter-ws": (
"{% load custom %}{% for x in s|noop:'x y' %}{{ x }}{% endfor %}"
)
}
)
def test_for_tag_filter_ws(self):
"""
#19882
"""
output = self.engine.render_to_string("for-tag-filter-ws", {"s": "abc"})
self.assertEqual(output, "abc")
@setup(
{"for-tag-unpack-strs": "{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}"}
)
def test_for_tag_unpack_strs(self):
output = self.engine.render_to_string(
"for-tag-unpack-strs", {"items": ("ab", "ac")}
)
self.assertEqual(output, "a:b/a:c/")
@setup({"for-tag-unpack10": "{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}"})
def test_for_tag_unpack10(self):
with self.assertRaisesMessage(
ValueError, "Need 2 values to unpack in for loop; got 3."
):
self.engine.render_to_string(
"for-tag-unpack10",
{"items": (("one", 1, "carrot"), ("two", 2, "orange"))},
)
@setup(
{
"for-tag-unpack11": (
"{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}"
)
}
)
def test_for_tag_unpack11(self):
with self.assertRaisesMessage(
ValueError, "Need 3 values to unpack in for loop; got 2."
):
self.engine.render_to_string(
"for-tag-unpack11",
{"items": (("one", 1), ("two", 2))},
)
@setup(
{
"for-tag-unpack12": (
"{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}"
)
}
)
def test_for_tag_unpack12(self):
with self.assertRaisesMessage(
ValueError, "Need 3 values to unpack in for loop; got 2."
):
self.engine.render_to_string(
"for-tag-unpack12", {"items": (("one", 1, "carrot"), ("two", 2))}
)
@setup({"for-tag-unpack14": "{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}"})
def test_for_tag_unpack14(self):
with self.assertRaisesMessage(
ValueError, "Need 2 values to unpack in for loop; got 1."
):
self.engine.render_to_string("for-tag-unpack14", {"items": (1, 2)})
@setup(
{
"main": '{% with alpha=alpha.values %}{% include "base" %}{% endwith %}_'
'{% with alpha=alpha.extra %}{% include "base" %}{% endwith %}',
"base": "{% for x, y in alpha %}{{ x }}:{{ y }},{% endfor %}",
}
)
def test_for_tag_context(self):
"""
ForNode.render() pops the values it pushes to the context (#28001).
"""
output = self.engine.render_to_string(
"main",
{
"alpha": {
"values": [("two", 2), ("four", 4)],
"extra": [("six", 6), ("eight", 8)],
},
},
)
self.assertEqual(output, "two:2,four:4,_six:6,eight:8,")
@setup({"invalid_for_loop": "{% for x items %}{{ x }}{% endfor %}"})
def test_invalid_arg(self):
msg = "'for' statements should have at least four words: for x items"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("invalid_for_loop", {"items": (1, 2)})
@setup({"invalid_for_loop": "{% for x from items %}{{ x }}{% endfor %}"})
def test_invalid_in_keyword(self):
msg = "'for' statements should use the format 'for x in y': for x from items"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string("invalid_for_loop", {"items": (1, 2)})
@setup(
{
"forloop-length": "{% for val in values %}{{ forloop.length }}{% endfor %}",
"forloop-length-reversed": "{% for val in values reversed %}"
"{{ forloop.length }}{% endfor %}",
}
)
def test_forloop_length(self):
cases = [
([1, 2, 3], "333"),
([1, 2, 3, 4, 5, 6], "666666"),
([], ""),
]
for values, expected_output in cases:
for template in ["forloop-length", "forloop-length-reversed"]:
with self.subTest(expected_output=expected_output, template=template):
output = self.engine.render_to_string(template, {"values": values})
self.assertEqual(output, expected_output)
| ForTagTests |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/git_sparse_b/package.py | {
"start": 217,
"end": 550
} | class ____(Package):
"""Partal clone of the mock_git_repository fixture"""
# git='to-be-filled-in-by-test'
# ----------------------------
# -- mock_git_repository
version("main", branch="many_dirs")
homepage = "http://www.git-fetch-example.com"
submodules = False
git_sparse_paths = ["dir1"]
| GitSparseB |
python | google__jax | tests/third_party/scipy/line_search_test.py | {
"start": 245,
"end": 4869
} | class ____(jtu.JaxTestCase):
# -- scalar functions; must have dphi(0.) < 0
def assert_wolfe(self, s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
"""
Check that strong Wolfe conditions apply
"""
phi1 = phi(s)
phi0 = phi(0)
derphi0 = derphi(0)
derphi1 = derphi(s)
msg = "s = {}; phi(0) = {}; phi(s) = {}; phi'(0) = {}; phi'(s) = {}; {}".format(
s, phi0, phi1, derphi0, derphi1, err_msg)
self.assertTrue(phi1 <= phi0 + c1 * s * derphi0, "Wolfe 1 failed: " + msg)
self.assertTrue(abs(derphi1) <= abs(c2 * derphi0), "Wolfe 2 failed: " + msg)
def assert_line_wolfe(self, x, p, s, f, fprime, **kw):
self.assert_wolfe(s, phi=lambda sp: f(x + p * sp),
derphi=lambda sp: jnp.dot(fprime(x + p * sp), p), **kw)
def _scalar_func_1(self, s):
p = -s - s ** 3 + s ** 4
dp = -1 - 3 * s ** 2 + 4 * s ** 3
return p, dp
def _scalar_func_2(self, s):
p = jnp.exp(-4 * s) + s ** 2
dp = -4 * jnp.exp(-4 * s) + 2 * s
return p, dp
def _scalar_func_3(self, s):
p = -jnp.sin(10 * s)
dp = -10 * jnp.cos(10 * s)
return p, dp
# -- n-d functions
def _line_func_1(self, x):
f = jnp.dot(x, x)
df = 2 * x
return f, df
def _line_func_2(self, x):
f = jnp.dot(x, jnp.dot(self.A, x)) + 1
df = jnp.dot(self.A + self.A.T, x)
return f, df
# -- Generic scalar searches
@jtu.sample_product(
name=['_scalar_func_1', '_scalar_func_2', '_scalar_func_3'],
)
def test_scalar_search_wolfe2(self, name):
def bind_index(func, idx):
# Remember Python's closure semantics!
return lambda *a, **kw: func(*a, **kw)[idx]
value = getattr(self, name)
phi = bind_index(value, 0)
derphi = bind_index(value, 1)
for old_phi0 in self.rng().randn(3):
res = line_search(phi, 0., 1.)
s, phi1, derphi1 = res.a_k, res.f_k, res.g_k
self.assertAllClose(phi1, phi(s), check_dtypes=False, atol=1e-6)
if derphi1 is not None:
self.assertAllClose(derphi1, derphi(s), check_dtypes=False, atol=1e-6)
self.assert_wolfe(s, phi, derphi, err_msg=f"{name} {old_phi0:g}")
# -- Generic line searches
@jtu.sample_product(
name=['_line_func_1', '_line_func_2'],
)
@jax.default_matmul_precision("float32")
def test_line_search_wolfe2(self, name):
def bind_index(func, idx):
# Remember Python's closure semantics!
return lambda *a, **kw: func(*a, **kw)[idx]
value = getattr(self, name)
f = bind_index(value, 0)
fprime = bind_index(value, 1)
k = 0
N = 20
rng = self.rng()
# sets A in one of the line funcs
self.A = self.rng().randn(N, N)
while k < 9:
x = rng.randn(N)
p = rng.randn(N)
if jnp.dot(p, fprime(x)) >= 0:
# always pick a descent pk
continue
k += 1
f0 = f(x)
g0 = fprime(x)
self.fcount = 0
res = line_search(f, x, p, old_fval=f0, gfk=g0)
s = res.a_k
fv = res.f_k
gv = res.g_k
self.assertAllClose(fv, f(x + s * p), check_dtypes=False, atol=1e-5)
if gv is not None:
self.assertAllClose(gv, fprime(x + s * p), check_dtypes=False, atol=1e-5)
def test_line_search_wolfe2_bounds(self):
# See gh-7475
# For this f and p, starting at a point on axis 0, the strong Wolfe
# condition 2 is met if and only if the step length s satisfies
# |x + s| <= c2 * |x|
f = lambda x: jnp.dot(x, x)
fp = lambda x: 2 * x
p = jnp.array([1.0, 0.0])
# Smallest s satisfying strong Wolfe conditions for these arguments is 30
x = -60 * p
c2 = 0.5
res = line_search(f, x, p, c2=c2)
s = res.a_k
# s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
self.assert_line_wolfe(x, p, s, f, fp)
self.assertTrue(s >= 30.)
res = line_search(f, x, p, c2=c2, maxiter=5)
self.assertTrue(res.failed)
# s=30 will only be tried on the 6th iteration, so this won't converge
def test_line_search(self):
def f(x):
return jnp.cos(jnp.sum(jnp.exp(-x)) ** 2)
# assert not line_search(jax.value_and_grad(f), np.ones(2), np.array([-0.5, -0.25])).failed
xk = jnp.ones(2)
pk = jnp.array([-0.5, -0.25], dtype=xk.dtype)
res = line_search(f, xk, pk, maxiter=100)
with jax.numpy_dtype_promotion('standard'):
scipy_res = scipy.optimize.line_search(f, grad(f), xk, pk)
self.assertAllClose(scipy_res[0], res.a_k, atol=1e-5, check_dtypes=False)
self.assertAllClose(scipy_res[3], res.f_k, atol=1e-5, check_dtypes=False)
# -- More specific tests
if __name__ == "__main__":
absltest.main()
| TestLineSearch |
python | PyCQA__pylint | tests/functional/k/keyword_arg_before_vararg.py | {
"start": 301,
"end": 955
} | class ____:
"""class AAAA"""
def func_in_class(self, param1, param2=2, *args): # [keyword-arg-before-vararg]
"method in class AAAA"
pass
@staticmethod
def static_method_in_class(param1, param2=3, *args): # [keyword-arg-before-vararg]
"static method in class AAAA"
pass
@classmethod
def class_method_in_class(cls, param1, param2=4, *args): # [keyword-arg-before-vararg]
"class method in class AAAA"
pass
some_var = AAAA()
some_var.func_in_class(3)
some_var.static_method_in_class(4)
AAAA.static_method_in_class(4)
some_var.class_method_in_class(5)
AAAA.class_method_in_class(5)
| AAAA |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 17440,
"end": 17538
} | class ____(OpcodeWithArg):
_FLAGS = HAS_ARGUMENT | HAS_JABS
__slots__ = ()
| JUMP_IF_NOT_EXC_MATCH |
python | FactoryBoy__factory_boy | tests/test_docs_internals.py | {
"start": 1214,
"end": 1836
} | class ____:
def __init__(
self,
username,
full_name,
is_active=True,
is_superuser=False,
is_staff=False,
creation_date=None,
deactivation_date=None,
):
self.username = username
self.full_name = full_name
self.is_active = is_active
self.is_superuser = is_superuser
self.is_staff = is_staff
self.creation_date = creation_date
self.deactivation_date = deactivation_date
self.logs = []
def log(self, action, timestamp):
UserLog(user=self, action=action, timestamp=timestamp)
| User |
python | ray-project__ray | doc/source/serve/doc_code/key_concepts.py | {
"start": 737,
"end": 826
} | class ____:
def __call__(self) -> str:
return " world!"
@serve.deployment
| World |
python | pyinstaller__pyinstaller | tests/unit/test_hookutils.py | {
"start": 2642,
"end": 4319
} | class ____(object):
# Removing a suffix from a filename with no extension returns the filename.
def test_no_extension(self):
assert 'file' == hookutils.remove_file_extension('file')
# A filename with two extensions should have only the first removed.
def test_two_extensions(self):
assert 'file.1' == hookutils.remove_file_extension('file.1.2')
# Standard case - remove an extension
def test_remove_ext(self):
assert 'file' == hookutils.remove_file_extension('file.1')
# Unix-style .files are not treated as extensions
def test_unixstyle_not_ext(self):
assert '.file' == hookutils.remove_file_extension('.file')
# Unix-style .file.ext works
def test_unixstyle_ext(self):
assert '.file' == hookutils.remove_file_extension('.file.1')
# Unix-style .file.ext works
def test_unixstyle_path(self):
assert '/a/b/c' == hookutils.remove_file_extension('/a/b/c')
assert '/a/b/c' == hookutils.remove_file_extension('/a/b/c.1')
# Windows-style .file.ext works
def test_win32style_path(self):
assert 'C:\\a\\b\\c' == hookutils.remove_file_extension('C:\\a\\b\\c')
assert 'C:\\a\\b\\c' == hookutils.remove_file_extension('C:\\a\\b\\c.1')
# The name of the hookutils test files directory
TEST_MOD = 'hookutils_package'
# The path to this directory.
TEST_MOD_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'hookutils_files')
@pytest.fixture
def mod_list(monkeypatch):
monkeypatch.syspath_prepend(TEST_MOD_PATH)
# Use the hookutils_test_files package for testing.
return hookutils.collect_submodules(TEST_MOD)
| TestRemoveExtension |
python | huggingface__transformers | src/transformers/models/ovis2/modular_ovis2.py | {
"start": 2037,
"end": 2777
} | class ____(SiglipVisionEmbeddings):
def __init__(self, config: Ovis2VisionConfig):
super().__init__(config)
self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
def interpolate_pos_encoding(self):
raise NotImplementedError("Not needed for Ovis2")
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
embeddings = patch_embeds.flatten(2).transpose(1, 2)
embeddings = self.rms_norm(embeddings)
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
| Ovis2VisionEmbeddings |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 893,
"end": 1174
} | class ____:
"""Contextual metadata attached to opcodes."""
# Function signature annotations in textual form
signature_annotations: dict[str, str] | None = None
# Code run out of line-number order, due to compiler optimisations.
is_out_of_order: bool = False
| OpcodeMetadata |
python | pytorch__pytorch | torch/_dynamo/profiler.py | {
"start": 791,
"end": 2098
} | class ____:
microseconds: float = 0.0
operators: int = 0
fusions: int = 0
graphs: int = 0
def __iadd__(self, other: Self) -> Self:
self.microseconds += other.microseconds
self.operators += other.operators
self.fusions += other.fusions
return self
def __add__(self, other: ProfileMetrics) -> ProfileMetrics:
assert isinstance(other, ProfileMetrics)
return ProfileMetrics(
self.microseconds + other.microseconds,
self.operators + other.operators,
self.fusions + other.fusions,
)
def __truediv__(self, other: Any) -> ProfileMetrics:
if isinstance(other, int):
other = ProfileMetrics(other, other, other)
return ProfileMetrics(
# pyrefly: ignore [no-matching-overload]
self.microseconds / max(1, other.microseconds),
# pyrefly: ignore [bad-argument-type]
self.operators / max(1, other.operators),
# pyrefly: ignore [bad-argument-type]
self.fusions / max(1, other.fusions),
)
def __str__(self) -> str:
return f"{self.operators:4.0%} ops {self.microseconds:4.0%} time"
def tocsv(self) -> list[float]:
return [self.operators, self.microseconds]
| ProfileMetrics |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF053.py | {
"start": 1522,
"end": 1675
} | class ____[T]:
class D[U](Generic[T, U]): ...
# In a single run, only the first is reported.
# Others will be reported/fixed in following iterations.
| C |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.