language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | plotly__plotly.py | plotly/graph_objs/pie/_textfont.py | {
"start": 233,
"end": 17087
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "pie"
_path_str = "pie.textfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Textfont object
Sets the font used for `textinfo`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.pie.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.pie.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.pie.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 46586,
"end": 47787
} | class ____(Response):
"""
Response of models.delete_metadata endpoint.
:param updated: Number of models updated (0 or 1)
:type updated: int
"""
_service = "models"
_action = "delete_metadata"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of models updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(DeleteMetadataResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| DeleteMetadataResponse |
python | uqfoundation__dill | dill/_objects.py | {
"start": 1953,
"end": 2028
} | class ____:
def __call__(self):
pass
_instance2 = _class2()
| _class2 |
python | aimacode__aima-python | agents4e.py | {
"start": 28515,
"end": 36976
} | class ____(XYEnvironment):
pit_probability = 0.2 # Probability to spawn a pit in a location. (From Chapter 7.2)
# Room should be 4x4 grid of rooms. The extra 2 for walls
def __init__(self, agent_program, width=6, height=6):
super().__init__(width, height)
self.init_world(agent_program)
def init_world(self, program):
"""Spawn items in the world based on probabilities from the book"""
"WALLS"
self.add_walls()
"PITS"
for x in range(self.x_start, self.x_end):
for y in range(self.y_start, self.y_end):
if random.random() < self.pit_probability:
self.add_thing(Pit(), (x, y), True)
self.add_thing(Breeze(), (x - 1, y), True)
self.add_thing(Breeze(), (x, y - 1), True)
self.add_thing(Breeze(), (x + 1, y), True)
self.add_thing(Breeze(), (x, y + 1), True)
"WUMPUS"
w_x, w_y = self.random_location_inbounds(exclude=(1, 1))
self.add_thing(Wumpus(lambda x: ""), (w_x, w_y), True)
self.add_thing(Stench(), (w_x - 1, w_y), True)
self.add_thing(Stench(), (w_x + 1, w_y), True)
self.add_thing(Stench(), (w_x, w_y - 1), True)
self.add_thing(Stench(), (w_x, w_y + 1), True)
"GOLD"
self.add_thing(Gold(), self.random_location_inbounds(exclude=(1, 1)), True)
"AGENT"
self.add_thing(Explorer(program), (1, 1), True)
def get_world(self, show_walls=True):
"""Return the items in the world"""
result = []
x_start, y_start = (0, 0) if show_walls else (1, 1)
if show_walls:
x_end, y_end = self.width, self.height
else:
x_end, y_end = self.width - 1, self.height - 1
for x in range(x_start, x_end):
row = []
for y in range(y_start, y_end):
row.append(self.list_things_at((x, y)))
result.append(row)
return result
def percepts_from(self, agent, location, tclass=Thing):
"""Return percepts from a given location,
and replaces some items with percepts from chapter 7."""
thing_percepts = {
Gold: Glitter(),
Wall: Bump(),
Wumpus: Stench(),
Pit: Breeze()}
"""Agents don't need to get their percepts"""
thing_percepts[agent.__class__] = None
"""Gold only glitters in its cell"""
if location != agent.location:
thing_percepts[Gold] = None
result = [thing_percepts.get(thing.__class__, thing) for thing in self.things
if thing.location == location and isinstance(thing, tclass)]
return result if len(result) else [None]
def percept(self, agent):
"""Return things in adjacent (not diagonal) cells of the agent.
Result format: [Left, Right, Up, Down, Center / Current location]"""
x, y = agent.location
result = []
result.append(self.percepts_from(agent, (x - 1, y)))
result.append(self.percepts_from(agent, (x + 1, y)))
result.append(self.percepts_from(agent, (x, y - 1)))
result.append(self.percepts_from(agent, (x, y + 1)))
result.append(self.percepts_from(agent, (x, y)))
"""The wumpus gives out a loud scream once it's killed."""
wumpus = [thing for thing in self.things if isinstance(thing, Wumpus)]
if len(wumpus) and not wumpus[0].alive and not wumpus[0].screamed:
result[-1].append(Scream())
wumpus[0].screamed = True
return result
def execute_action(self, agent, action):
"""Modify the state of the environment based on the agent's actions.
Performance score taken directly out of the book."""
if isinstance(agent, Explorer) and self.in_danger(agent):
return
agent.bump = False
if action == 'TurnRight':
agent.direction += Direction.R
agent.performance -= 1
elif action == 'TurnLeft':
agent.direction += Direction.L
agent.performance -= 1
elif action == 'Forward':
agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location))
agent.performance -= 1
elif action == 'Grab':
things = [thing for thing in self.list_things_at(agent.location)
if agent.can_grab(thing)]
if len(things):
print("Grabbing", things[0].__class__.__name__)
if len(things):
agent.holding.append(things[0])
agent.performance -= 1
elif action == 'Climb':
if agent.location == (1, 1): # Agent can only climb out of (1,1)
agent.performance += 1000 if Gold() in agent.holding else 0
self.delete_thing(agent)
elif action == 'Shoot':
"""The arrow travels straight down the path the agent is facing"""
if agent.has_arrow:
arrow_travel = agent.direction.move_forward(agent.location)
while self.is_inbounds(arrow_travel):
wumpus = [thing for thing in self.list_things_at(arrow_travel)
if isinstance(thing, Wumpus)]
if len(wumpus):
wumpus[0].alive = False
break
arrow_travel = agent.direction.move_forward(agent.location)
agent.has_arrow = False
def in_danger(self, agent):
"""Check if Explorer is in danger (Pit or Wumpus), if he is, kill him"""
for thing in self.list_things_at(agent.location):
if isinstance(thing, Pit) or (isinstance(thing, Wumpus) and thing.alive):
agent.alive = False
agent.performance -= 1000
agent.killed_by = thing.__class__.__name__
return True
return False
def is_done(self):
"""The game is over when the Explorer is killed
or if he climbs out of the cave only at (1,1)."""
explorer = [agent for agent in self.agents if isinstance(agent, Explorer)]
if len(explorer):
if explorer[0].alive:
return False
else:
print("Death by {} [-1000].".format(explorer[0].killed_by))
else:
print("Explorer climbed out {}."
.format("with Gold [+1000]!" if Gold() not in self.things else "without Gold [+0]"))
return True
# TODO: Arrow needs to be implemented
# ______________________________________________________________________________
def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000):
"""See how well each of several agents do in n instances of an environment.
Pass in a factory (constructor) for environments, and several for agents.
Create n instances of the environment, and run each agent in copies of
each one for steps. Return a list of (agent, average-score) tuples.
>>> environment = TrivialVacuumEnvironment
>>> agents = [ModelBasedVacuumAgent, ReflexVacuumAgent]
>>> result = compare_agents(environment, agents)
>>> performance_ModelBasedVacuumAgent = result[0][1]
>>> performance_ReflexVacuumAgent = result[1][1]
>>> performance_ReflexVacuumAgent <= performance_ModelBasedVacuumAgent
True
"""
envs = [EnvFactory() for i in range(n)]
return [(A, test_agent(A, steps, copy.deepcopy(envs)))
for A in AgentFactories]
def test_agent(AgentFactory, steps, envs):
"""Return the mean score of running an agent in each of the envs, for steps
>>> def constant_prog(percept):
... return percept
...
>>> agent = Agent(constant_prog)
>>> result = agent.program(5)
>>> result == 5
True
"""
def score(env):
agent = AgentFactory()
env.add_thing(agent)
env.run(steps)
return agent.performance
return mean(map(score, envs))
# _________________________________________________________________________
__doc__ += """
>>> a = ReflexVacuumAgent()
>>> a.program((loc_A, 'Clean'))
'Right'
>>> a.program((loc_B, 'Clean'))
'Left'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> a.program((loc_A, 'Dirty'))
'Suck'
>>> e = TrivialVacuumEnvironment()
>>> e.add_thing(ModelBasedVacuumAgent())
>>> e.run(5)
"""
| WumpusEnvironment |
python | python-attrs__attrs | typing-examples/mypy.py | {
"start": 2097,
"end": 2318
} | class ____:
x: int = attr.ib(converter=attr.converters.to_bool)
ConvCToBool(1)
ConvCToBool(True)
ConvCToBool("on")
ConvCToBool("yes")
ConvCToBool(0)
ConvCToBool(False)
ConvCToBool("n")
# Validators
@attr.s
| ConvCToBool |
python | sympy__sympy | sympy/vector/vector.py | {
"start": 744,
"end": 12351
} | class ____(BasisDependent):
"""
Super class for all Vector classes.
Ideally, neither this class nor any of its subclasses should be
instantiated by the user.
"""
is_scalar = False
is_Vector = True
_op_priority = 12.0
_expr_type: type[Vector]
_mul_func: type[Vector]
_add_func: type[Vector]
_zero_func: type[Vector]
_base_func: type[Vector]
zero: VectorZero
kind: VectorKind = VectorKind()
@property
def components(self):
"""
Returns the components of this vector in the form of a
Python dictionary mapping BaseVector instances to the
corresponding measure numbers.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v.components
{C.i: 3, C.j: 4, C.k: 5}
"""
# The '_components' attribute is defined according to the
# subclass of Vector the instance belongs to.
return self._components
def magnitude(self):
"""
Returns the magnitude of this vector.
"""
return sqrt(self & self)
def normalize(self):
"""
Returns the normalized version of this vector.
"""
return self / self.magnitude()
def equals(self, other):
"""
Check if ``self`` and ``other`` are identically equal vectors.
Explanation
===========
Checks if two vector expressions are equal for all possible values of
the symbols present in the expressions.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.abc import x, y
>>> from sympy import pi
>>> C = CoordSys3D('C')
Compare vectors that are equal or not:
>>> C.i.equals(C.j)
False
>>> C.i.equals(C.i)
True
These two vectors are equal if `x = y` but are not identically equal
as expressions since for some values of `x` and `y` they are unequal:
>>> v1 = x*C.i + C.j
>>> v2 = y*C.i + C.j
>>> v1.equals(v1)
True
>>> v1.equals(v2)
False
Vectors from different coordinate systems can be compared:
>>> D = C.orient_new_axis('D', pi/2, C.i)
>>> D.j.equals(C.j)
False
>>> D.j.equals(C.k)
True
Parameters
==========
other: Vector
The other vector expression to compare with.
Returns
=======
``True``, ``False`` or ``None``. A return value of ``True`` indicates
that the two vectors are identically equal. A return value of ``False``
indicates that they are not. In some cases it is not possible to
determine if the two vectors are identically equal and ``None`` is
returned.
See Also
========
sympy.core.expr.Expr.equals
"""
diff = self - other
diff_mag2 = diff.dot(diff)
return diff_mag2.equals(0)
def dot(self, other):
"""
Returns the dot product of this Vector, either with another
Vector, or a Dyadic, or a Del operator.
If 'other' is a Vector, returns the dot product scalar (SymPy
expression).
If 'other' is a Dyadic, the dot product is returned as a Vector.
If 'other' is an instance of Del, returns the directional
derivative operator as a Python function. If this function is
applied to a scalar expression, it returns the directional
derivative of the scalar field wrt this Vector.
Parameters
==========
other: Vector/Dyadic/Del
The Vector or Dyadic we are dotting with, or a Del operator .
Examples
========
>>> from sympy.vector import CoordSys3D, Del
>>> C = CoordSys3D('C')
>>> delop = Del()
>>> C.i.dot(C.j)
0
>>> C.i & C.i
1
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v.dot(C.k)
5
>>> (C.i & delop)(C.x*C.y*C.z)
C.y*C.z
>>> d = C.i.outer(C.i)
>>> C.i.dot(d)
C.i
"""
# Check special cases
if isinstance(other, Dyadic):
if isinstance(self, VectorZero):
return Vector.zero
outvec = Vector.zero
for k, v in other.components.items():
vect_dot = k.args[0].dot(self)
outvec += vect_dot * v * k.args[1]
return outvec
from sympy.vector.deloperator import Del
if not isinstance(other, (Del, Vector)):
raise TypeError(str(other) + " is not a vector, dyadic or " +
"del operator")
# Check if the other is a del operator
if isinstance(other, Del):
def directional_derivative(field):
from sympy.vector.functions import directional_derivative
return directional_derivative(field, self)
return directional_derivative
return dot(self, other)
def __and__(self, other):
return self.dot(other)
__and__.__doc__ = dot.__doc__
def cross(self, other):
"""
Returns the cross product of this Vector with another Vector or
Dyadic instance.
The cross product is a Vector, if 'other' is a Vector. If 'other'
is a Dyadic, this returns a Dyadic instance.
Parameters
==========
other: Vector/Dyadic
The Vector or Dyadic we are crossing with.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> C.i.cross(C.j)
C.k
>>> C.i ^ C.i
0
>>> v = 3*C.i + 4*C.j + 5*C.k
>>> v ^ C.i
5*C.j + (-4)*C.k
>>> d = C.i.outer(C.i)
>>> C.j.cross(d)
(-1)*(C.k|C.i)
"""
# Check special cases
if isinstance(other, Dyadic):
if isinstance(self, VectorZero):
return Dyadic.zero
outdyad = Dyadic.zero
for k, v in other.components.items():
cross_product = self.cross(k.args[0])
outer = cross_product.outer(k.args[1])
outdyad += v * outer
return outdyad
return cross(self, other)
def __xor__(self, other):
return self.cross(other)
__xor__.__doc__ = cross.__doc__
def outer(self, other):
"""
Returns the outer product of this vector with another, in the
form of a Dyadic instance.
Parameters
==========
other : Vector
The Vector with respect to which the outer product is to
be computed.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> N = CoordSys3D('N')
>>> N.i.outer(N.j)
(N.i|N.j)
"""
# Handle the special cases
if not isinstance(other, Vector):
raise TypeError("Invalid operand for outer product")
elif (isinstance(self, VectorZero) or
isinstance(other, VectorZero)):
return Dyadic.zero
# Iterate over components of both the vectors to generate
# the required Dyadic instance
args = [(v1 * v2) * BaseDyadic(k1, k2) for (k1, v1), (k2, v2)
in product(self.components.items(), other.components.items())]
return DyadicAdd(*args)
def projection(self, other, scalar=False):
"""
Returns the vector or scalar projection of the 'other' on 'self'.
Examples
========
>>> from sympy.vector.coordsysrect import CoordSys3D
>>> C = CoordSys3D('C')
>>> i, j, k = C.base_vectors()
>>> v1 = i + j + k
>>> v2 = 3*i + 4*j
>>> v1.projection(v2)
7/3*C.i + 7/3*C.j + 7/3*C.k
>>> v1.projection(v2, scalar=True)
7/3
"""
if self.equals(Vector.zero):
return S.Zero if scalar else Vector.zero
if scalar:
return self.dot(other) / self.dot(self)
else:
return self.dot(other) / self.dot(self) * self
@property
def _projections(self):
"""
Returns the components of this vector but the output includes
also zero values components.
Examples
========
>>> from sympy.vector import CoordSys3D, Vector
>>> C = CoordSys3D('C')
>>> v1 = 3*C.i + 4*C.j + 5*C.k
>>> v1._projections
(3, 4, 5)
>>> v2 = C.x*C.y*C.z*C.i
>>> v2._projections
(C.x*C.y*C.z, 0, 0)
>>> v3 = Vector.zero
>>> v3._projections
(0, 0, 0)
"""
from sympy.vector.operators import _get_coord_systems
if isinstance(self, VectorZero):
return (S.Zero, S.Zero, S.Zero)
base_vec = next(iter(_get_coord_systems(self))).base_vectors()
return tuple([self.dot(i) for i in base_vec])
def __or__(self, other):
return self.outer(other)
__or__.__doc__ = outer.__doc__
def to_matrix(self, system):
"""
Returns the matrix form of this vector with respect to the
specified coordinate system.
Parameters
==========
system : CoordSys3D
The system wrt which the matrix form is to be computed
Examples
========
>>> from sympy.vector import CoordSys3D
>>> C = CoordSys3D('C')
>>> from sympy.abc import a, b, c
>>> v = a*C.i + b*C.j + c*C.k
>>> v.to_matrix(C)
Matrix([
[a],
[b],
[c]])
"""
return Matrix([self.dot(unit_vec) for unit_vec in
system.base_vectors()])
def separate(self):
"""
The constituents of this vector in different coordinate systems,
as per its definition.
Returns a dict mapping each CoordSys3D to the corresponding
constituent Vector.
Examples
========
>>> from sympy.vector import CoordSys3D
>>> R1 = CoordSys3D('R1')
>>> R2 = CoordSys3D('R2')
>>> v = R1.i + R2.i
>>> v.separate() == {R1: R1.i, R2: R2.i}
True
"""
parts = {}
for vect, measure in self.components.items():
parts[vect.system] = (parts.get(vect.system, Vector.zero) +
vect * measure)
return parts
def _div_helper(one, other):
""" Helper for division involving vectors. """
if isinstance(one, Vector) and isinstance(other, Vector):
raise TypeError("Cannot divide two vectors")
elif isinstance(one, Vector):
if other == S.Zero:
raise ValueError("Cannot divide a vector by zero")
return VectorMul(one, Pow(other, S.NegativeOne))
else:
raise TypeError("Invalid division involving a vector")
# The following is adapted from the matrices.expressions.matexpr file
def get_postprocessor(cls):
def _postprocessor(expr):
vec_class = {Add: VectorAdd}[cls]
vectors = []
for term in expr.args:
if isinstance(term.kind, VectorKind):
vectors.append(term)
if vec_class == VectorAdd:
return VectorAdd(*vectors).doit(deep=False)
return _postprocessor
Basic._constructor_postprocessor_mapping[Vector] = {
"Add": [get_postprocessor(Add)],
}
| Vector |
python | networkx__networkx | networkx/algorithms/tests/test_summarization.py | {
"start": 9006,
"end": 11804
} | class ____(AbstractSNAP):
relationship_attributes = ()
def test_summary_graph(self):
original_graph = self.build_original_graph()
summary_graph = self.build_summary_graph()
relationship_attributes = ("type",)
generated_summary_graph = nx.snap_aggregation(
original_graph, self.node_attributes
)
relabeled_summary_graph = self.deterministic_labels(generated_summary_graph)
assert nx.is_isomorphic(summary_graph, relabeled_summary_graph)
def build_original_graph(self):
nodes = {
"A": {"color": "Red"},
"B": {"color": "Red"},
"C": {"color": "Red"},
"D": {"color": "Red"},
"E": {"color": "Blue"},
"F": {"color": "Blue"},
"G": {"color": "Blue"},
"H": {"color": "Blue"},
"I": {"color": "Yellow"},
"J": {"color": "Yellow"},
"K": {"color": "Yellow"},
"L": {"color": "Yellow"},
}
edges = [
("A", "B"),
("A", "C"),
("A", "E"),
("A", "I"),
("B", "D"),
("B", "J"),
("B", "F"),
("C", "G"),
("D", "H"),
("I", "J"),
("J", "K"),
("I", "L"),
]
G = nx.Graph()
for node in nodes:
attributes = nodes[node]
G.add_node(node, **attributes)
for source, target in edges:
G.add_edge(source, target)
return G
def build_summary_graph(self):
nodes = {
"Supernode-0": {"color": "Red"},
"Supernode-1": {"color": "Red"},
"Supernode-2": {"color": "Blue"},
"Supernode-3": {"color": "Blue"},
"Supernode-4": {"color": "Yellow"},
"Supernode-5": {"color": "Yellow"},
}
edges = [
("Supernode-0", "Supernode-0"),
("Supernode-0", "Supernode-1"),
("Supernode-0", "Supernode-2"),
("Supernode-0", "Supernode-4"),
("Supernode-1", "Supernode-3"),
("Supernode-4", "Supernode-4"),
("Supernode-4", "Supernode-5"),
]
G = nx.Graph()
for node in nodes:
attributes = nodes[node]
G.add_node(node, **attributes)
for source, target in edges:
G.add_edge(source, target)
supernodes = {
"Supernode-0": {"A", "B"},
"Supernode-1": {"C", "D"},
"Supernode-2": {"E", "F"},
"Supernode-3": {"G", "H"},
"Supernode-4": {"I", "J"},
"Supernode-5": {"K", "L"},
}
nx.set_node_attributes(G, supernodes, "group")
return G
| TestSNAPNoEdgeTypes |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/expression/binary_implicit_string.py | {
"start": 5402,
"end": 5510
} | class ____:
f.write ("Pathway name" + "\t" "Database Identifier" + "\t" "Source database" + "\n")
| EC2REPATH |
python | openai__openai-python | src/openai/types/conversations/item_list_params.py | {
"start": 292,
"end": 1896
} | class ____(TypedDict, total=False):
after: str
"""An item ID to list items after, used in pagination."""
include: List[ResponseIncludable]
"""Specify additional output data to include in the model response.
Currently supported values are:
- `web_search_call.action.sources`: Include the sources of the web search tool
call.
- `code_interpreter_call.outputs`: Includes the outputs of python code execution
in code interpreter tool call items.
- `computer_call_output.output.image_url`: Include image urls from the computer
call output.
- `file_search_call.results`: Include the search results of the file search tool
call.
- `message.input_image.image_url`: Include image urls from the input message.
- `message.output_text.logprobs`: Include logprobs with assistant messages.
- `reasoning.encrypted_content`: Includes an encrypted version of reasoning
tokens in reasoning item outputs. This enables reasoning items to be used in
multi-turn conversations when using the Responses API statelessly (like when
the `store` parameter is set to `false`, or when an organization is enrolled
in the zero data retention program).
"""
limit: int
"""A limit on the number of objects to be returned.
Limit can range between 1 and 100, and the default is 20.
"""
order: Literal["asc", "desc"]
"""The order to return the input items in. Default is `desc`.
- `asc`: Return the input items in ascending order.
- `desc`: Return the input items in descending order.
"""
| ItemListParams |
python | kamyu104__LeetCode-Solutions | Python/largest-component-size-by-common-factor.py | {
"start": 143,
"end": 700
} | class ____(object):
def __init__(self, n):
self.set = range(n)
self.size = [1]*n
def find_set(self, x):
if self.set[x] != x:
self.set[x] = self.find_set(self.set[x]) # path compression.
return self.set[x]
def union_set(self, x, y):
x_root, y_root = map(self.find_set, (x, y))
if x_root == y_root:
return False
self.set[min(x_root, y_root)] = max(x_root, y_root)
self.size[max(x_root, y_root)] += self.size[min(x_root, y_root)]
return True
| UnionFind |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-chroma/unit_tests/test_indexer.py | {
"start": 323,
"end": 7130
} | class ____(unittest.TestCase):
def setUp(self):
self.mock_config = ChromaIndexingConfigModel(
**{
"collection_name": "dummy-collection",
"auth_method": {
"mode": "persistent_client",
"path": "/local/path",
},
}
)
self.chroma_indexer = ChromaIndexer(self.mock_config)
self.chroma_indexer._get_client = Mock()
self.mock_client = self.chroma_indexer._get_client()
self.mock_client.get_or_create_collection = Mock()
self.mock_collection = self.mock_client.get_or_create_collection()
self.chroma_indexer.client = self.mock_client
self.mock_client.get_collection = Mock()
def test_valid_collection_name(self):
test_configs = [
({"collection_name": "dummy-collection", "auth_method": {"mode": "persistent_client", "path": "/local/path"}}, None),
(
{"collection_name": "du", "auth_method": {"mode": "persistent_client", "path": "/local/path"}},
"The length of the collection name must be between 3 and 63 characters",
),
(
{
"collection_name": "dummy-collectionxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
"auth_method": {"mode": "persistent_client", "path": "/local/path"},
},
"The length of the collection name must be between 3 and 63 characters",
),
(
{"collection_name": "1dummy-colle..ction4", "auth_method": {"mode": "persistent_client", "path": "/local/path"}},
"The collection name must not contain two consecutive dots",
),
(
{"collection_name": "Dummy-coll...ectioN", "auth_method": {"mode": "persistent_client", "path": "/local/path"}},
"The collection name must start and end with a lowercase letter or a digit",
),
(
{"collection_name": "-dum?my-collection-", "auth_method": {"mode": "persistent_client", "path": "/local/path"}},
"The collection name must start and end with a lowercase letter or a digit",
),
(
{"collection_name": "dummy?collection", "auth_method": {"mode": "persistent_client", "path": "/local/path"}},
"The collection name can only contain lower case alphanumerics, dots, dashes, and underscores",
),
(
{"collection_name": "345.4.23.12", "auth_method": {"mode": "persistent_client", "path": "/local/path"}},
"The collection name must not be a valid IP address.",
),
]
for config, expected_error in test_configs:
mock_config = ChromaIndexingConfigModel(**config)
chroma_indexer = ChromaIndexer(mock_config)
chroma_indexer._get_client = Mock()
result = chroma_indexer.check()
self.assertEqual(result, expected_error)
def test_valid_path(self):
test_configs = [
({"collection_name": "dummy-collection", "auth_method": {"mode": "persistent_client", "path": "/local/path"}}, None),
(
{"collection_name": "dummy-collection", "auth_method": {"mode": "persistent_client", "path": "local/path"}},
"Path must be prefixed with /local",
),
(
{"collection_name": "dummy-collection", "auth_method": {"mode": "persistent_client", "path": "/localpath"}},
"Path must be prefixed with /local",
),
(
{"collection_name": "dummy-collection", "auth_method": {"mode": "persistent_client", "path": "./path"}},
"Path must be prefixed with /local",
),
]
for config, expected_error in test_configs:
mock_config = ChromaIndexingConfigModel(**config)
chroma_indexer = ChromaIndexer(mock_config)
chroma_indexer._get_client = Mock()
result = chroma_indexer.check()
self.assertEqual(result, expected_error)
def test_check_returns_expected_result(self):
check_result = self.chroma_indexer.check()
self.assertIsNone(check_result)
self.chroma_indexer._get_client.assert_called()
self.mock_client.heartbeat.assert_called()
self.mock_client.get_or_create_collection.assert_called()
self.mock_client.get_or_create_collection().count.assert_called()
def test_check_handles_failure_conditions(self):
# Test 1: client heartbeat returns error
self.mock_client.heartbeat.side_effect = Exception("Random exception")
result = self.chroma_indexer.check()
self.assertTrue("Random exception" in result)
# Test 2: client server is not alive
self.mock_client.heartbeat.side_effect = None
self.mock_client.heartbeat.return_value = None
result = self.chroma_indexer.check()
self.assertEqual(result, "Chroma client server is not alive")
# Test 3: unable to get collection
self.mock_client.heartbeat.return_value = 45465
self.mock_collection.count.return_value = None
result = self.chroma_indexer.check()
self.assertEqual(result, f"unable to get or create collection with name {self.chroma_indexer.collection_name}")
def test_pre_sync_calls_delete(self):
self.chroma_indexer.pre_sync(
Mock(
streams=[
Mock(
destination_sync_mode=DestinationSyncMode.overwrite,
stream=AirbyteStream(name="some_stream", json_schema={}, supported_sync_modes=[SyncMode.full_refresh]),
)
]
)
)
self.mock_client.get_collection().delete.assert_called_with(where={"_ab_stream": {"$in": ["some_stream"]}})
def test_pre_sync_does_not_call_delete(self):
self.chroma_indexer.pre_sync(
Mock(streams=[Mock(destination_sync_mode=DestinationSyncMode.append, stream=Mock(name="some_stream"))])
)
self.mock_client.get_collection().delete.assert_not_called()
def test_index_calls_insert(self):
self.chroma_indexer.index([Mock(metadata={"key": "value"}, page_content="some content", embedding=[1, 2, 3])], None, "some_stream")
self.mock_client.get_collection().add.assert_called_once()
def test_index_calls_delete(self):
self.chroma_indexer.delete(["some_id"], None, "some_stream")
self.mock_client.get_collection().delete.assert_called_with(where={"_ab_record_id": {"$in": ["some_id"]}})
| TestChromaIndexer |
python | zarr-developers__zarr-python | src/zarr/core/indexing.py | {
"start": 12999,
"end": 18046
} | class ____:
dim_len: int
dim_chunk_len: int
nitems: int
nchunks: int
start: int
stop: int
step: int
def __init__(self, dim_sel: slice, dim_len: int, dim_chunk_len: int) -> None:
# normalize
start, stop, step = dim_sel.indices(dim_len)
if step < 1:
raise NegativeStepError("only slices with step >= 1 are supported.")
object.__setattr__(self, "start", start)
object.__setattr__(self, "stop", stop)
object.__setattr__(self, "step", step)
object.__setattr__(self, "dim_len", dim_len)
object.__setattr__(self, "dim_chunk_len", dim_chunk_len)
object.__setattr__(self, "nitems", max(0, ceildiv((stop - start), step)))
object.__setattr__(self, "nchunks", ceildiv(dim_len, dim_chunk_len))
def __iter__(self) -> Iterator[ChunkDimProjection]:
# figure out the range of chunks we need to visit
dim_chunk_ix_from = 0 if self.start == 0 else self.start // self.dim_chunk_len
dim_chunk_ix_to = ceildiv(self.stop, self.dim_chunk_len)
# iterate over chunks in range
for dim_chunk_ix in range(dim_chunk_ix_from, dim_chunk_ix_to):
# compute offsets for chunk within overall array
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_limit = min(self.dim_len, (dim_chunk_ix + 1) * self.dim_chunk_len)
# determine chunk length, accounting for trailing chunk
dim_chunk_len = dim_limit - dim_offset
if self.start < dim_offset:
# selection starts before current chunk
dim_chunk_sel_start = 0
remainder = (dim_offset - self.start) % self.step
if remainder:
dim_chunk_sel_start += self.step - remainder
# compute number of previous items, provides offset into output array
dim_out_offset = ceildiv((dim_offset - self.start), self.step)
else:
# selection starts within current chunk
dim_chunk_sel_start = self.start - dim_offset
dim_out_offset = 0
if self.stop > dim_limit:
# selection ends after current chunk
dim_chunk_sel_stop = dim_chunk_len
else:
# selection ends within current chunk
dim_chunk_sel_stop = self.stop - dim_offset
dim_chunk_sel = slice(dim_chunk_sel_start, dim_chunk_sel_stop, self.step)
dim_chunk_nitems = ceildiv((dim_chunk_sel_stop - dim_chunk_sel_start), self.step)
# If there are no elements on the selection within this chunk, then skip
if dim_chunk_nitems == 0:
continue
dim_out_sel = slice(dim_out_offset, dim_out_offset + dim_chunk_nitems)
is_complete_chunk = (
dim_chunk_sel_start == 0 and (self.stop >= dim_limit) and self.step in [1, None]
)
yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel, is_complete_chunk)
def check_selection_length(selection: SelectionNormalized, shape: tuple[int, ...]) -> None:
if len(selection) > len(shape):
err_too_many_indices(selection, shape)
def replace_ellipsis(selection: Any, shape: tuple[int, ...]) -> SelectionNormalized:
selection = ensure_tuple(selection)
# count number of ellipsis present
n_ellipsis = sum(1 for i in selection if i is Ellipsis)
if n_ellipsis > 1:
# more than 1 is an error
raise IndexError("an index can only have a single ellipsis ('...')")
elif n_ellipsis == 1:
# locate the ellipsis, count how many items to left and right
n_items_l = selection.index(Ellipsis) # items to left of ellipsis
n_items_r = len(selection) - (n_items_l + 1) # items to right of ellipsis
n_items = len(selection) - 1 # all non-ellipsis items
if n_items >= len(shape):
# ellipsis does nothing, just remove it
selection = tuple(i for i in selection if i != Ellipsis)
else:
# replace ellipsis with as many slices are needed for number of dims
new_item = selection[:n_items_l] + ((slice(None),) * (len(shape) - n_items))
if n_items_r:
new_item += selection[-n_items_r:]
selection = new_item
# fill out selection if not completely specified
if len(selection) < len(shape):
selection += (slice(None),) * (len(shape) - len(selection))
# check selection not too long
check_selection_length(selection, shape)
return cast("SelectionNormalized", selection)
def replace_lists(selection: SelectionNormalized) -> SelectionNormalized:
return tuple(
np.asarray(dim_sel) if isinstance(dim_sel, list) else dim_sel for dim_sel in selection
)
T = TypeVar("T")
def ensure_tuple(v: Any) -> SelectionNormalized:
if not isinstance(v, tuple):
v = (v,)
return cast("SelectionNormalized", v)
| SliceDimIndexer |
python | spack__spack | lib/spack/spack/binary_distribution.py | {
"start": 94088,
"end": 94295
} | class ____(spack.error.SpackError):
"""Raised when a buildcache cannot be read for any reason"""
FetchIndexResult = collections.namedtuple("FetchIndexResult", "etag hash data fresh")
| BuildcacheIndexError |
python | getsentry__sentry | src/sentry/web/frontend/project_event.py | {
"start": 226,
"end": 934
} | class ____(ProjectView):
required_scope = "event:read"
def handle(
self, request: HttpRequest, organization, project, client_event_id
) -> HttpResponseRedirect:
"""
Given a client event id and project, redirects to the event page
"""
event = eventstore.backend.get_event_by_id(project.id, client_event_id)
if event is None:
raise Http404
if not event.group_id:
raise Http404
path = reverse(
"sentry-organization-event-detail",
args=[organization.slug, event.group_id, event.event_id],
)
return HttpResponseRedirect(organization.absolute_url(path))
| ProjectEventRedirect |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {
"start": 23141,
"end": 23523
} | class ____(DifferentiableAOTOutput):
idx: int
def expr(self) -> str:
return f"__aliased_arg_with_metadata_mutation{self.idx}"
# NB: this is marked differentiable as it /would/ be differentiable if we
# support double backwards, but we never generate this today because we
# don't support double backwards.
@dataclasses.dataclass(frozen=True)
| MetadataMutationAOTOutput |
python | facebook__pyre-check | client/commands/tests/libcst_util_test.py | {
"start": 769,
"end": 5984
} | class ____(testslide.TestCase):
def setUp(self) -> None:
self.maxDiff = None
def test_success_case(self) -> None:
"""
Tests success cases for:
1. TODO:import statement
2. function name
3. imported Types
4. global scoped variables
5. out of order variables
"""
test_code: str = """
import os
from pathlib import Path as TestPath
test_path: str = "TEST_PATH"
def get_path() -> TestPath:
return TestPath(os.environ[test_path])
def count_level() -> int:
return x.split("")
x = get_path()
print(count_level())
"""
# 2 - get_path
visitor: libcst_util.QualifiedNameWithPositionVisitor = (
libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=7, character=4),
)
)
results = visitor.find_references()
self.assertEqual(
results, [create_lsp_range(7, 5, 13), create_lsp_range(13, 5, 13)]
)
# 3 - `TestPath` in return statement of `get_path`
visitor = libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=7, character=19),
)
results = visitor.find_references()
self.assertEqual(
results, [create_lsp_range(7, 19, 27), create_lsp_range(8, 12, 20)]
)
# 4 - `test_path`
visitor = libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=5, character=1),
)
results = visitor.find_references()
self.assertEqual(
results, [create_lsp_range(5, 1, 10), create_lsp_range(8, 32, 41)]
)
# 5 - `x`
visitor = libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=11, character=12),
)
references_to_x = [create_lsp_range(11, 12, 13), create_lsp_range(13, 1, 2)]
results = visitor.find_references()
self.assertEqual(results, references_to_x)
visitor = libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=13, character=1),
)
results = visitor.find_references()
self.assertEqual(results, references_to_x)
"""
Things we dont' expect to return references for:
1. Keywords
2. Literals
a. string
b. int
c. bool
"""
def test_keyword(self) -> None:
test_code: str = """
for x in y:
print(x)
for foo in bar:
pass
"""
visitor: libcst_util.QualifiedNameWithPositionVisitor = (
libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=2, character=1),
)
)
results = visitor.find_references()
self.assertEqual(results, [])
visitor = libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=6, character=4),
)
results = visitor.find_references()
self.assertEqual(results, [])
def test_int(self) -> None:
test_code: str = """
a : int = 1
b : int = 1 + 1
"""
visitor: libcst_util.QualifiedNameWithPositionVisitor = (
libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=2, character=11),
)
)
results = visitor.find_references()
self.assertEqual(results, [])
def test_booleans(self) -> None:
test_code: str = """
def foo() -> None:
if True:
return False
elif False:
pass
return True
"""
visitor: libcst_util.QualifiedNameWithPositionVisitor = (
libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=3, character=8),
)
)
results = visitor.find_references()
self.assertEqual(results, [])
def test_string(self) -> None:
test_code: str = """
c: string = "hello"
d: string = "hello" + "world"
"""
visitor: libcst_util.QualifiedNameWithPositionVisitor = (
libcst_util.generate_qualified_name_with_position_visitor(
Path(test_path),
Path(test_root),
test_code,
lsp.PyrePosition(line=2, character=14),
)
)
results = visitor.find_references()
self.assertEqual(results, [])
| LibcstUtilTest |
python | pytorch__pytorch | test/export/test_torchbind.py | {
"start": 47841,
"end": 65659
} | class ____(TestCase):
def setUp(self):
init_torchbind_implementations()
@torch._library.register_fake_class("_TorchScriptTesting::_TensorQueue")
class FakeTensorQueue:
def __init__(self, queue):
self.queue = queue
@classmethod
def __obj_unflatten__(cls, flattened_ctx):
return cls(**dict(flattened_ctx))
def push(self, x):
self.queue.append(x)
def pop(self):
return self.queue.pop(0)
def size(self):
return len(self.queue)
@torch._library.register_fake_class("_TorchScriptTesting::_FlattenWithTensorOp")
class FakeFlatten:
def __init__(self, t):
self.t = t
def get(self):
return self.t
@classmethod
def __obj_unflatten__(cls, flattened_ctx):
return cls(**dict(flattened_ctx))
torch._dynamo.reset()
def tearDown(self):
torch._dynamo.reset()
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_script_object_input(self, backend):
if backend == "eager":
backend = EagerAndRecordGraphs()
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.check_tq_is_fake = True
def forward(self, tq, x):
tq.push(x.cos())
tq.push(x.sin())
x_sin = tq.pop() - tq.size()
return x_sin, tq
mod = Model()
tq1 = torch.classes._TorchScriptTesting._TensorQueue(
torch.empty(
0,
).fill_(-1)
)
tq2 = torch.classes._TorchScriptTesting._TensorQueue(
torch.empty(
0,
).fill_(-1)
)
tq3 = torch.classes._TorchScriptTesting._TensorQueue(
torch.empty(
0,
).fill_(-1)
)
tq4 = torch.classes._TorchScriptTesting._TensorQueue(
torch.empty(
0,
).fill_(-1)
)
x = torch.randn(2, 3)
ret = torch.compile(mod, backend=backend)(tq1, x)
eager_ret = mod(tq2, x)
_assertEqualSkipScriptObject(self, ret, eager_ret)
self.assertEqual(ret[1].size(), eager_ret[1].size())
self.assertEqual(ret[1].pop(), eager_ret[1].pop())
# Note that dynamo captured graph
# does not return L_tq_ as output. This is because it's able
# to detect that L_tq_ is an input therefore don't return
# it as graph output. Related logic is in dynamo/codegen.py
if backend == "eager":
self.assertExpectedInline(
backend.graphs[0].code.strip(),
"""\
def forward(self, L_tq_ : torch.ScriptObject, L_x_ : torch.Tensor):
l_tq_ = L_tq_
l_x_ = L_x_
cos = l_x_.cos()
call_torchbind = torch.ops.higher_order.call_torchbind(l_tq_, 'push', cos); cos = None
sin = l_x_.sin(); l_x_ = None
call_torchbind_1 = torch.ops.higher_order.call_torchbind(l_tq_, 'push', sin); sin = None
call_torchbind_2 = torch.ops.higher_order.call_torchbind(l_tq_, 'pop')
call_torchbind_3 = torch.ops.higher_order.call_torchbind(l_tq_, 'size'); l_tq_ = None
x_sin = call_torchbind_2 - 1; call_torchbind_2 = None
return (x_sin,)""",
)
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_script_object_input_guards(self, backend):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.check_tq_is_fake = True
def forward(self, tq, x):
tq.push(x.cos())
tq.push(x.sin())
x_sin = tq.pop() - tq.size()
return x_sin, tq
mod = Model()
cnt = torch._dynamo.testing.CompileCounterWithBackend(backend)
x = torch.randn(2, 3)
tq1 = _empty_tensor_queue()
torch.compile(mod, backend=cnt)(tq1, x)
self.assertEqual(cnt.frame_count, 1)
tq2 = _empty_tensor_queue()
for _ in range(10):
tq2.push(torch.randn(4, 5, requires_grad=False))
torch.compile(mod, backend=cnt)(tq2, x)
# Queue length change causes re-compile
self.assertEqual(cnt.frame_count, 2)
tq3 = _empty_tensor_queue()
tq3.push(torch.randn(2, 3, requires_grad=False))
torch.compile(mod, backend=cnt)(tq3, x)
# Tensor in queue changes shape causes re-compile
self.assertEqual(cnt.frame_count, 3)
tq4 = _empty_tensor_queue()
tq4.push(torch.randn(2, 3, requires_grad=False))
torch.compile(mod, backend=cnt)(tq4, x)
# No recompile
self.assertEqual(cnt.frame_count, 3)
tq5 = _empty_tensor_queue()
tq5.push(torch.randn(2, 3, requires_grad=True))
torch.compile(mod, backend=cnt)(tq5, x)
# Tensor in queue changes dispatch key causes re-compile
self.assertEqual(cnt.frame_count, 4)
tq6 = _empty_tensor_queue()
tq6.push(torch.randn(2, 3, requires_grad=True, dtype=torch.float64))
torch.compile(mod, backend=cnt)(tq6, x)
# Tensor in queue changes dtype causes re-compile
self.assertEqual(cnt.frame_count, 5)
def test_compile_script_object_input_automatic_dynamic_shape(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.check_tq_is_fake = True
def forward(self, tq, x):
tq.push(x.cos())
tq.push(x.sin())
x_sin = tq.pop() - tq.size()
return x_sin, tq
mod = Model()
cnt = torch._dynamo.testing.CompileCounter()
x = torch.randn(2, 3)
tq1 = _empty_tensor_queue()
tq1.push(torch.randn(2, 3, requires_grad=False))
torch.compile(mod, backend=cnt)(tq1, x)
self.assertEqual(cnt.frame_count, 1)
tq2 = _empty_tensor_queue()
# make first tensor's second dim dynamic
tq2.push(torch.randn(2, 4, requires_grad=False))
torch.compile(mod, backend=cnt)(tq2, x)
self.assertEqual(cnt.frame_count, 2)
tq3 = _empty_tensor_queue()
tq3.push(torch.randn(2, 5, requires_grad=False))
# should have no-recompilation
torch.compile(mod, backend=cnt)(tq3, x)
self.assertEqual(cnt.frame_count, 2)
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_error_on_input_aliasing_contents(self, backend):
if backend == "eager":
backend = EagerAndRecordGraphs()
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.check_tq_is_fake = True
def forward(self, tq, x):
return x.sin(), tq.pop().cos()
x = torch.randn(2, 3)
mod = Model()
tq1 = _empty_tensor_queue()
tq1.push(x)
with self.assertRaisesRegex(RuntimeError, "is aliasing"):
torch.compile(mod, backend=backend)(tq1, x)
@parametrize("backend", ["eager", "aot_eager"])
def test_compile_error_on_script_obj_setattr(self, backend):
if backend == "eager":
backend = EagerAndRecordGraphs()
def setattr_f(tq):
tq.a = 1
return tq
with self.assertRaisesRegex(
RuntimeError, "Weird method call on TorchScript object"
):
torch.compile(setattr_f, backend=backend)(_empty_tensor_queue())
@parametrize("backend", ["eager", "aot_eager"])
def test_compile_error_on_script_obj_missing_attr(self, backend):
if backend == "eager":
backend = EagerAndRecordGraphs()
def setattr_f(tq):
return tq._not_defined_attr
with self.assertRaisesRegex(
RuntimeError, "FakeScriptObject missing method implementation"
):
torch.compile(setattr_f, backend=backend)(_empty_tensor_queue())
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_body_aliasing_contents(self, backend):
if backend == "eager":
backend = EagerAndRecordGraphs()
def f(tq, x):
x1 = x.view(-1)
x2 = x.permute(1, 0)
tq.push(x1)
tq.push(x2)
return x1 - tq.size(), x2 + tq.size(), tq
x = torch.randn(2, 3)
_assertEqualScriptObject(
self,
f(_empty_tensor_queue(), x),
torch.compile(f, backend=backend)(_empty_tensor_queue(), x),
)
if not torch._dynamo.is_compiling() and backend == "eager":
self.assertExpectedInline(
backend.graphs[0].code.strip(),
"""\
def forward(self, L_x_ : torch.Tensor, L_tq_ : torch.ScriptObject):
l_x_ = L_x_
l_tq_ = L_tq_
x1 = l_x_.view(-1)
x2 = l_x_.permute(1, 0); l_x_ = None
call_torchbind = torch.ops.higher_order.call_torchbind(l_tq_, 'push', x1)
call_torchbind_1 = torch.ops.higher_order.call_torchbind(l_tq_, 'push', x2)
call_torchbind_2 = torch.ops.higher_order.call_torchbind(l_tq_, 'size')
sub = x1 - 2; x1 = None
call_torchbind_3 = torch.ops.higher_order.call_torchbind(l_tq_, 'size'); l_tq_ = None
add = x2 + 2; x2 = None
return (sub, add)""",
)
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_tensor_op_in_tensor_flatten(self, backend):
test_obj = torch.classes._TorchScriptTesting._FlattenWithTensorOp(
torch.randn(3, 2)
)
class TestMod(torch.nn.Module):
def forward(self, obj, x):
return obj.get() + x + obj.get().size(0)
mod = TestMod()
x = torch.randn(3, 1)
eager_out = mod(test_obj, x)
compiled_out = torch.compile(mod, backend=backend, fullgraph=True)(test_obj, x)
ep = torch.export.export(mod, (test_obj, x), strict=False).run_decompositions(
{}
)
self.assertExpectedInline(
ep.graph_module.code.strip(),
"""\
def forward(self, token, obj, x):
with_effects = torch.ops.higher_order.with_effects(token, torch.ops.higher_order.call_torchbind, obj, 'get'); token = None
getitem = with_effects[0]
getitem_1 = with_effects[1]; with_effects = None
add = torch.ops.aten.add.Tensor(getitem_1, x); getitem_1 = x = None
with_effects_1 = torch.ops.higher_order.with_effects(getitem, torch.ops.higher_order.call_torchbind, obj, 'get'); getitem = obj = None
getitem_2 = with_effects_1[0]; with_effects_1 = None
add_1 = torch.ops.aten.add.Tensor(add, 3); add = None
return (getitem_2, add_1)""", # noqa: B950
)
self.assertEqual(eager_out, compiled_out)
self.assertEqual(eager_out, ep.module()(test_obj, x))
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_error_on_non_fakified_method(self, backend):
if backend == "eager":
backend = EagerAndRecordGraphs()
def f(tq, x):
x1 = x.view(-1)
x2 = x.permute(1, 0)
tq.push(x1)
tq.push(x2)
# though real tensor queue implemented a method clone_queue,
# The fakified version doesn't.
flat_obj = tq.clone_queue()
return flat_obj
x = torch.randn(2, 3)
with self.assertRaisesRegex(
RuntimeError, "FakeScriptObject missing method implementation"
):
torch.compile(f, backend=backend)(_empty_tensor_queue(), x)
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_obj_as_hop_input(self, backend):
def f(tq, x):
def fn(tq, x):
tq.push(x)
return x.sin()
return wrap(fn, tq, x)
x = torch.randn(2, 3)
_assertEqualScriptObject(
self,
f(_empty_tensor_queue(), x),
torch.compile(f, backend=backend)(_empty_tensor_queue(), x),
)
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_obj_closure(self, backend):
def f(x):
def inner_f(x):
tq.push(x.sin())
inner_f(x)
return tq.pop(), tq
opt_f = torch.compile(f, backend="eager")
tq = _empty_tensor_queue()
x = torch.randn(3, 2)
_assertEqualScriptObject(self, f(x), opt_f(x))
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_global_obj(self, backend):
global _TENSOR_QUEUE_GLOBAL_TEST
_TENSOR_QUEUE_GLOBAL_TEST = _empty_tensor_queue()
def f(x):
_TENSOR_QUEUE_GLOBAL_TEST.push(x.sin())
return _TENSOR_QUEUE_GLOBAL_TEST.pop(), _TENSOR_QUEUE_GLOBAL_TEST
opt_f = torch.compile(f, backend=backend)
x = torch.randn(3, 2)
eager_ret = f(x)
opt_ret = opt_f(x)
_assertEqualScriptObject(self, eager_ret, opt_ret)
def test_compile_obj_graph_breaks(self):
cnt = torch._dynamo.testing.CompileCounter()
def f(tq, x):
tq.push(x.sin())
tq.push(x.sin())
torch._dynamo.graph_break()
tq.pop()
torch._dynamo.graph_break()
tq.push(x.cos() + tq.size())
torch._dynamo.graph_break()
tq.push(x.cos() - tq.size())
return x, tq.pop(), tq
opt_f = torch.compile(f, backend=cnt)
x = torch.randn(3, 2)
_assertEqualScriptObject(
self, f(_empty_tensor_queue(), x), opt_f(_empty_tensor_queue(), x)
)
self.assertEqual(cnt.frame_count, 4)
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_obj_attributes(self, backend):
if backend == "eager":
backend = EagerAndRecordGraphs()
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.tq = _empty_tensor_queue()
def forward(self, x):
self.tq.push(x)
return self.tq.pop()
x = torch.randn(2, 3)
opt_f = torch.compile(Model(), backend=backend)
_assertEqualScriptObject(self, Model()(x), opt_f(x))
if backend == "eager":
self.assertEqual(len(backend.graphs), 1)
# lifted as input. In the future, we would want to cosolidate this
# with non-strict behavior, where they're set as attributes.
self.assertExpectedInline(
backend.graphs[0].code.strip(),
"""\
def forward(self, L_self_tq : torch.ScriptObject, L_x_ : torch.Tensor):
l_self_tq = L_self_tq
l_x_ = L_x_
call_torchbind = torch.ops.higher_order.call_torchbind(l_self_tq, 'push', l_x_); l_x_ = None
call_torchbind_1 = torch.ops.higher_order.call_torchbind(l_self_tq, 'pop'); l_self_tq = None
return (call_torchbind_1,)""",
)
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_obj_torchbind_op(self, backend):
def f(tq, x):
torch.ops._TorchScriptTesting.queue_push(tq, x.cos())
torch.ops._TorchScriptTesting.queue_push(tq, x.cos() + 1)
torch.ops._TorchScriptTesting.queue_pop(tq)
torch.ops._TorchScriptTesting.queue_push(tq, x.sin())
return tq.pop(), tq.pop() + tq.size(), tq
opt_f = torch.compile(f, backend=backend)
x = torch.randn(2)
_assertEqualScriptObject(
self, f(_empty_tensor_queue(), x), opt_f(_empty_tensor_queue(), x)
)
@requires_cuda_and_triton
@parametrize("device", ["cpu", "cuda"])
@parametrize("backend", ["eager", "aot_eager", "inductor"])
def test_compile_obj_torchbind_op_with_autocast(self, backend, device):
def f(tq, x):
with torch.autocast(device_type=device):
torch.ops._TorchScriptTesting.queue_push(tq, x.cos())
torch.ops._TorchScriptTesting.queue_push(tq, x.cos() + 1)
torch.ops._TorchScriptTesting.queue_pop(tq)
torch.ops._TorchScriptTesting.queue_push(tq, x.sin())
return tq.pop(), tq.pop() + tq.size(), tq
opt_f = torch.compile(f, backend=backend)
x = torch.randn(2, device=device)
_assertEqualScriptObject(
self, f(_empty_tensor_queue(), x), opt_f(_empty_tensor_queue(), x)
)
@requires_cuda_and_triton
@parametrize("device", ["cpu", "cuda"])
def test_export_obj_torchbind_op_with_autocast(self, device):
class Mod(torch.nn.Module):
def forward(self, x, tq):
with torch.autocast(device_type=device):
torch.ops._TorchScriptTesting.queue_push(tq, x.cos())
torch.ops._TorchScriptTesting.queue_push(tq, x.cos() + 1)
torch.ops._TorchScriptTesting.queue_pop(tq)
torch.ops._TorchScriptTesting.queue_push(tq, x.sin())
return tq.pop(), tq.pop() + tq.size(), tq
x = torch.randn(2, device=device)
args = (x,)
mod = Mod()
ep = torch.export.export(mod, (x, _empty_tensor_queue()))
_assertEqualScriptObject(
self, ep.module()(x, _empty_tensor_queue()), mod(x, _empty_tensor_queue())
)
@skipIfTorchDynamo("torchbind not supported with dynamo yet")
| TestCompileTorchbind |
python | viewflow__viewflow | viewflow/utils.py | {
"start": 3700,
"end": 4558
} | class ____:
"""
A property that can be overridden.
The viewprop class is a descriptor that works similarly to the built-in
`property` decorator but allows its value to be overridden on instances
of the class it is used in.
"""
def __init__(self, func: Any):
self.__doc__ = getattr(func, "__doc__")
self.fget = func
def __get__(self, obj: Optional[Any], objtype: Optional[Type[Any]] = None) -> Any:
if obj is None:
return self
if self.fget.__name__ not in obj.__dict__:
obj.__dict__[self.fget.__name__] = self.fget(obj)
return obj.__dict__[self.fget.__name__]
def __set__(self, obj: Any, value: Any) -> None:
obj.__dict__[self.fget.__name__] = value
def __repr__(self) -> str:
return "<view_property func={}>".format(self.fget)
| viewprop |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/storage_tests/test_upath_io_manager.py | {
"start": 15540,
"end": 22210
} | class ____(dg.ConfigurableIOManager, dg.UPathIOManager):
base_dir: str = PydanticField(None, description="Base directory for storing files.") # type: ignore
_base_path: UPath = PrivateAttr()
def setup_for_execution(self, context: InitResourceContext) -> None:
self._base_path = UPath(self.base_dir)
def dump_to_path(self, context: OutputContext, obj: Any, path: UPath):
with path.open("w") as file:
json.dump(obj, file)
async def load_from_path(self, context: InputContext, path: UPath) -> Any:
fs = self.get_async_filesystem(path)
if inspect.iscoroutinefunction(fs.open_async):
# S3FileSystem has this interface
file = await fs.open_async(str(path), "rb")
data = await file.read()
else:
# AsyncLocalFileSystem has this interface
async with fs.open_async(str(path), "rb") as file:
data = await file.read()
return json.loads(data)
@staticmethod
def get_async_filesystem(path: "Path") -> AsyncFileSystem:
"""A helper method, is useful inside an async `load_from_path`.
The returned `fsspec` FileSystem will have async IO methods.
https://filesystem-spec.readthedocs.io/en/latest/async.html.
"""
import morefs.asyn_local
if isinstance(path, UPath):
so = path.fs.storage_options.copy()
cls = type(path.fs)
if cls is fsspec.implementations.local.LocalFileSystem:
cls = morefs.asyn_local.AsyncLocalFileSystem
so["asynchronous"] = True
return cls(**so)
elif isinstance(path, Path):
return morefs.asyn_local.AsyncLocalFileSystem()
else:
raise dg.DagsterInvariantViolationError(
f"Path type {type(path)} is not supported by the UPathIOManager"
)
@pytest.mark.parametrize("json_data", [0, 0.0, [0, 1, 2], {"a": 0}, [{"a": 0}, {"b": 1}, {"c": 2}]])
def test_upath_io_manager_async_load_from_path(tmp_path: Path, json_data: Any):
manager = AsyncJSONIOManager(base_dir=str(tmp_path))
@dg.asset(io_manager_def=manager)
def non_partitioned_asset():
return json_data
result = dg.materialize([non_partitioned_asset])
assert result.output_for_node("non_partitioned_asset") == json_data
@dg.asset(partitions_def=dg.StaticPartitionsDefinition(["a", "b"]), io_manager_def=manager)
def partitioned_asset(context: OpExecutionContext):
return context.partition_key
result = dg.materialize([partitioned_asset], partition_key="a")
assert result.output_for_node("partitioned_asset") == "a"
def test_upath_io_manager_async_multiple_time_partitions(
tmp_path: Path,
daily: DailyPartitionsDefinition,
start: datetime,
):
manager = AsyncJSONIOManager(base_dir=str(tmp_path))
@dg.asset(partitions_def=daily, io_manager_def=manager)
def upstream_asset(context: AssetExecutionContext) -> str:
return context.partition_key
@dg.asset(
partitions_def=daily,
io_manager_def=manager,
ins={
"upstream_asset": dg.AssetIn(
partition_mapping=dg.TimeWindowPartitionMapping(start_offset=-1)
)
},
)
def downstream_asset(upstream_asset: dict[str, str]):
return upstream_asset
for days in range(2):
dg.materialize(
[upstream_asset],
partition_key=(start + timedelta(days=days)).strftime(daily.fmt),
)
result = dg.materialize(
[upstream_asset.to_source_asset(), downstream_asset],
partition_key=(start + timedelta(days=1)).strftime(daily.fmt),
)
downstream_asset_data = result.output_for_node("downstream_asset", "result")
assert len(downstream_asset_data) == 2, "downstream day should map to 2 upstream days"
def test_upath_io_manager_async_fail_on_missing_partitions(
tmp_path: Path,
daily: DailyPartitionsDefinition,
start: datetime,
):
manager = AsyncJSONIOManager(base_dir=str(tmp_path))
@dg.asset(partitions_def=daily, io_manager_def=manager)
def upstream_asset(context: AssetExecutionContext) -> str:
return context.partition_key
@dg.asset(
partitions_def=daily,
io_manager_def=manager,
ins={
"upstream_asset": dg.AssetIn(
partition_mapping=dg.TimeWindowPartitionMapping(start_offset=-1)
)
},
)
def downstream_asset(upstream_asset: dict[str, str]):
return upstream_asset
dg.materialize(
[upstream_asset],
partition_key=start.strftime(daily.fmt),
)
with pytest.raises(RuntimeError):
dg.materialize(
[upstream_asset.to_source_asset(), downstream_asset],
partition_key=(start + timedelta(days=4)).strftime(daily.fmt),
)
def test_upath_io_manager_async_allow_missing_partitions(
tmp_path: Path,
daily: DailyPartitionsDefinition,
start: datetime,
):
manager = AsyncJSONIOManager(base_dir=str(tmp_path))
@dg.asset(partitions_def=daily, io_manager_def=manager)
def upstream_asset(context: AssetExecutionContext) -> str:
return context.partition_key
@dg.asset(
partitions_def=daily,
io_manager_def=manager,
ins={
"upstream_asset": dg.AssetIn(
partition_mapping=dg.TimeWindowPartitionMapping(start_offset=-1),
metadata={"allow_missing_partitions": True},
)
},
)
def downstream_asset(upstream_asset: dict[str, str]):
return upstream_asset
dg.materialize(
[upstream_asset],
partition_key=start.strftime(daily.fmt),
)
result = dg.materialize(
[upstream_asset.to_source_asset(), downstream_asset],
partition_key=(start + timedelta(days=1)).strftime(daily.fmt),
)
downstream_asset_data = result.output_for_node("downstream_asset", "result")
assert len(downstream_asset_data) == 1, "1 partition should be missing"
def test_upath_can_transition_from_non_partitioned_to_partitioned(
tmp_path: Path, daily: DailyPartitionsDefinition, start: datetime
):
my_io_manager = PickleIOManager(UPath(tmp_path))
@dg.asset
def my_asset(): # type: ignore
return 1
assert dg.materialize([my_asset], resources={"io_manager": my_io_manager}).success
@dg.asset(partitions_def=daily)
def my_asset():
return 1
assert dg.materialize(
[my_asset], resources={"io_manager": my_io_manager}, partition_key=start.strftime(daily.fmt)
).success
| AsyncJSONIOManager |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py | {
"start": 66703,
"end": 80818
} | class ____:
def test_const_covered_neighbors(self):
G1 = nx.DiGraph([(0, 1), (1, 2), (0, 3), (2, 3)])
G2 = nx.DiGraph([("a", "b"), ("b", "c"), ("a", "k"), ("c", "k")])
gparams = _GraphParameters(G1, G2, None, None, None, None, None)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c"},
{"a": 0, "b": 1, "c": 2},
None,
None,
None,
None,
None,
None,
None,
None,
)
u, v = 3, "k"
assert _consistent_PT(u, v, gparams, sparams)
def test_const_no_covered_neighbors(self):
G1 = nx.DiGraph([(0, 1), (1, 2), (3, 4), (3, 5)])
G2 = nx.DiGraph([("a", "b"), ("b", "c"), ("k", "w"), ("k", "z")])
gparams = _GraphParameters(G1, G2, None, None, None, None, None)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c"},
{"a": 0, "b": 1, "c": 2},
None,
None,
None,
None,
None,
None,
None,
None,
)
u, v = 3, "k"
assert _consistent_PT(u, v, gparams, sparams)
def test_const_mixed_covered_uncovered_neighbors(self):
G1 = nx.DiGraph([(0, 1), (1, 2), (3, 0), (3, 2), (3, 4), (3, 5)])
G2 = nx.DiGraph(
[("a", "b"), ("b", "c"), ("k", "a"), ("k", "c"), ("k", "w"), ("k", "z")]
)
gparams = _GraphParameters(G1, G2, None, None, None, None, None)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c"},
{"a": 0, "b": 1, "c": 2},
None,
None,
None,
None,
None,
None,
None,
None,
)
u, v = 3, "k"
assert _consistent_PT(u, v, gparams, sparams)
def test_const_fail_cases(self):
G1 = nx.DiGraph(
[
(0, 1),
(2, 1),
(10, 0),
(10, 3),
(10, 4),
(5, 10),
(10, 6),
(1, 4),
(5, 3),
]
)
G2 = nx.DiGraph(
[
("a", "b"),
("c", "b"),
("k", "a"),
("k", "d"),
("k", "e"),
("f", "k"),
("k", "g"),
("b", "e"),
("f", "d"),
]
)
gparams = _GraphParameters(G1, G2, None, None, None, None, None)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c", 3: "d"},
{"a": 0, "b": 1, "c": 2, "d": 3},
None,
None,
None,
None,
None,
None,
None,
None,
)
u, v = 10, "k"
assert _consistent_PT(u, v, gparams, sparams)
# Delete one uncovered neighbor of u. Notice how it still passes the
# test. Two reasons for this:
# 1. If u, v had different degrees from the beginning, they wouldn't
# be selected as candidates in the first place.
# 2. Even if they are selected, consistency is basically
# 1-look-ahead, meaning that we take into consideration the
# relation of the candidates with their mapped neighbors.
# The node we deleted is not a covered neighbor.
# Such nodes will be checked by the cut_PT function, which is
# basically the 2-look-ahead, checking the relation of the
# candidates with T1, T2 (in which belongs the node we just deleted).
G1.remove_node(6)
assert _consistent_PT(u, v, gparams, sparams)
# Add one more covered neighbor of u in G1
G1.add_edge(u, 2)
assert not _consistent_PT(u, v, gparams, sparams)
# Compensate in G2
G2.add_edge(v, "c")
assert _consistent_PT(u, v, gparams, sparams)
# Add one more covered neighbor of v in G2
G2.add_edge(v, "x")
G1.add_node(7)
sparams.mapping.update({7: "x"})
sparams.reverse_mapping.update({"x": 7})
assert not _consistent_PT(u, v, gparams, sparams)
# Compensate in G1
G1.add_edge(u, 7)
assert _consistent_PT(u, v, gparams, sparams)
def test_cut_inconsistent_labels(self):
G1 = nx.DiGraph(
[
(0, 1),
(2, 1),
(10, 0),
(10, 3),
(10, 4),
(5, 10),
(10, 6),
(1, 4),
(5, 3),
]
)
G2 = nx.DiGraph(
[
("a", "b"),
("c", "b"),
("k", "a"),
("k", "d"),
("k", "e"),
("f", "k"),
("k", "g"),
("b", "e"),
("f", "d"),
]
)
l1 = {n: "blue" for n in G1.nodes()}
l2 = {n: "blue" for n in G2.nodes()}
l1.update({5: "green"}) # Change the label of one neighbor of u
gparams = _GraphParameters(
G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None
)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c", 3: "d"},
{"a": 0, "b": 1, "c": 2, "d": 3},
None,
None,
None,
None,
None,
None,
None,
None,
)
u, v = 10, "k"
assert _cut_PT(u, v, gparams, sparams)
def test_cut_consistent_labels(self):
G1 = nx.DiGraph(
[
(0, 1),
(2, 1),
(10, 0),
(10, 3),
(10, 4),
(5, 10),
(10, 6),
(1, 4),
(5, 3),
]
)
G2 = nx.DiGraph(
[
("a", "b"),
("c", "b"),
("k", "a"),
("k", "d"),
("k", "e"),
("f", "k"),
("k", "g"),
("b", "e"),
("f", "d"),
]
)
l1 = {n: "blue" for n in G1.nodes()}
l2 = {n: "blue" for n in G2.nodes()}
gparams = _GraphParameters(
G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None
)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c", 3: "d"},
{"a": 0, "b": 1, "c": 2, "d": 3},
{4},
{5, 10},
{6},
None,
{"e"},
{"f", "k"},
{"g"},
None,
)
u, v = 10, "k"
assert not _cut_PT(u, v, gparams, sparams)
def test_cut_same_labels(self):
G1 = nx.DiGraph(
[
(0, 1),
(2, 1),
(10, 0),
(10, 3),
(10, 4),
(5, 10),
(10, 6),
(1, 4),
(5, 3),
]
)
mapped = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 10: "k"}
G2 = nx.relabel_nodes(G1, mapped)
l1 = {n: "blue" for n in G1.nodes()}
l2 = {n: "blue" for n in G2.nodes()}
gparams = _GraphParameters(
G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None
)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c", 3: "d"},
{"a": 0, "b": 1, "c": 2, "d": 3},
{4},
{5, 10},
{6},
None,
{"e"},
{"f", "k"},
{"g"},
None,
)
u, v = 10, "k"
assert not _cut_PT(u, v, gparams, sparams)
# Change intersection between G1[u] and T1_out, so it's not the same as
# the one between G2[v] and T2_out
G1.remove_edge(u, 4)
assert _cut_PT(u, v, gparams, sparams)
# Compensate in G2
G2.remove_edge(v, mapped[4])
assert not _cut_PT(u, v, gparams, sparams)
# Change intersection between G1[u] and T1_in, so it's not the same as
# the one between G2[v] and T2_in
G1.remove_edge(5, u)
assert _cut_PT(u, v, gparams, sparams)
# Compensate in G2
G2.remove_edge(mapped[5], v)
assert not _cut_PT(u, v, gparams, sparams)
# Change intersection between G2[v] and T2_tilde, so it's not the same
# as the one between G1[u] and T1_tilde
G2.remove_edge(v, mapped[6])
assert _cut_PT(u, v, gparams, sparams)
# Compensate in G1
G1.remove_edge(u, 6)
assert not _cut_PT(u, v, gparams, sparams)
# Add disconnected nodes, which will form the new Ti_tilde
G1.add_nodes_from([6, 7, 8])
G2.add_nodes_from(["g", "y", "z"])
sparams.T1_tilde.update({6, 7, 8})
sparams.T2_tilde.update({"g", "y", "z"})
l1 = {n: "blue" for n in G1.nodes()}
l2 = {n: "blue" for n in G2.nodes()}
gparams = _GraphParameters(
G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None
)
assert not _cut_PT(u, v, gparams, sparams)
def test_cut_different_labels(self):
G1 = nx.DiGraph(
[
(0, 1),
(1, 2),
(14, 1),
(0, 4),
(1, 5),
(2, 6),
(3, 7),
(3, 6),
(10, 4),
(4, 9),
(6, 10),
(20, 9),
(20, 15),
(20, 12),
(20, 11),
(12, 13),
(11, 13),
(20, 8),
(20, 3),
(20, 5),
(0, 20),
]
)
mapped = {
0: "a",
1: "b",
2: "c",
3: "d",
4: "e",
5: "f",
6: "g",
7: "h",
8: "i",
9: "j",
10: "k",
11: "l",
12: "m",
13: "n",
14: "o",
15: "p",
20: "x",
}
G2 = nx.relabel_nodes(G1, mapped)
l1 = {n: "none" for n in G1.nodes()}
l2 = {}
l1.update(
{
9: "blue",
15: "blue",
12: "blue",
11: "green",
3: "green",
8: "red",
0: "red",
5: "yellow",
}
)
l2.update({mapped[n]: l for n, l in l1.items()})
gparams = _GraphParameters(
G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None
)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c", 3: "d"},
{"a": 0, "b": 1, "c": 2, "d": 3},
{4, 5, 6, 7, 20},
{14, 20},
{9, 10, 15, 12, 11, 13, 8},
None,
{"e", "f", "g", "x"},
{"o", "x"},
{"j", "k", "l", "m", "n", "i", "p"},
None,
)
u, v = 20, "x"
assert not _cut_PT(u, v, gparams, sparams)
# Change the orientation of the labels on neighbors of u compared to
# neighbors of v. Leave the structure intact
l1.update({9: "red"})
assert _cut_PT(u, v, gparams, sparams)
# compensate in G2
l2.update({mapped[9]: "red"})
assert not _cut_PT(u, v, gparams, sparams)
# Change the intersection of G1[u] and T1_out
G1.add_edge(u, 4)
assert _cut_PT(u, v, gparams, sparams)
# Same for G2[v] and T2_out
G2.add_edge(v, mapped[4])
assert not _cut_PT(u, v, gparams, sparams)
# Change the intersection of G1[u] and T1_in
G1.add_edge(u, 14)
assert _cut_PT(u, v, gparams, sparams)
# Same for G2[v] and T2_in
G2.add_edge(v, mapped[14])
assert not _cut_PT(u, v, gparams, sparams)
# Change the intersection of G2[v] and T2_tilde
G2.remove_edge(v, mapped[8])
assert _cut_PT(u, v, gparams, sparams)
# Same for G1[u] and T1_tilde
G1.remove_edge(u, 8)
assert not _cut_PT(u, v, gparams, sparams)
# Place 8 and mapped[8] in T1 and T2 respectively, by connecting it to covered nodes
G1.add_edge(8, 3)
G2.add_edge(mapped[8], mapped[3])
sparams.T1.add(8)
sparams.T2.add(mapped[8])
sparams.T1_tilde.remove(8)
sparams.T2_tilde.remove(mapped[8])
assert not _cut_PT(u, v, gparams, sparams)
# Remove neighbor of u from T1
G1.remove_node(5)
l1.pop(5)
sparams.T1.remove(5)
assert _cut_PT(u, v, gparams, sparams)
# Same in G2
G2.remove_node(mapped[5])
l2.pop(mapped[5])
sparams.T2.remove(mapped[5])
assert not _cut_PT(u, v, gparams, sparams)
def test_predecessor_T1_in_fail(self):
G1 = nx.DiGraph(
[(0, 1), (0, 3), (4, 0), (1, 5), (5, 2), (3, 6), (4, 6), (6, 5)]
)
mapped = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g"}
G2 = nx.relabel_nodes(G1, mapped)
l1 = {n: "blue" for n in G1.nodes()}
l2 = {n: "blue" for n in G2.nodes()}
gparams = _GraphParameters(
G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None
)
sparams = _StateParameters(
{0: "a", 1: "b", 2: "c"},
{"a": 0, "b": 1, "c": 2},
{3, 5},
{4, 5},
{6},
None,
{"d", "f"},
{"f"}, # mapped[4] is missing from T2_in
{"g"},
None,
)
u, v = 6, "g"
assert _cut_PT(u, v, gparams, sparams)
sparams.T2_in.add("e")
assert not _cut_PT(u, v, gparams, sparams)
| TestDiGraphISOFeasibility |
python | django__django | tests/template_tests/utils.py | {
"start": 3951,
"end": 4021
} | class ____:
def __str__(self):
return "you & me"
| UnsafeClass |
python | matplotlib__matplotlib | lib/matplotlib/quiver.py | {
"start": 35729,
"end": 48555
} | class ____(mcollections.PolyCollection):
"""
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`!_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`!_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
"""
# This may be an abuse of polygons here to render what is essentially maybe
# 1 triangle and a series of lines. It works fine as far as I can tell
# however.
@_docstring.interpd
def __init__(self, ax, *args,
pivot='tip', length=7, barbcolor=None, flagcolor=None,
sizes=None, fill_empty=False, barb_increments=None,
rounding=True, flip_barb=False, **kwargs):
"""
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pyplot interface documentation:
%(barbs_doc)s
"""
self.sizes = sizes or dict()
self.fill_empty = fill_empty
self.barb_increments = barb_increments or dict()
self.rounding = rounding
self.flip = np.atleast_1d(flip_barb)
transform = kwargs.pop('transform', ax.transData)
self._pivot = pivot
self._length = length
# Flagcolor and barbcolor provide convenience parameters for
# setting the facecolor and edgecolor, respectively, of the barb
# polygon. We also work here to make the flag the same color as the
# rest of the barb by default
if None in (barbcolor, flagcolor):
kwargs['edgecolors'] = 'face'
if flagcolor:
kwargs['facecolors'] = flagcolor
elif barbcolor:
kwargs['facecolors'] = barbcolor
else:
# Set to facecolor passed in or default to black
kwargs.setdefault('facecolors', 'k')
else:
kwargs['edgecolors'] = barbcolor
kwargs['facecolors'] = flagcolor
# Explicitly set a line width if we're not given one, otherwise
# polygons are not outlined and we get no barbs
if 'linewidth' not in kwargs and 'lw' not in kwargs:
kwargs['linewidth'] = 1
# Parse out the data arrays from the various configurations supported
x, y, u, v, c = _parse_args(*args, caller_name='barbs')
self.x = x
self.y = y
xy = np.column_stack((x, y))
# Make a collection
barb_size = self._length ** 2 / 4 # Empirically determined
super().__init__(
[], (barb_size,), offsets=xy, offset_transform=transform, **kwargs)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
"""
Find how many of each of the tail pieces is necessary.
Parameters
----------
mag : `~numpy.ndarray`
Vector magnitudes; must be non-negative (and an actual ndarray).
rounding : bool, default: True
Whether to round or to truncate to the nearest half-barb.
half, full, flag : float, defaults: 5, 10, 50
Increments for a half-barb, a barb, and a flag.
Returns
-------
n_flags, n_barbs : int array
For each entry in *mag*, the number of flags and barbs.
half_flag : bool array
For each entry in *mag*, whether a half-barb is needed.
empty_flag : bool array
For each entry in *mag*, whether nothing is drawn.
"""
# If rounding, round to the nearest multiple of half, the smallest
# increment
if rounding:
mag = half * np.around(mag / half)
n_flags, mag = divmod(mag, flag)
n_barb, mag = divmod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (n_flags > 0) | (n_barb > 0))
return n_flags.astype(int), n_barb.astype(int), half_flag, empty_flag
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
"""
Create the wind barbs.
Parameters
----------
u, v
Components of the vector in the x and y directions, respectively.
nflags, nbarbs, half_barb, empty_flag
Respectively, the number of flags, number of barbs, flag for
half a barb, and flag for empty barb, ostensibly obtained from
:meth:`_find_tails`.
length
The length of the barb staff in points.
pivot : {"tip", "middle"} or number
The point on the barb around which the entire barb should be
rotated. If a number, the start of the barb is shifted by that
many points from the origin.
sizes : dict
Coefficients specifying the ratio of a given feature to the length
of the barb. These features include:
- *spacing*: space between features (flags, full/half barbs).
- *height*: distance from shaft of top of a flag or full barb.
- *width*: width of a flag, twice the width of a full barb.
- *emptybarb*: radius of the circle used for low magnitudes.
fill_empty : bool
Whether the circle representing an empty barb should be filled or
not (this changes the drawing of the polygon).
flip : list of bool
Whether the features should be flipped to the other side of the
barb (useful for winds in the southern hemisphere).
Returns
-------
list of arrays of vertices
Polygon vertices for each of the wind barbs. These polygons have
been rotated to properly align with the vector direction.
"""
# These control the spacing and size of barb elements relative to the
# length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
# Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length / 2.)
endx = 0.0
try:
endy = float(pivot)
except ValueError:
endy = pivot_points[pivot.lower()]
# Get the appropriate angle for the vector components. The offset is
# due to the way the barb is initially drawn, going down the y-axis.
# This makes sense in a meteorological mode of thinking since there 0
# degrees corresponds to north (the y-axis traditionally)
angles = -(ma.arctan2(v, u) + np.pi / 2)
# Used for low magnitude. We just get the vertices, so if we make it
# out here, it can be reused. The center set here should put the
# center of the circle at the location(offset), rather than at the
# same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
# If we don't want the empty one filled, we make a degenerate
# polygon that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
# If the vector magnitude is too weak to draw anything, plot an
# empty circle instead
if empty_flag[index]:
# We can skip the transform since the circle has no preferred
# orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
# Handle if this barb should be flipped
barb_height = -full_height if flip[index] else full_height
# Add vertices for each flag
for i in range(nflags[index]):
# The spacing that works for the barbs is a little to much for
# the flags, but this only occurs when we have more than 1
# flag.
if offset != length:
offset += spacing / 2.
poly_verts.extend(
[[endx, endy + offset],
[endx + barb_height, endy - full_width / 2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
# Add vertices for each barb. These really are lines, but works
# great adding 3 vertices that basically pull the polygon out and
# back down the line
for i in range(nbarbs[index]):
poly_verts.extend(
[(endx, endy + offset),
(endx + barb_height, endy + offset + full_width / 2),
(endx, endy + offset)])
offset -= spacing
# Add the vertices for half a barb, if needed
if half_barb[index]:
# If the half barb is the first on the staff, traditionally it
# is offset from the end to make it easy to distinguish from a
# barb with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend(
[(endx, endy + offset),
(endx + barb_height / 2, endy + offset + full_width / 4),
(endx, endy + offset)])
# Rotate the barb according the angle. Making the barb first and
# then rotating it made the math for drawing the barb really easy.
# Also, the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
def set_UVC(self, U, V, C=None):
# We need to ensure we have a copy, not a reference to an array that
# might change before draw().
self.u = ma.masked_invalid(U, copy=True).ravel()
self.v = ma.masked_invalid(V, copy=True).ravel()
# Flip needs to have the same number of entries as everything else.
# Use broadcast_to to avoid a bloated array of identical values.
# (can't rely on actual broadcasting)
if len(self.flip) == 1:
flip = np.broadcast_to(self.flip, self.u.shape)
else:
flip = self.flip
if C is not None:
c = ma.masked_invalid(C, copy=True).ravel()
x, y, u, v, c, flip = cbook.delete_masked_points(
self.x.ravel(), self.y.ravel(), self.u, self.v, c,
flip.ravel())
_check_consistent_shapes(x, y, u, v, c, flip)
else:
x, y, u, v, flip = cbook.delete_masked_points(
self.x.ravel(), self.y.ravel(), self.u, self.v, flip.ravel())
_check_consistent_shapes(x, y, u, v, flip)
magnitude = np.hypot(u, v)
flags, barbs, halves, empty = self._find_tails(
magnitude, self.rounding, **self.barb_increments)
# Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes,
self.fill_empty, flip)
self.set_verts(plot_barbs)
# Set the color array
if C is not None:
self.set_array(c)
# Update the offsets in case the masked data changed
xy = np.column_stack((x, y))
self._offsets = xy
self.stale = True
def set_offsets(self, xy):
"""
Set the offsets for the barb polygons. This saves the offsets passed
in and masks them as appropriate for the existing U/V data.
Parameters
----------
xy : sequence of pairs of floats
"""
self.x = xy[:, 0]
self.y = xy[:, 1]
x, y, u, v = cbook.delete_masked_points(
self.x.ravel(), self.y.ravel(), self.u, self.v)
_check_consistent_shapes(x, y, u, v)
xy = np.column_stack((x, y))
super().set_offsets(xy)
self.stale = True
| Barbs |
python | Farama-Foundation__Gymnasium | gymnasium/envs/mujoco/mujoco_rendering.py | {
"start": 10714,
"end": 24069
} | class ____(BaseRender):
"""Class for window rendering in all MuJoCo environments."""
def __init__(
self,
model: "mujoco.MjModel",
data: "mujoco.MjData",
width: int | None = None,
height: int | None = None,
max_geom: int = 1000,
visual_options: dict[int, bool] = {},
):
glfw.init()
self._button_left_pressed = False
self._button_right_pressed = False
self._last_mouse_x = 0
self._last_mouse_y = 0
self._paused = False
self._transparent = False
self._contacts = False
self._render_every_frame = True
self._image_idx = 0
self._image_path = "/tmp/frame_%07d.png"
self._time_per_render = 1 / 60.0
self._run_speed = 1.0
self._loop_count = 0
self._advance_by_one_step = False
self._hide_menu = False
monitor_width, monitor_height = glfw.get_video_mode(
glfw.get_primary_monitor()
).size
width = monitor_width // 2 if width is None else width
height = monitor_height // 2 if height is None else height
glfw.window_hint(glfw.VISIBLE, 1)
self.window = glfw.create_window(width, height, "mujoco", None, None)
self.width, self.height = glfw.get_framebuffer_size(self.window)
window_width, _ = glfw.get_window_size(self.window)
self._scale = self.width * 1.0 / window_width
# set callbacks
glfw.set_cursor_pos_callback(self.window, self._cursor_pos_callback)
glfw.set_mouse_button_callback(self.window, self._mouse_button_callback)
glfw.set_scroll_callback(self.window, self._scroll_callback)
glfw.set_key_callback(self.window, self._key_callback)
super().__init__(model, data, width, height, max_geom, visual_options)
glfw.swap_interval(1)
def _set_mujoco_buffer(self):
mujoco.mjr_setBuffer(mujoco.mjtFramebuffer.mjFB_WINDOW, self.con)
def make_context_current(self):
glfw.make_context_current(self.window)
def free(self):
"""
Safely frees the OpenGL context and destroys the GLFW window,
handling potential issues during interpreter shutdown or resource cleanup.
"""
try:
if self.window:
if glfw.get_current_context() == self.window:
glfw.make_context_current(None)
glfw.destroy_window(self.window)
self.window = None
except AttributeError:
# Handle cases where attributes are missing due to improper environment closure
warn(
"Environment was not properly closed using 'env.close()'. Please ensure to close the environment explicitly. "
"GLFW module or dependencies are unloaded. Window cleanup might not have completed."
)
def __del__(self):
"""Eliminate all of the OpenGL glfw contexts and windows"""
self.free()
def render(self):
"""
Renders the environment geometries in the OpenGL glfw window:
1. Create the overlay for the left side panel menu.
2. Update the geometries used for rendering based on the current state of the model - `mujoco.mjv_updateScene()`.
3. Add markers to scene, these are additional geometries to include in the model, i.e arrows, https://mujoco.readthedocs.io/en/latest/APIreference.html?highlight=arrow#mjtgeom.
These markers are added with the `add_marker()` method before rendering.
4. Render the 3D scene to the window context - `mujoco.mjr_render()`.
5. Render overlays in the window context - `mujoco.mjr_overlay()`.
6. Swap front and back buffer, https://www.glfw.org/docs/3.3/quick.html.
7. Poll events like mouse clicks or keyboard input.
"""
# mjv_updateScene, mjr_render, mjr_overlay
def update():
# fill overlay items
self._create_overlay()
render_start = time.time()
if self.window is None:
return
elif glfw.window_should_close(self.window):
glfw.destroy_window(self.window)
glfw.terminate()
self.viewport.width, self.viewport.height = glfw.get_framebuffer_size(
self.window
)
# update scene
mujoco.mjv_updateScene(
self.model,
self.data,
self.vopt,
mujoco.MjvPerturb(),
self.cam,
mujoco.mjtCatBit.mjCAT_ALL.value,
self.scn,
)
# marker items
for marker in self._markers:
self._add_marker_to_scene(marker)
# render
mujoco.mjr_render(self.viewport, self.scn, self.con)
# overlay items
if not self._hide_menu:
for gridpos, [t1, t2] in self._overlays.items():
mujoco.mjr_overlay(
mujoco.mjtFontScale.mjFONTSCALE_150,
gridpos,
self.viewport,
t1,
t2,
self.con,
)
glfw.swap_buffers(self.window)
glfw.poll_events()
self._time_per_render = 0.9 * self._time_per_render + 0.1 * (
time.time() - render_start
)
if self._paused:
while self._paused:
update()
if self._advance_by_one_step:
self._advance_by_one_step = False
break
else:
self._loop_count += self.model.opt.timestep / (
self._time_per_render * self._run_speed
)
if self._render_every_frame:
self._loop_count = 1
while self._loop_count > 0:
update()
self._loop_count -= 1
# clear overlay
self._overlays.clear()
# clear markers
self._markers.clear()
def close(self):
self.free()
glfw.terminate()
def _key_callback(self, window, key: int, scancode, action: int, mods):
if action != glfw.RELEASE:
return
# Switch cameras
elif key == glfw.KEY_TAB:
self.cam.fixedcamid += 1
self.cam.type = mujoco.mjtCamera.mjCAMERA_FIXED
if self.cam.fixedcamid >= self.model.ncam:
self.cam.fixedcamid = -1
self.cam.type = mujoco.mjtCamera.mjCAMERA_FREE
# Pause simulation
elif key == glfw.KEY_SPACE and self._paused is not None:
self._paused = not self._paused
# Advances simulation by one step.
elif key == glfw.KEY_RIGHT and self._paused is not None:
self._advance_by_one_step = True
self._paused = True
# Slows down simulation
elif key == glfw.KEY_S:
self._run_speed /= 2.0
# Speeds up simulation
elif key == glfw.KEY_F:
self._run_speed *= 2.0
# Turn off / turn on rendering every frame.
elif key == glfw.KEY_D:
self._render_every_frame = not self._render_every_frame
# Capture screenshot
elif key == glfw.KEY_T:
img = np.zeros(
(
glfw.get_framebuffer_size(self.window)[1],
glfw.get_framebuffer_size(self.window)[0],
3,
),
dtype=np.uint8,
)
mujoco.mjr_readPixels(img, None, self.viewport, self.con)
imageio.imwrite(self._image_path % self._image_idx, np.flipud(img))
self._image_idx += 1
# Display contact forces
elif key == glfw.KEY_C:
self._contacts = not self._contacts
self.vopt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTPOINT] = self._contacts
self.vopt.flags[mujoco.mjtVisFlag.mjVIS_CONTACTFORCE] = self._contacts
# Display coordinate frames
elif key == glfw.KEY_E:
self.vopt.frame = 1 - self.vopt.frame
# Hide overlay menu
elif key == glfw.KEY_H:
self._hide_menu = not self._hide_menu
# Make transparent
elif key == glfw.KEY_R:
self._transparent = not self._transparent
if self._transparent:
self.model.geom_rgba[:, 3] /= 5.0
else:
self.model.geom_rgba[:, 3] *= 5.0
# Geom group visibility
elif key in (glfw.KEY_0, glfw.KEY_1, glfw.KEY_2, glfw.KEY_3, glfw.KEY_4):
self.vopt.geomgroup[key - glfw.KEY_0] ^= 1
# Quit
if key == glfw.KEY_ESCAPE:
print("Pressed ESC")
print("Quitting.")
glfw.destroy_window(self.window)
glfw.terminate()
def _cursor_pos_callback(
self, window: "glfw.LP__GLFWwindow", xpos: float, ypos: float
):
if not (self._button_left_pressed or self._button_right_pressed):
return
mod_shift = (
glfw.get_key(window, glfw.KEY_LEFT_SHIFT) == glfw.PRESS
or glfw.get_key(window, glfw.KEY_RIGHT_SHIFT) == glfw.PRESS
)
if self._button_right_pressed:
action = (
mujoco.mjtMouse.mjMOUSE_MOVE_H
if mod_shift
else mujoco.mjtMouse.mjMOUSE_MOVE_V
)
elif self._button_left_pressed:
action = (
mujoco.mjtMouse.mjMOUSE_ROTATE_H
if mod_shift
else mujoco.mjtMouse.mjMOUSE_ROTATE_V
)
else:
action = mujoco.mjtMouse.mjMOUSE_ZOOM
dx = int(self._scale * xpos) - self._last_mouse_x
dy = int(self._scale * ypos) - self._last_mouse_y
width, height = glfw.get_framebuffer_size(window)
mujoco.mjv_moveCamera(
self.model, action, dx / width, dy / height, self.scn, self.cam
)
self._last_mouse_x = int(self._scale * xpos)
self._last_mouse_y = int(self._scale * ypos)
def _mouse_button_callback(self, window: "glfw.LP__GLFWwindow", button, act, mods):
self._button_left_pressed = (
glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_LEFT) == glfw.PRESS
)
self._button_right_pressed = (
glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_RIGHT) == glfw.PRESS
)
x, y = glfw.get_cursor_pos(window)
self._last_mouse_x = int(self._scale * x)
self._last_mouse_y = int(self._scale * y)
def _scroll_callback(self, window, x_offset, y_offset: float):
mujoco.mjv_moveCamera(
self.model,
mujoco.mjtMouse.mjMOUSE_ZOOM,
0,
-0.05 * y_offset,
self.scn,
self.cam,
)
def _create_overlay(self):
topleft = mujoco.mjtGridPos.mjGRID_TOPLEFT
bottomleft = mujoco.mjtGridPos.mjGRID_BOTTOMLEFT
if self._render_every_frame:
self.add_overlay(topleft, "", "")
else:
self.add_overlay(
topleft,
"Run speed = %.3f x real time" % self._run_speed,
"[S]lower, [F]aster",
)
self.add_overlay(
topleft, "Ren[d]er every frame", "On" if self._render_every_frame else "Off"
)
self.add_overlay(
topleft,
"Switch camera (#cams = %d)" % (self.model.ncam + 1),
"[Tab] (camera ID = %d)" % self.cam.fixedcamid,
)
self.add_overlay(topleft, "[C]ontact forces", "On" if self._contacts else "Off")
self.add_overlay(topleft, "T[r]ansparent", "On" if self._transparent else "Off")
if self._paused is not None:
if not self._paused:
self.add_overlay(topleft, "Stop", "[Space]")
else:
self.add_overlay(topleft, "Start", "[Space]")
self.add_overlay(
topleft, "Advance simulation by one step", "[right arrow]"
)
self.add_overlay(
topleft, "Referenc[e] frames", "On" if self.vopt.frame == 1 else "Off"
)
self.add_overlay(topleft, "[H]ide Menu", "")
if self._image_idx > 0:
fname = self._image_path % (self._image_idx - 1)
self.add_overlay(topleft, "Cap[t]ure frame", "Saved as %s" % fname)
else:
self.add_overlay(topleft, "Cap[t]ure frame", "")
self.add_overlay(topleft, "Toggle geomgroup visibility", "0-4")
self.add_overlay(bottomleft, "FPS", "%d%s" % (1 / self._time_per_render, ""))
if mujoco.__version__ >= "3.0.0":
self.add_overlay(
bottomleft, "Solver iterations", str(self.data.solver_niter[0] + 1)
)
elif mujoco.__version__ < "3.0.0":
self.add_overlay(
bottomleft, "Solver iterations", str(self.data.solver_iter + 1)
)
self.add_overlay(
bottomleft, "Step", str(round(self.data.time / self.model.opt.timestep))
)
self.add_overlay(bottomleft, "timestep", "%.5f" % self.model.opt.timestep)
| WindowViewer |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-code-hierarchy/llama_index/packs/code_hierarchy/code_hierarchy.py | {
"start": 1295,
"end": 5415
} | class ____(BaseModel):
"""
Options for capturing the signature of a node.
"""
start_signature_types: Optional[List[_SignatureCaptureType]] = Field(
None,
description=(
"A list of node types any of which indicate the beginning of the signature."
"If this is none or empty, use the start_byte of the node."
),
)
end_signature_types: Optional[List[_SignatureCaptureType]] = Field(
None,
description=(
"A list of node types any of which indicate the end of the signature."
"If this is none or empty, use the end_byte of the node."
),
)
name_identifier: str = Field(
description=(
"The node type to use for the signatures 'name'.If retrieving the name is"
" more complicated than a simple type match, use a function which takes a"
" node and returns true or false as to whether its the name or not. The"
" first match is returned."
)
)
"""
Maps language -> Node Type -> SignatureCaptureOptions
The best way for a developer to discover these is to put a breakpoint at the TIP
tag in _chunk_node, and then create a unit test for some code, and then iterate
through the code discovering the node names.
"""
_DEFAULT_SIGNATURE_IDENTIFIERS: Dict[str, Dict[str, _SignatureCaptureOptions]] = {
"python": {
"function_definition": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="block", inclusive=False)],
name_identifier="identifier",
),
"class_definition": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="block", inclusive=False)],
name_identifier="identifier",
),
},
"html": {
"element": _SignatureCaptureOptions(
start_signature_types=[_SignatureCaptureType(type="<", inclusive=True)],
end_signature_types=[_SignatureCaptureType(type=">", inclusive=True)],
name_identifier="tag_name",
)
},
"cpp": {
"class_specifier": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="{", inclusive=False)],
name_identifier="type_identifier",
),
"function_definition": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="{", inclusive=False)],
name_identifier="function_declarator",
),
},
"typescript": {
"interface_declaration": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="{", inclusive=False)],
name_identifier="type_identifier",
),
"lexical_declaration": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="{", inclusive=False)],
name_identifier="identifier",
),
"function_declaration": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="{", inclusive=False)],
name_identifier="identifier",
),
"class_declaration": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="{", inclusive=False)],
name_identifier="type_identifier",
),
"method_definition": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="{", inclusive=False)],
name_identifier="property_identifier",
),
},
"php": {
"function_definition": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="}", inclusive=False)],
name_identifier="name",
),
"class_declaration": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="}", inclusive=False)],
name_identifier="name",
),
"method_declaration": _SignatureCaptureOptions(
end_signature_types=[_SignatureCaptureType(type="}", inclusive=False)],
name_identifier="name",
),
},
}
| _SignatureCaptureOptions |
python | streamlit__streamlit | lib/tests/streamlit/elements/arrow_dataframe_test.py | {
"start": 1624,
"end": 20207
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall arrow protos."""
def test_default_params(self):
"""Test that it can be called with a dataframe."""
df = pd.DataFrame({"a": [1, 2, 3]})
st.dataframe(df)
el = self.get_delta_from_queue().new_element
proto = el.arrow_data_frame
pd.testing.assert_frame_equal(convert_arrow_bytes_to_pandas_df(proto.data), df)
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
# Since dataframe and data editor share the same proto, we also test for
# properties only relevant for an editable dataframe.
assert proto.height == 0
assert proto.editing_mode == ArrowProto.EditingMode.READ_ONLY
assert proto.selection_mode == []
assert not proto.disabled
assert proto.column_order == []
assert proto.form_id == ""
assert proto.columns == "{}"
# ID should not be set:
assert proto.id == ""
# Row height is marked optional should not be set if not specified
assert not proto.HasField("row_height")
assert proto.row_height == 0
assert not proto.HasField("placeholder")
def test_dataframe_only_data(self):
df = mock_data_frame()
st.dataframe(df)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
pd.testing.assert_frame_equal(convert_arrow_bytes_to_pandas_df(proto.data), df)
def test_column_order_parameter(self):
"""Test that it can be called with column_order."""
st.dataframe(pd.DataFrame(), column_order=["a", "b"])
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.column_order == ["a", "b"]
def test_empty_column_order_parameter(self):
"""Test that an empty column_order is correctly added."""
st.dataframe(pd.DataFrame(), column_order=[])
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.column_order == []
@parameterized.expand(SHARED_TEST_CASES)
def test_with_compatible_data(
self,
name: str,
input_data: Any,
metadata: CaseMetadata,
):
"""Test that it can be called with compatible data."""
st.dataframe(input_data)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
reconstructed_df = convert_arrow_bytes_to_pandas_df(proto.data)
assert reconstructed_df.shape[0] == metadata.expected_rows
assert reconstructed_df.shape[1] == metadata.expected_cols
def test_hide_index_true(self):
"""Test that it can be called with hide_index=True param."""
data_df = pd.DataFrame(
{
"a": pd.Series([1, 2]),
"b": pd.Series(["foo", "bar"]),
}
)
st.dataframe(data_df, hide_index=True)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.columns == json.dumps({INDEX_IDENTIFIER: {"hidden": True}})
def test_hide_index_false(self):
"""Test that it can be called with hide_index=False param."""
data_df = pd.DataFrame(
{
"a": pd.Series([1, 2]),
"b": pd.Series(["foo", "bar"]),
}
)
st.dataframe(data_df, hide_index=False)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.columns == json.dumps({INDEX_IDENTIFIER: {"hidden": False}})
def test_row_height_parameter(self):
"""Test that it can be called with row_height."""
st.dataframe(pd.DataFrame(), row_height=100)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.row_height == 100
def test_placeholder_parameter(self):
"""Test that it can be called with placeholder."""
st.dataframe(pd.DataFrame(), placeholder="-")
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.placeholder == "-"
def test_uuid(self):
df = mock_data_frame()
styler = df.style
styler.set_uuid("FAKE_UUID")
st.dataframe(styler)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.styler.uuid == "FAKE_UUID"
def test_caption(self):
df = mock_data_frame()
styler = df.style
styler.set_caption("FAKE_CAPTION")
st.dataframe(styler)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.styler.caption == "FAKE_CAPTION"
def test_cell_styles(self):
df = mock_data_frame()
styler = df.style
# NOTE: If UUID is not set - a random UUID will be generated.
styler.set_uuid("FAKE_UUID")
styler.highlight_max(axis=None)
st.dataframe(styler)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert (
proto.styler.styles == "#T_FAKE_UUID_row1_col2 { background-color: yellow }"
)
def test_display_values(self):
df = pd.DataFrame(
[[1, 2, 3], [4, 5, 6]],
)
styler = df.style.format("{:.2%}")
st.dataframe(styler)
expected = pd.DataFrame(
[["100.00%", "200.00%", "300.00%"], ["400.00%", "500.00%", "600.00%"]],
)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
pd.testing.assert_frame_equal(
convert_arrow_bytes_to_pandas_df(proto.styler.display_values), expected
)
def test_throw_exception_if_data_exceeds_styler_config(self):
"""Test that an exception is thrown if the dataframe exceeds the styler.render.max_elements config."""
pd.set_option("styler.render.max_elements", 5000)
# big example with default styler.render.max_elements
df = pd.DataFrame(list(range(5001)))
with pytest.raises(StreamlitAPIException):
st.dataframe(df.style.format("{:03d}"))
pd.reset_option("styler.render.max_elements")
@patch.object(Styler, "_translate")
def test_styler_translate_gets_called(self, mock_styler_translate):
"""Tests that `styler._translate` is called with correct arguments."""
df = mock_data_frame()
styler = df.style.set_uuid("FAKE_UUID")
st.dataframe(styler)
mock_styler_translate.assert_called_once_with(False, False)
def test_dataframe_uses_convert_anything_to_df(self):
"""Test that st.altair_chart uses convert_anything_to_df to convert input data."""
df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
with patch(
"streamlit.dataframe_util.convert_anything_to_pandas_df"
) as convert_anything_to_df:
convert_anything_to_df.return_value = df
st.dataframe(df)
convert_anything_to_df.assert_called_once()
def test_dataframe_on_select_initial_returns(self):
"""Test st.dataframe returns an empty selection as initial result."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
selection = st.dataframe(df, on_select="rerun", key="selectable_df")
assert selection.selection.rows == []
assert selection.selection.columns == []
assert selection.selection.cells == []
# Check that the selection state is added to the session state:
assert st.session_state.selectable_df.selection.rows == []
assert st.session_state.selectable_df.selection.columns == []
assert st.session_state.selectable_df.selection.cells == []
def test_dataframe_with_invalid_on_select(self):
"""Test that an exception is thrown if the on_select parameter is invalid."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
with pytest.raises(StreamlitAPIException):
st.dataframe(df, on_select="invalid")
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_inside_form_on_select_rerun(self):
"""Test that form id is marshalled correctly inside of a form."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
with st.form("form"):
st.dataframe(df, on_select="rerun")
# 2 elements will be created: form block, dataframe
assert len(self.get_all_deltas_from_queue()) == 2
form_proto = self.get_delta_from_queue(0).add_block
arrow_proto = self.get_delta_from_queue(1).new_element.arrow_data_frame
assert arrow_proto.form_id == form_proto.form.form_id
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
def test_selectable_df_disallows_callbacks_inside_form(self):
"""Test that an exception is thrown if a callback is defined with a
selectable dataframe inside a form."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
with pytest.raises(StreamlitAPIException), st.form("form"):
st.dataframe(df, on_select=lambda: None)
def test_selectable_df_throws_exception_with_modified_sessions_state(self):
"""Test that an exception is thrown if the session state is modified."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
st.session_state.selectable_df = {
"selection": {"rows": [1], "columns": ["col1"]},
}
with pytest.raises(StreamlitAPIException):
st.dataframe(df, on_select="rerun", key="selectable_df")
def test_shows_cached_widget_replay_warning(self):
"""Test that a warning is shown when selections are activated and
it is used inside a cached function."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
st.cache_data(lambda: st.dataframe(df, on_select="rerun"))()
# The widget itself is still created, so we need to go back one element more:
el = self.get_delta_from_queue(-2).new_element.exception
assert el.type == "CachedWidgetWarning"
assert el.is_warning
@parameterized.expand(
[
("rerun", [1]),
("ignore", []),
(lambda: None, [1]),
]
)
def test_dataframe_valid_on_select(self, on_select, proto_value):
"""Test that the on_select parameter is parsed correctly."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
st.dataframe(df, on_select=on_select)
el = self.get_delta_from_queue().new_element.arrow_data_frame
assert el.selection_mode == proto_value
@parameterized.expand(
[
(
("multi-row", "multi-column"),
[
ArrowProto.SelectionMode.MULTI_ROW,
ArrowProto.SelectionMode.MULTI_COLUMN,
],
),
(
{"single-row", "single-column"},
[
ArrowProto.SelectionMode.SINGLE_ROW,
ArrowProto.SelectionMode.SINGLE_COLUMN,
],
),
(
{"single-row", "multi-column"},
[
ArrowProto.SelectionMode.SINGLE_ROW,
ArrowProto.SelectionMode.MULTI_COLUMN,
],
),
(
("multi-row", "single-column", "single-cell"),
[
ArrowProto.SelectionMode.MULTI_ROW,
ArrowProto.SelectionMode.SINGLE_COLUMN,
ArrowProto.SelectionMode.SINGLE_CELL,
],
),
("single-row", [ArrowProto.SelectionMode.SINGLE_ROW]),
("multi-column", [ArrowProto.SelectionMode.MULTI_COLUMN]),
("single-cell", [ArrowProto.SelectionMode.SINGLE_CELL]),
("multi-cell", [ArrowProto.SelectionMode.MULTI_CELL]),
]
)
def test_selection_mode_parsing(self, input_modes, expected_modes):
"""Test that the selection_mode parameter is parsed correctly."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
st.dataframe(df, on_select="rerun", selection_mode=input_modes)
el = self.get_delta_from_queue().new_element
assert el.arrow_data_frame.selection_mode == expected_modes
@parameterized.expand(
[
(["invalid", "single-row"],),
(["single-row", "multi-row"],),
(["single-column", "multi-column"],),
(["single-cell", "multi-cell"],),
]
)
def test_selection_mode_parsing_invalid(self, invalid_modes):
"""Test that an exception is thrown if the selection_mode parameter is invalid."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
with pytest.raises(StreamlitAPIException):
st.dataframe(df, on_select="rerun", selection_mode=invalid_modes)
def test_selection_mode_deactivated(self):
"""Test that selection modes are ignored when selections are deactivated."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
st.dataframe(
df, on_select="ignore", selection_mode=["single-row", "multi-column"]
)
el = self.get_delta_from_queue().new_element
assert len(el.arrow_data_frame.selection_mode) == 0
def test_row_selection_auto_hides_range_index(self):
"""Test that a RangeIndex is auto-hidden when row selection is enabled.
When selections are activated (on_select != "ignore") and the
selection_mode is a single row-selection mode ("single-row" or
"multi-row"), a dataframe with a default RangeIndex should have its
index column hidden automatically.
"""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
st.dataframe(df, on_select="rerun", selection_mode="multi-row")
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert proto.columns == json.dumps({INDEX_IDENTIFIER: {"hidden": True}})
def test_row_selections_shows_custom_index(self):
"""Test that a custom index is shown when row selection is enabled."""
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"], index=["a", "b"])
st.dataframe(df, on_select="rerun", selection_mode="multi-row")
proto = self.get_delta_from_queue().new_element.arrow_data_frame
assert "hidden" not in proto.columns
def test_use_right_display_values(self):
"""Test that _use_display_values gets correct value for "display_value" instead of the original one."""
class Status(str, enum.Enum):
success = "Success status"
df = pd.DataFrame({"pipeline": ["Success"], "status": [Status.success]})
def apply_color(v: Status) -> str:
return "color: red" if v == Status.success else ""
if is_pandas_version_less_than("2.2.0"):
styler = df.style.applymap(apply_color, subset=["status"])
else:
styler = df.style.map(apply_color, subset=["status"])
st.dataframe(styler)
expected = pd.DataFrame(
{"pipeline": ["Success"], "status": ["Success status"]},
)
proto = self.get_delta_from_queue().new_element.arrow_data_frame
pd.testing.assert_frame_equal(
convert_arrow_bytes_to_pandas_df(proto.styler.display_values), expected
)
def test_use_container_width_true_shows_deprecation_warning(self):
"""Test that use_container_width=True shows deprecation warning and sets width='stretch'."""
with patch("streamlit.elements.arrow.show_deprecation_warning") as mock_warning:
st.dataframe(pd.DataFrame({"a": [1, 2, 3]}), use_container_width=True)
# Check deprecation warning is shown
mock_warning.assert_called_once()
assert "use_container_width" in mock_warning.call_args[0][0]
el = self.get_delta_from_queue().new_element
# When use_container_width=True, it should set width='stretch'
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
def test_use_container_width_false_shows_deprecation_warning(self):
"""Test that use_container_width=False shows deprecation warning and sets width='content'."""
with patch("streamlit.elements.arrow.show_deprecation_warning") as mock_warning:
st.dataframe(pd.DataFrame({"a": [1, 2, 3]}), use_container_width=False)
# Check deprecation warning is shown
mock_warning.assert_called_once()
assert "use_container_width" in mock_warning.call_args[0][0]
el = self.get_delta_from_queue().new_element
# When use_container_width=False, it should set width='content'
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_use_container_width_false_with_integer_width(self):
"""Test use_container_width=False with integer width preserves the integer."""
with patch("streamlit.elements.arrow.show_deprecation_warning") as mock_warning:
st.dataframe(
pd.DataFrame({"a": [1, 2, 3]}), width=400, use_container_width=False
)
# Check deprecation warning is shown
mock_warning.assert_called_once()
el = self.get_delta_from_queue().new_element
# When use_container_width=False and width is integer, preserve integer width
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.width_config.pixel_width == 400
@pytest.mark.usefixtures("benchmark")
def test_pandas_styler_performance(self):
"""Performance benchmark for using styled dataframes with st.dataframe."""
def large_styler_df() -> None:
# Create a large DF with random numbers:
df = pd.DataFrame(np.random.rand(10000, 10), columns=list("ABCDEFGHIJ"))
# Format all numbers with pandas styler:
styler = df.style.format("{:.2f}")
st.dataframe(styler)
self.benchmark(large_styler_df)
| ArrowDataFrameProtoTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/dataproc.py | {
"start": 3040,
"end": 4449
} | class ____(BaseOperatorLink):
"""
Helper class for constructing Dataproc resource link.
.. warning::
This link is pending to deprecate.
"""
name = "Dataproc resource"
key = "conf"
@staticmethod
def persist(
context: Context,
url: str,
resource: str,
region: str,
project_id: str,
):
context["task_instance"].xcom_push(
key=DataprocLink.key,
value={
"region": region,
"project_id": project_id,
"url": url,
"resource": resource,
},
)
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
conf = XCom.get_value(key=self.key, ti_key=ti_key)
return (
conf["url"].format(
region=conf["region"], project_id=conf["project_id"], resource=conf["resource"]
)
if conf
else ""
)
def __attrs_post_init__(self):
# This link is still used into the selected operators
# - airflow.providers.google.cloud.operators.dataproc.DataprocJobBaseOperator
# As soon as we remove reference to this link we might deprecate it by add warning message
# with `stacklevel=3` below in this method.
...
@attr.s(auto_attribs=True)
| DataprocLink |
python | sympy__sympy | sympy/physics/quantum/sho1d.py | {
"start": 16730,
"end": 18957
} | class ____(SHOState, Ket):
"""1D eigenket.
Inherits from SHOState and Ket.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket
This is usually its quantum numbers or its symbol.
Examples
========
Ket's know about their associated bra:
>>> from sympy.physics.quantum.sho1d import SHOKet
>>> k = SHOKet('k')
>>> k.dual
<k|
>>> k.dual_class()
<class 'sympy.physics.quantum.sho1d.SHOBra'>
Take the Inner Product with a bra:
>>> from sympy.physics.quantum import InnerProduct
>>> from sympy.physics.quantum.sho1d import SHOKet, SHOBra
>>> k = SHOKet('k')
>>> b = SHOBra('b')
>>> InnerProduct(b,k).doit()
KroneckerDelta(b, k)
Vector representation of a numerical state ket:
>>> from sympy.physics.quantum.sho1d import SHOKet, NumberOp
>>> from sympy.physics.quantum.represent import represent
>>> k = SHOKet(3)
>>> N = NumberOp('N')
>>> represent(k, basis=N, ndim=4)
Matrix([
[0],
[0],
[0],
[1]])
"""
@classmethod
def dual_class(self):
return SHOBra
def _eval_innerproduct_SHOBra(self, bra, **hints):
result = KroneckerDelta(self.n, bra.n)
return result
def _represent_default_basis(self, **options):
return self._represent_NumberOp(None, **options)
def _represent_NumberOp(self, basis, **options):
ndim_info = options.get('ndim', 4)
format = options.get('format', 'sympy')
options['spmatrix'] = 'lil'
vector = matrix_zeros(ndim_info, 1, **options)
if isinstance(self.n, Integer):
if self.n >= ndim_info:
return ValueError("N-Dimension too small")
if format == 'scipy.sparse':
vector[int(self.n), 0] = 1.0
vector = vector.tocsr()
elif format == 'numpy':
vector[int(self.n), 0] = 1.0
else:
vector[self.n, 0] = S.One
return vector
else:
return ValueError("Not Numerical State")
| SHOKet |
python | django-crispy-forms__django-crispy-forms | crispy_forms/bootstrap.py | {
"start": 20455,
"end": 22502
} | class ____(Div):
"""
Base class used for `TabHolder` and `Accordion`, groups containers.
Attributes
----------
template : str
The default template which this Layout Object will be rendered
with.
css_class : str, optional
CSS classes to be applied to the ``<div>``. By default None.
Parameters
----------
*fields : str, LayoutObject
Any number of fields or layout objects as positional arguments to be
rendered within the ``<div>``.
css_id : str, optional
A DOM id for the layout object which will be added to the ``<div>`` if
provided. By default None.
css_class : str, optional
Additional CSS classes to be applied in addition to those declared by
the class itself. By default None.
template : str, optional
Overrides the default template, if provided. By default None.
**kwargs : dict, optional
Additional attributes are passed to ``flatatt`` and converted into
key="value", pairs. These attributes are added to the ``<div>``.
"""
def first_container_with_errors(self, errors):
"""
Returns the first container with errors, otherwise returns None.
"""
for tab in self.fields:
errors_here = any(error in tab for error in errors)
if errors_here:
return tab
return None
def open_target_group_for_form(self, form):
"""
Makes sure that the first group that should be open is open.
This is either the first group with errors or the first group
in the container, unless that first group was originally set to
active=False.
"""
target = self.first_container_with_errors(form.errors.keys())
if target is None:
target = self.fields[0]
if not getattr(target, "_active_originally_included", None):
target.active = True
return target
target.active = True
return target
| ContainerHolder |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_early_stopping.py | {
"start": 20012,
"end": 20348
} | class ____(BoringModel):
def __init__(self):
super().__init__()
self.epoch_losses = [1.0, 0.5, float("nan")]
def on_validation_epoch_end(self):
loss = self.epoch_losses[self.current_epoch] if self.current_epoch < len(self.epoch_losses) else float("nan")
self.log("val_loss", loss)
| ModelWithNaNLoss |
python | walkccc__LeetCode | solutions/2896. Apply Operations to Make Two Strings Equal/2896-2.py | {
"start": 0,
"end": 649
} | class ____:
def minOperations(self, s1: str, s2: str, x: int) -> int:
diffIndices = [i for i, (a, b) in enumerate(zip(s1, s2))
if a != b]
if not diffIndices:
return 0
# It's impossible to make two strings equal if there are odd number of
# differences.
if len(diffIndices) & 1:
return -1
# dp[i] := the minimum cost to correct diffIndices[i:]
dp = [math.inf] * len(diffIndices) + [0]
dp[-2] = x / 2
for i in reversed(range(len(diffIndices) - 1)):
dp[i] = min(dp[i + 1] + x / 2,
dp[i + 2] + diffIndices[i + 1] - diffIndices[i])
return int(dp[0])
| Solution |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 9599,
"end": 10830
} | class ____(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [
(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64),
]:
for i in range(1, s):
assert_equal(
hash(st(-(2**i))), hash(-(2**i)), err_msg=f"{st!r}: -2**{i:d}"
)
assert_equal(
hash(st(2 ** (i - 1))),
hash(2 ** (i - 1)),
err_msg=f"{st!r}: 2**{i - 1:d}",
)
assert_equal(
hash(st(2**i - 1)),
hash(2**i - 1),
err_msg=f"{st!r}: 2**{i:d} - 1",
)
i = max(i - 1, 1)
assert_equal(
hash(ut(2 ** (i - 1))),
hash(2 ** (i - 1)),
err_msg=f"{ut!r}: 2**{i - 1:d}",
)
assert_equal(
hash(ut(2**i - 1)),
hash(2**i - 1),
err_msg=f"{ut!r}: 2**{i:d} - 1",
)
@xpassIfTorchDynamo_np # (reason="TODO: hash")
| TestHash |
python | PyCQA__pylint | pylint/reporters/ureports/nodes.py | {
"start": 4425,
"end": 4589
} | class ____(BaseLayout):
"""A simple text paragraph.
attributes :
* BaseLayout attributes
A paragraph must not contains a section !
"""
| Paragraph |
python | getsentry__sentry | src/sentry/explore/models.py | {
"start": 2386,
"end": 4970
} | class ____(DefaultFieldsModel):
"""
A saved Explore query
"""
__relocation_scope__ = RelocationScope.Organization
projects = models.ManyToManyField("sentry.Project", through=ExploreSavedQueryProject)
organization = FlexibleForeignKey("sentry.Organization")
created_by_id = HybridCloudForeignKey("sentry.User", null=True, on_delete="SET_NULL")
name = models.CharField(max_length=255)
query = models.JSONField()
visits = BoundedBigIntegerField(null=True, default=1)
last_visited = models.DateTimeField(null=True, default=timezone.now)
dataset = BoundedPositiveIntegerField(
choices=ExploreSavedQueryDataset.as_choices(), default=ExploreSavedQueryDataset.SPANS
)
is_multi_query = models.BooleanField(default=False)
# The corresponding prebuilt_id found in hardcoded prebuilt queries from src/sentry/explore/endpoints/explore_saved_queries.py
# If the saved query is not a prebuilt query, this will be None
prebuilt_id = BoundedPositiveIntegerField(null=True, db_default=None)
# The version of the prebuilt query. If the version found in the explore_saved_queries.py hardcoded list is greater, then the saved
# query out of date and should be updated..
prebuilt_version = BoundedPositiveIntegerField(null=True, db_default=None)
# This field is to be used for the discover -> explore migration. This contains the reason why any part
# of the saved query was changed so we can display our reasonings in the UI
changed_reason = models.JSONField(null=True, default=None)
class Meta:
app_label = "explore"
db_table = "explore_exploresavedquery"
unique_together = (("organization", "prebuilt_id"),)
__repr__ = sane_repr("organization_id", "created_by_id", "name")
def set_projects(self, project_ids):
with transaction.atomic(router.db_for_write(ExploreSavedQueryProject)):
ExploreSavedQueryProject.objects.filter(explore_saved_query=self).exclude(
project__in=project_ids
).delete()
existing_project_ids = ExploreSavedQueryProject.objects.filter(
explore_saved_query=self
).values_list("project", flat=True)
new_project_ids = sorted(set(project_ids) - set(existing_project_ids))
ExploreSavedQueryProject.objects.bulk_create(
[
ExploreSavedQueryProject(project_id=project_id, explore_saved_query=self)
for project_id in new_project_ids
]
)
| ExploreSavedQuery |
python | dagster-io__dagster | docs/sphinx/_ext/sphinx-click/tests/test_formatter.py | {
"start": 19289,
"end": 20865
} | class ____(unittest.TestCase):
"""Validate filtering of commands."""
maxDiff = None
@staticmethod
def _get_ctx():
@click.group()
def cli():
"""A sample command group."""
@cli.command()
def hello():
"""A sample command."""
@cli.command()
def world():
"""A world command."""
return click.Context(cli, info_name="cli")
def test_no_commands(self):
"""Validate an empty command group."""
ctx = self._get_ctx()
output = list(ext._format_command(ctx, nested="short", commands=[])) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command group.
.. program:: cli
.. code-block:: shell
cli [OPTIONS] COMMAND [ARGS]...
"""
).lstrip(),
"\n".join(output),
)
def test_order_of_commands(self):
"""Validate the order of commands."""
ctx = self._get_ctx()
output = list(ext._format_command(ctx, nested="short", commands=["world", "hello"])) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command group.
.. program:: cli
.. code-block:: shell
cli [OPTIONS] COMMAND [ARGS]...
.. rubric:: Commands
.. object:: world
A world command.
.. object:: hello
A sample command.
"""
).lstrip(),
"\n".join(output),
)
| CommandFilterTestCase |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/tracing_compilation.py | {
"start": 2091,
"end": 2274
} | class ____(enum.Enum):
"""Enumerate scopes under which functions might be traced."""
NO_SCOPE = 1
VARIABLE_CREATION = 2
NO_VARIABLE_CREATION = 3
@dataclasses.dataclass
| ScopeType |
python | ansible__ansible | test/units/module_utils/datatag/test_datatag.py | {
"start": 11659,
"end": 30535
} | class ____(AutoParamSupport):
later = t.cast(t.Self, Later(locals()))
tag_instances_with_reprs: t.Annotated[t.List[t.Tuple[AnsibleDatatagBase, str]], ParamDesc(["value", "expected_repr"])] = [
(Deprecated(msg="hi mom, I am deprecated", date='2023-01-02', version="42.42"),
"Deprecated(msg='hi mom, I am deprecated', date='2023-01-02', version='42.42')"),
(Deprecated(msg="minimal"), "Deprecated(msg='minimal')")
]
taggable_container_instances: t.List[c.Collection] = [
dict(hi="mom"),
['hi', 'mom'],
{'hi mom'}, # kept as a single item set to allow repr() testing without worrying about non-deterministic order of set items
("hi", "mom",),
]
taggable_instances: t.List[object] = taggable_container_instances + [
b'hi mom',
42.0,
42,
"hi mom",
datetime.datetime(2023, 9, 15, 21, 5, 30, 1900, datetime.timezone.utc),
datetime.date(2023, 9, 15),
datetime.time(21, 5, 30, 1900),
]
tagged_object_instances: t.List[AnsibleTaggedObject] = [
t.cast(AnsibleTaggedObject, ExampleTagWithContent(content_str=__file__).tag(item)) for item in taggable_instances]
datatag_instances: t.List[AnsibleDatatagBase]
serializable_instances: t.List[object]
serializable_instances_with_instance_copy: t.List[CopyProtocol]
serializable_types: t.List[t.Type[AnsibleSerializable]]
@classmethod
def post_init(cls) -> None:
cls.datatag_instances = [value for value, _repr in cls.tag_instances_with_reprs]
cls.serializable_instances = [value for value in (
cls.datatag_instances + cls.tagged_object_instances + message_instances) if cls.is_type_applicable(value)]
cls.serializable_instances_with_instance_copy = [t.cast(CopyProtocol, item) for item in cls.serializable_instances if hasattr(item, 'copy')]
# NOTE: this doesn't include the lazy template types, those are tested separately
cls.serializable_types = [value for value in (
list(AnsibleSerializable._known_type_map.values()) + [AnsibleSerializable]) if cls.is_type_applicable(value)]
@classmethod
def is_type_applicable(cls, type_obj) -> bool:
return (
type_obj.__module__ != __name__ and # exclude test-only tags/objects
cls.is_controller_only_type(type_obj) == cls.is_controller_only_test()
)
@staticmethod
def is_controller_only_type(type_obj: type) -> bool:
return 'module_utils' not in type_obj.__module__
@classmethod
def is_controller_only_test(cls) -> bool:
return cls.is_controller_only_type(cls)
@classmethod
def container_test_cases(cls) -> t.Annotated[t.List[t.Tuple[t.Any, t.Optional[type], type]], ParamDesc(["value", "value_type", "type_under_test"])]:
"""
Return container test parameters for the given test case.
Called during each test run to create the test value on-demand.
"""
test_cases = []
for test_case in create_container_test_cases(_ANSIBLE_ALLOWED_NON_SCALAR_COLLECTION_VAR_TYPES):
instances = cls.taggable_instances + cls.tagged_object_instances
# pylint: disable=unidiomatic-typecheck
candidates = [instance for instance in instances if type(instance) is test_case.type_under_test]
assert len(candidates) == 1, f"container_test_parameters found {len(candidates)}, expected 1"
value = candidates[0]
test_cases.append(create_container_test_parameters(test_case, value))
return test_cases
@pytest.mark.autoparam(later.tag_instances_with_reprs)
def test_tag_repr(self, value: t.Any, expected_repr: str):
assert repr(value) == expected_repr
@pytest.mark.autoparam(later.container_test_cases)
def test_tag_copy(self, value: t.Collection, value_type: type | None, type_under_test: type) -> None:
"""Ensure copying tags returns the correct type and tags."""
tag = ExampleSingletonTag()
src = tag.tag("tagged")
result: t.Collection = AnsibleTagHelper.tag_copy(src, value, value_type=value_type)
assert isinstance(result, type_under_test)
assert tag in AnsibleTagHelper.tags(result)
@pytest.mark.autoparam(later.taggable_instances)
@pytest.mark.allow_delazify # this test requires a working templar on lazies
def test_dir(self, value: object) -> None:
"""Ensure the dir() of a tagged instance is identical to the dir() returned by the underlying native Python type, excluding `_` prefixed names."""
tagged_instance = ExampleSingletonTag().tag(value)
assert tagged_instance is not value
assert ([name for name in dir(tagged_instance) if not name.startswith('_')] ==
[name for name in dir(AnsibleTagHelper.as_native_type(tagged_instance)) if not name.startswith('_')])
@pytest.mark.autoparam(later.taggable_instances)
@pytest.mark.allow_delazify # this test requires a working templar on lazies
def test_repr(self, value: object) -> None:
"""Ensure the repr() of a tagged instance is identical to the repr() returned by the underlying native Python type."""
tagged_instance = ExampleSingletonTag().tag(value)
assert tagged_instance is not value
assert repr(tagged_instance) == repr(value)
@pytest.mark.autoparam(later.taggable_instances)
@pytest.mark.allow_delazify # this test requires a working templar on lazies
def test_str(self, value: object) -> None:
"""Ensure the str() of a tagged instance is identical to the str() returned by the underlying native Python type."""
tagged_instance = ExampleSingletonTag().tag(value)
assert tagged_instance is not value
assert str(tagged_instance) == str(value)
def test_serializable_instances_cover_all_concrete_impls(self):
tested_types = {type(instance_type) for instance_type in self.serializable_instances}
excluded_type_names = {
AnsibleTaggedObject.__name__, # base class, cannot be abstract
AnsibleSerializableDataclass.__name__, # base class, cannot be abstract
AnsibleSerializable.__name__, # base class, cannot be abstract
AnsibleSerializableEnum.__name__, # base class, cannot be abstract
# these types are all controller-only, so it's easier to have static type names instead of importing them
'JinjaConstTemplate', # serialization not required
'_EncryptedSource', # serialization not required
'CapturedErrorSummary', # serialization not required
}
# don't require instances for types marked abstract or types that are clearly intended to be so (but can't be marked as such)
required_types = {instance_type for instance_type in self.serializable_types if (
not inspect.isabstract(instance_type) and
not instance_type.__name__.endswith('Base') and
'Lazy' not in instance_type.__name__ and # lazy types use the same input data
instance_type.__name__ not in excluded_type_names and
not issubclass(instance_type, AnsibleSerializableWrapper)
)}
missing_types = required_types.difference(tested_types)
assert not missing_types
@pytest.mark.autoparam(later.serializable_instances)
@pytest.mark.allow_delazify # this test requires a working templar on lazies
def test_json_roundtrip(self, value: object):
"""
Verify that the serialization infrastructure (profiles) can round-trip any type we choose to support.
For taggable types which have no production use-case for round-tripping, the necessary wrapper types have been implemented in this test module.
"""
payload = json.dumps(value, cls=RoundTripEverythingEncoder)
round_tripped_value = json.loads(payload, cls=RoundTripEverythingDecoder)
assert_round_trip(value, round_tripped_value)
@pytest.mark.autoparam(later.serializable_instances)
def test_pickle_roundtrip(self, value: object):
if "Lazy" in type(value).__name__:
pytest.xfail("pickle prohibited on lazies")
pickled_value = pickle.dumps(value)
round_tripped_value = pickle.loads(pickled_value)
assert_round_trip(value, round_tripped_value)
@pytest.mark.autoparam(later.serializable_instances)
def test_deepcopy_roundtrip(self, value: object):
if "Lazy" in type(value).__name__:
pytest.xfail("deepcopy not supported on lazies yet")
round_tripped_value = copy.deepcopy(value)
# DTFIX5: ensure items in collections are copies
assert_round_trip(value, round_tripped_value, via_copy=True)
@pytest.mark.autoparam(later.tagged_object_instances)
def test_native_copy(self, value: AnsibleTaggedObject) -> None:
native_copy = value._native_copy()
assert type(value) is not type(native_copy)
assert isinstance(value, type(native_copy))
if not isinstance(native_copy, int):
assert native_copy is not value._native_copy()
# DTFIX5: ensure items in collections are not copies
assert native_copy == value
assert native_copy == value._native_copy()
@pytest.mark.autoparam(later.serializable_instances)
def test_copy_roundtrip(self, value: object):
if "Lazy" in type(value).__name__:
pytest.xfail("copy prohibited on lazies")
round_tripped_value = copy.copy(value)
# DTFIX5: ensure items in collections are not copies
assert_round_trip(value, round_tripped_value, via_copy=True)
@pytest.mark.autoparam(later.serializable_instances_with_instance_copy)
def test_instance_copy_roundtrip(self, value: CopyProtocol):
round_tripped_value = value.copy()
# DTFIX5: ensure items in collections are not copies
assert_round_trip(value, round_tripped_value)
test_dataclass_tag_base_field_validation_fail_instances: t.Annotated[
t.List[t.Tuple[t.Type[AnsibleDatatagBase], t.Dict[str, object]]], ParamDesc(["tag_type", "init_kwargs"])
] = [
(Deprecated, dict(msg=ExampleSingletonTag().tag(''))),
(Deprecated, dict(date=ExampleSingletonTag().tag(''), msg='')),
(Deprecated, dict(version=ExampleSingletonTag().tag(''), msg='')),
]
@pytest.mark.autoparam(later.test_dataclass_tag_base_field_validation_fail_instances)
def test_dataclass_tag_base_field_validation_fail(self, tag_type: t.Callable, init_kwargs: t.Dict[str, t.Any]) -> None:
field_name = list(init_kwargs.keys())[0]
actual_type = type(init_kwargs[field_name])
with pytest.raises(TypeError, match=f"{field_name} must be <class '.*'> instead of {actual_type}"):
tag_type(**init_kwargs)
test_dataclass_tag_base_field_validation_pass_instances: t.Annotated[
t.List[t.Tuple[t.Type[AnsibleDatatagBase], t.Dict[str, object]]], ParamDesc(["tag_type", "init_kwargs"])
] = [
(Deprecated, dict(msg='')),
(Deprecated, dict(msg='', date='2025-01-01')),
(Deprecated, dict(msg='', version='')),
]
@pytest.mark.autoparam(later.test_dataclass_tag_base_field_validation_pass_instances)
def test_dataclass_tag_base_field_validation_pass(self, tag_type: t.Callable, init_kwargs: t.Dict[str, t.Any]) -> None:
tag_type(**init_kwargs)
@pytest.mark.autoparam(later.taggable_instances)
@pytest.mark.allow_delazify # this test requires a working templar on lazies
def test_as_untagged_type(self, value: object) -> None:
"""
Ensure that `as_untagged_type` preserves object reference identity for untagged inputs, and
that tagged inputs are returned as their original native types.
"""
tagged_instance = ExampleSingletonTag().tag(value)
roundtripped_instance = AnsibleTagHelper.as_native_type(tagged_instance)
if not isinstance(value, AnsibleTaggedObject): # lazies are always a tagged type, so as_untagged_type will be a copy
assert AnsibleTagHelper.as_native_type(value) is value
assert type(roundtripped_instance) is type(value)
assert roundtripped_instance == value
@pytest.mark.autoparam(later.taggable_instances)
def test_untag(self, value: object) -> None:
"""Ensure tagging and then untagging a taggable instance returns new instances as appropriate, with the correct tags and type."""
tagged_instance = ExampleSingletonTag().tag(AnotherExampleSingletonTag().tag(value))
tags_unchanged = Deprecated.untag(tagged_instance) # not tagged with this value, nothing to do
assert tags_unchanged is tagged_instance
one_less_tag = AnotherExampleSingletonTag.untag(tagged_instance)
assert one_less_tag is not tagged_instance
assert type(one_less_tag) is type(tagged_instance) # pylint: disable=unidiomatic-typecheck
assert AnsibleTagHelper.tags(one_less_tag) == frozenset((ExampleSingletonTag(),))
no_tags = ExampleSingletonTag.untag(one_less_tag)
assert no_tags is not one_less_tag
assert type(no_tags) is type(value)
assert AnsibleTagHelper.tags(no_tags) is _empty_frozenset
still_no_tags = ExampleSingletonTag.untag(no_tags)
assert still_no_tags is no_tags
@pytest.mark.autoparam(later.serializable_types)
def test_slots(self, value: type) -> None:
"""Assert that __slots__ are properly defined on the given serializable type."""
if value in (AnsibleSerializable, AnsibleTaggedObject):
expect_slots = True # non-dataclass base types have no attributes, but still use slots
elif issubclass(value, (int, bytes, tuple, enum.Enum)):
# non-empty slots are not supported by these variable-length data types
# see: https://docs.python.org/3/reference/datamodel.html
expect_slots = False
elif issubclass(value, AnsibleSerializableDataclass) or value == AnsibleSerializableDataclass:
assert dataclasses.is_dataclass(value) # everything extending AnsibleSerializableDataclass must be a dataclass
expect_slots = sys.version_info >= (3, 10) # 3.10+ dataclasses have attributes (and support slots)
else:
expect_slots = True # normal types have attributes (and slots)
# check for slots on the type itself, ignoring slots on parents
has_slots = '__slots__' in value.__dict__
assert has_slots == expect_slots
# instances of concrete types using __slots__ should not have __dict__ (which would indicate missing __slots__ definitions in the class hierarchy)
serializable_instance = {type(instance): instance for instance in self.serializable_instances}.get(value)
if serializable_instance:
has_dict = hasattr(serializable_instance, '__dict__')
assert has_dict != expect_slots
#
# WORKING
@pytest.mark.autoparam(later.container_test_cases)
def test_tag(self, value: object, value_type: type | None, type_under_test: type) -> None:
"""Ensure tagging a value returns the correct type and tags."""
tag = ExampleSingletonTag()
result: t.Collection = AnsibleTagHelper.tag(value, tags=tag, value_type=value_type) # type: ignore[arg-type]
assert isinstance(result, type_under_test)
assert tag in AnsibleTagHelper.tags(result)
def create_container_test_parameters(test_case: ContainerTestCase, value: t.Any) -> t.Tuple[t.Any, t.Optional[type], type]:
"""
Return container test parameters for the given test case and collection instance.
The result is tuple of three values:
- 1) The value or a generator.
- 2) The type represented by the generator, or None when not using a generator.
- 3) The type represented by the value.
"""
if test_case.use_generator:
# This test creates a generator to source items from the value to facilitate optimized creation of collections when tagging and copying tags.
# To avoid triggering special behavior during iteration, a native copy is used when the value is a tagged object.
if isinstance(value, AnsibleTaggedObject):
native_value = value._native_copy()
else:
native_value = value
if isinstance(value, c.Mapping):
generator = ((k, v) for k, v in native_value.items())
else:
generator = (item for item in native_value)
return generator, test_case.type_under_test, test_case.type_under_test # testing via a generator, which requires use of value_type
return value, None, test_case.type_under_test # testing the actual type without specifying value_type
def create_container_test_cases(types: t.Iterable[t.Type[t.Collection]]) -> t.List[ContainerTestCase]:
"""
Return a list of test cases for the given types.
Each type will result in two test cases, one that uses a generator and one that does not.
"""
sources: list[ContainerTestCase] = []
for type_under_test in sorted(types, key=lambda item: item.__name__):
sources.extend((
ContainerTestCase(type_under_test, False), # testing the actual type without specifying value_type
ContainerTestCase(type_under_test, True), # testing via a generator, which requires use of value_type
))
return sources
@pytest.mark.parametrize("value, expected_type_name", (
(1, 'int'),
(ExampleSingletonTag().tag(1), 'int'),
(str, 'str'),
(_AnsibleTaggedStr, 'str'),
))
def test_friendly_name(value: object, expected_type_name: str) -> None:
assert native_type_name(value) == expected_type_name
def test_deserialize_unknown_type() -> None:
with pytest.raises(ValueError):
AnsibleSerializable._deserialize({AnsibleSerializable._TYPE_KEY: 'bogus'})
def test_conflicting_tagged_type_map_entry():
def create_problem():
class SecondaryDict(dict, AnsibleTaggedObject):
pass
return SecondaryDict() # pragma: nocover
with pytest.raises(TypeError, match="Cannot define type 'SecondaryDict' since '_AnsibleTaggedDict' already extends 'dict'."):
create_problem()
@pytest.mark.parametrize("value,expected_idx", (
(('a', ExampleSingletonTag().tag('b'), ExampleSingletonTag().tag('c')), 1),
((ExampleSingletonTag().tag('a'), ExampleSingletonTag().tag('b'), 'c'), 0),
((ExampleTagWithContent(content_str='').tag('a'), 'b'), None),
))
def test_first_tagged_on(value: c.Sequence, expected_idx: int | None):
expected = value[expected_idx] if expected_idx is not None else None
assert ExampleSingletonTag.first_tagged_on(*value) is expected
| TestDatatagTarget |
python | google__pytype | pytype/tests/test_six_overlay1.py | {
"start": 81,
"end": 2005
} | class ____(test_base.BaseTest):
"""Tests for six and six_overlay."""
def test_six_moves_import(self):
self.Check("""
import six
def use_range():
for x in six.moves.range(1, 10):
x
""")
def test_add_metaclass(self):
"""Like the test in test_abc but without a fake six.pyi."""
self.Check("""
import abc
import six
class A:
def __init__(self):
self.foo = "hello"
@six.add_metaclass(abc.ABCMeta)
class Foo(A):
@abc.abstractmethod
def get_foo(self):
pass
class Bar(Foo):
def get_foo(self):
return self.foo
x = Bar().get_foo()
""")
def test_with_metaclass(self):
self.Check("""
import abc
import six
class A:
def __init__(self):
self.foo = "hello"
class B:
def bar(self):
return 42
class Foo(six.with_metaclass(abc.ABCMeta, A), B):
@abc.abstractmethod
def get_foo(self):
pass
class Bar(Foo):
def get_foo(self):
return self.foo
x = Bar().get_foo()
y = Bar().bar()
""")
def test_with_metaclass_any(self):
self.Check("""
import six
from typing import Any
Meta = type # type: Any
class Foo(six.with_metaclass(Meta)):
pass
""")
def test_type_init(self):
ty = self.Infer("""
import six
class Foo(type):
def __init__(self, *args):
self.x = 42
@six.add_metaclass(Foo)
class Bar:
pass
x1 = Bar.x
x2 = Bar().x
""")
self.assertTypesMatchPytd(
ty,
"""
import six
class Foo(type):
x: int
def __init__(self, *args) -> None: ...
class Bar(object, metaclass=Foo):
x: int
x1: int
x2: int
""",
)
if __name__ == "__main__":
test_base.main()
| SixTests |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 38070,
"end": 39203
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
latest_repair_id: Optional[int] = Field(
default=None,
description=(
"The ID of the latest repair. This parameter is not required when repairing"
" a run for the first time, but must be provided on subsequent requests to"
" repair the same run."
),
examples=[734650698524280],
)
rerun_all_failed_tasks: Optional[bool] = Field(
False,
description=(
"If true, repair all failed tasks. Only one of rerun_tasks or"
" rerun_all_failed_tasks can be used."
),
)
rerun_tasks: Optional[List[str]] = Field(
default=None,
description="The task keys of the task runs to repair.",
examples=["task0", "task1"],
)
run_id: Optional[int] = Field(
default=None,
description=(
"The job run ID of the run to repair. The run must not be in progress."
),
examples=[455644833],
)
| RepairRunInput |
python | PyCQA__pycodestyle | pycodestyle.py | {
"start": 80804,
"end": 83883
} | class ____:
"""Collect the results of the checks."""
print_filename = False
def __init__(self, options):
self._benchmark_keys = options.benchmark_keys
self._ignore_code = options.ignore_code
# Results
self.elapsed = 0
self.total_errors = 0
self.counters = dict.fromkeys(self._benchmark_keys, 0)
self.messages = {}
def start(self):
"""Start the timer."""
self._start_time = time.time()
def stop(self):
"""Stop the timer."""
self.elapsed = time.time() - self._start_time
def init_file(self, filename, lines, expected, line_offset):
"""Signal a new file."""
self.filename = filename
self.lines = lines
self.expected = expected or ()
self.line_offset = line_offset
self.file_errors = 0
self.counters['files'] += 1
self.counters['physical lines'] += len(lines)
def increment_logical_line(self):
"""Signal a new logical line."""
self.counters['logical lines'] += 1
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = text[:4]
if self._ignore_code(code):
return
if code in self.counters:
self.counters[code] += 1
else:
self.counters[code] = 1
self.messages[code] = text[5:]
# Don't care about expected errors or warnings
if code in self.expected:
return
if self.print_filename and not self.file_errors:
print(self.filename)
self.file_errors += 1
self.total_errors += 1
return code
def get_file_results(self):
"""Return the count of errors and warnings for this file."""
return self.file_errors
def get_count(self, prefix=''):
"""Return the total count of errors and warnings."""
return sum(self.counters[key]
for key in self.messages if key.startswith(prefix))
def get_statistics(self, prefix=''):
"""Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
prefix='W' matches all warnings
prefix='E4' matches all errors that have to do with imports
"""
return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
for key in sorted(self.messages) if key.startswith(prefix)]
def print_statistics(self, prefix=''):
"""Print overall statistics (number of errors and warnings)."""
for line in self.get_statistics(prefix):
print(line)
def print_benchmark(self):
"""Print benchmark numbers."""
print('{:<7.2f} {}'.format(self.elapsed, 'seconds elapsed'))
if self.elapsed:
for key in self._benchmark_keys:
print('%-7d %s per second (%d total)' %
(self.counters[key] / self.elapsed, key,
self.counters[key]))
| BaseReport |
python | getsentry__sentry | src/sentry/plugins/bases/data_forwarding.py | {
"start": 343,
"end": 2314
} | class ____(Plugin):
def has_project_conf(self) -> bool:
return True
def get_rate_limit(self):
"""
Returns a tuple of (Number of Requests, Window in Seconds)
"""
return (50, 1)
def forward_event(self, event: Event, payload: MutableMapping[str, Any]) -> bool:
"""Forward the event and return a boolean if it was successful."""
raise NotImplementedError
def get_event_payload(self, event):
return serialize(event)
def get_plugin_type(self) -> str:
return "data-forwarding"
def get_rl_key(self, event) -> str:
return f"{self.conf_key}:{event.project.organization_id}"
def initialize_variables(self, event):
return
def is_ratelimited(self, event):
self.initialize_variables(event)
rl_key = self.get_rl_key(event)
# limit segment to 50 requests/second
limit, window = self.get_rate_limit()
if limit and window and ratelimits.backend.is_limited(rl_key, limit=limit, window=window):
logger.info(
"data_forwarding.skip_rate_limited",
extra={
"event_id": event.event_id,
"issue_id": event.group_id,
"project_id": event.project_id,
"organization_id": event.project.organization_id,
},
)
return True
return False
def post_process(self, *, event, **kwargs) -> None:
if features.has("organizations:data-forwarding-revamp-access", event.project.organization):
return
if self.is_ratelimited(event):
return
payload = self.get_event_payload(event)
success = self.forward_event(event, payload)
if success is False:
# TODO(dcramer): record failure
pass
tsdb.backend.incr(TSDBModel.project_total_forwarded, event.project.id, count=1)
| DataForwardingPlugin |
python | facebookresearch__faiss | contrib/torch/clustering.py | {
"start": 469,
"end": 1438
} | class ____:
"""Wrapper for a tensor that offers a function to assign the vectors
to centroids. All other implementations offer the same interface"""
def __init__(self, x):
self.x = x
def count(self):
return self.x.shape[0]
def dim(self):
return self.x.shape[1]
def get_subset(self, indices):
return self.x[indices]
def perform_search(self, centroids):
return faiss.knn(self.x, centroids, 1)
def assign_to(self, centroids, weights=None):
D, I = self.perform_search(centroids)
I = I.ravel()
D = D.ravel()
nc, d = centroids.shape
sum_per_centroid = torch.zeros_like(centroids)
if weights is None:
sum_per_centroid.index_add_(0, I, self.x)
else:
sum_per_centroid.index_add_(0, I, self.x * weights[:, None])
# the indices are still in numpy.
return I.cpu().numpy(), D, sum_per_centroid
| DatasetAssign |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 59763,
"end": 67038
} | class ____(ConstNode):
# unsigned "" or "U"
# longness "" or "L" or "LL"
# is_c_literal True/False/None creator considers this a C integer literal
unsigned = ""
longness = ""
is_c_literal = None # unknown
# hex_value and base_10_value are designed only to simplify
# writing tests to get a consistent representation of value
@property
def hex_value(self):
return Utils.strip_py2_long_suffix(hex(Utils.str_to_number(self.value)))
@property
def base_10_value(self):
return str(Utils.str_to_number(self.value))
def __init__(self, pos, **kwds):
ExprNode.__init__(self, pos, **kwds)
if 'type' not in kwds:
self.type = self.find_suitable_type_for_value()
@classmethod
def for_int(cls, pos, int_value, type=PyrexTypes.c_int_type):
assert isinstance(int_value, int), repr(int_value)
return cls(pos, value=str(int_value), constant_result=int_value, type=type, is_c_literal=True)
@classmethod
def for_size(cls, pos, int_value):
return cls.for_int(pos, int_value, type=PyrexTypes.c_py_ssize_t_type)
def find_suitable_type_for_value(self):
if self.constant_result is constant_value_not_set:
try:
self.calculate_constant_result()
except ValueError:
pass
# we ignore 'is_c_literal = True' and instead map signed 32bit
# integers as C long values
if self.is_c_literal or \
not self.has_constant_result() or \
self.unsigned or self.longness == 'LL':
# clearly a C literal
rank = (self.longness == 'LL') and 2 or 1
suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
if self.type:
suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
else:
# C literal or Python literal - split at 32bit boundary
if -2**31 <= self.constant_result < 2**31:
if self.type and self.type.is_int:
suitable_type = self.type
else:
suitable_type = PyrexTypes.c_long_type
else:
suitable_type = Builtin.int_type
return suitable_type
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
elif dst_type.is_float or dst_type is Builtin.float_type:
if self.has_constant_result():
return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
constant_result=float(self.constant_result))
else:
return FloatNode(self.pos, value=self.value, type=dst_type,
constant_result=not_a_constant)
elif dst_type.is_numeric and not dst_type.is_complex:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type=dst_type, is_c_literal=True,
unsigned=self.unsigned, longness=self.longness)
return node
elif dst_type.is_pyobject:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type=Builtin.int_type, is_c_literal=False,
unsigned=self.unsigned, longness=self.longness)
else:
# FIXME: not setting the type here to keep it working with
# complex numbers. Should they be special cased?
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
unsigned=self.unsigned, longness=self.longness)
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def coerce_to_boolean(self, env):
return IntNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type,
unsigned=self.unsigned, longness=self.longness)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
# pre-allocate a Python version of the number
# (In hex if sufficiently large to cope with Python's string-to-int limitations.
# We use quite a small value of "sufficiently large" - 10**13 is picked as
# the approximate point where hex strings become shorter)
value = Utils.str_to_number(self.value)
formatter = hex if value > (10**13) else str
plain_integer_string = formatter(value)
plain_integer_string = Utils.strip_py2_long_suffix(plain_integer_string)
self.result_code = code.get_py_int(plain_integer_string, self.longness)
else:
self.result_code = self.get_constant_c_result_code()
def get_constant_c_result_code(self):
unsigned, longness = self.unsigned, self.longness
literal = self.value_as_c_integer_string()
if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0':
# negative decimal literal => guess longness from type to prevent wrap-around
if self.type.rank >= PyrexTypes.c_longlong_type.rank:
longness = 'LL'
elif self.type.rank >= PyrexTypes.c_long_type.rank:
longness = 'L'
return literal + unsigned + longness
def value_as_c_integer_string(self):
value = self.value
if len(value) <= 2:
# too short to go wrong (and simplifies code below)
return value
neg_sign = ''
if value[0] == '-':
neg_sign = '-'
value = value[1:]
if value[0] == '0':
literal_type = value[1] # 0'o' - 0'b' - 0'x'
# 0x123 hex literals and 0123 octal literals work nicely in C
# but C-incompatible Py3 oct/bin notations need conversion
if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit():
# negative hex/octal literal => prevent C compiler from using
# unsigned integer types by converting to decimal (see C standard 6.4.4.1)
value = str(Utils.str_to_number(value))
elif literal_type in 'oO':
value = '0' + value[2:] # '0o123' => '0123'
elif literal_type in 'bB':
value = str(int(value[2:], 2))
elif value.isdigit() and not self.unsigned and not self.longness:
if not neg_sign:
# C compilers do not consider unsigned types for decimal literals,
# but they do for hex (see C standard 6.4.4.1)
value = '0x%X' % int(value)
return neg_sign + value
def calculate_result_code(self):
return self.result_code
def calculate_constant_result(self):
self.constant_result = Utils.str_to_number(self.value)
def compile_time_value(self, denv):
return Utils.str_to_number(self.value)
| IntNode |
python | kamyu104__LeetCode-Solutions | Python/max-sum-of-a-pair-with-equal-sum-of-digits.py | {
"start": 58,
"end": 650
} | class ____(object):
def maximumSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def sum_digits(x):
result = 0
while x:
result += x%10
x //= 10
return result
lookup = {}
result = -1
for x in nums:
k = sum_digits(x)
if k not in lookup:
lookup[k] = x
continue
result = max(result, lookup[k]+x)
if x > lookup[k]:
lookup[k] = x
return result
| Solution |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 172422,
"end": 173115
} | class ____(WrapperStore):
"""
Store that explicitly does not support consolidated metadata.
Useful as a proxy for stores like Icechunk, see https://github.com/zarr-developers/zarr-python/pull/3119.
"""
supports_consolidated_metadata = False
def __init__(
self,
store,
*,
read_only: bool = False,
) -> None:
self._store = store.with_read_only(read_only=read_only)
def with_read_only(
self, read_only: bool = False
) -> NoConsolidatedMetadataSupportStore:
return type(self)(
store=self._store,
read_only=read_only,
)
@requires_zarr_v3
| NoConsolidatedMetadataSupportStore |
python | jd__tenacity | tests/test_tenacity.py | {
"start": 58985,
"end": 60423
} | class ____(unittest.TestCase):
@pytest.mark.skipif(
sys.version_info < (3, 0), reason="typeguard not supported for python 2"
)
def test_retry_type_annotations(self):
"""The decorator should maintain types of decorated functions."""
# Just in case this is run with unit-test, return early for py2
if sys.version_info < (3, 0):
return
# Function-level import because we can't install this for python 2.
from typeguard import check_type
def num_to_str(number):
# type: (int) -> str
return str(number)
# equivalent to a raw @retry decoration
with_raw = retry(num_to_str)
with_raw_result = with_raw(1)
# equivalent to a @retry(...) decoration
with_constructor = retry()(num_to_str)
with_constructor_result = with_raw(1)
# These raise TypeError exceptions if they fail
check_type(with_raw, typing.Callable[[int], str])
check_type(with_raw_result, str)
check_type(with_constructor, typing.Callable[[int], str])
check_type(with_constructor_result, str)
@contextmanager
def reports_deprecation_warning():
__tracebackhide__ = True
oldfilters = copy(warnings.filters)
warnings.simplefilter("always")
try:
with pytest.warns(DeprecationWarning):
yield
finally:
warnings.filters = oldfilters
| TestRetryTyping |
python | tornadoweb__tornado | tornado/test/netutil_test.py | {
"start": 6177,
"end": 7038
} | class ____(unittest.TestCase):
def test_same_port_allocation(self):
sockets = bind_sockets(0, "localhost")
try:
port = sockets[0].getsockname()[1]
self.assertTrue(all(s.getsockname()[1] == port for s in sockets[1:]))
finally:
for sock in sockets:
sock.close()
@unittest.skipIf(
not hasattr(socket, "SO_REUSEPORT"), "SO_REUSEPORT is not supported"
)
def test_reuse_port(self):
sockets: typing.List[socket.socket] = []
sock, port = bind_unused_port(reuse_port=True)
try:
sockets = bind_sockets(port, "127.0.0.1", reuse_port=True)
self.assertTrue(all(s.getsockname()[1] == port for s in sockets))
finally:
sock.close()
for sock in sockets:
sock.close()
| TestPortAllocation |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/event_log/base.py | {
"start": 5025,
"end": 5689
} | class ____(
LoadableBy[AssetKey],
):
"""Internal representation of an asset record, as stored in a :py:class:`~dagster._core.storage.event_log.EventLogStorage`.
Users should not invoke this class directly.
"""
storage_id: int
asset_entry: AssetEntry
@classmethod
def _blocking_batch_load(
cls, keys: Iterable[AssetKey], context: LoadingContext
) -> Iterable[Optional["AssetRecord"]]:
records_by_key = {
record.asset_entry.asset_key: record
for record in context.instance.get_asset_records(list(keys))
}
return [records_by_key.get(key) for key in keys]
@record
| AssetRecord |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_returned.py | {
"start": 1022,
"end": 1153
} | class ____:
""" __len__ returns nothing """
def __len__(self): # [invalid-length-returned]
print(3.0)
| NonRegression |
python | pallets__jinja | src/jinja2/environment.py | {
"start": 57767,
"end": 60847
} | class ____:
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
Per default the output is unbuffered which means that for every unbuffered
instruction in the template one string is yielded.
If buffering is enabled with a buffer size of 5, five items are combined
into a new string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen: t.Iterator[str]) -> None:
self._gen = gen
self.disable_buffering()
def dump(
self,
fp: str | t.IO[bytes],
encoding: str | None = None,
errors: str | None = "strict",
) -> None:
"""Dump the complete stream into a file or file-like object.
Per default strings are written, if you want to encode
before writing specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, str):
if encoding is None:
encoding = "utf-8"
real_fp: t.IO[bytes] = open(fp, "wb")
close = True
else:
real_fp = fp
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self) # type: ignore
else:
iterable = self # type: ignore
if hasattr(real_fp, "writelines"):
real_fp.writelines(iterable)
else:
for item in iterable:
real_fp.write(item)
finally:
if close:
real_fp.close()
def disable_buffering(self) -> None:
"""Disable the output buffering."""
self._next = partial(next, self._gen)
self.buffered = False
def _buffered_generator(self, size: int) -> t.Iterator[str]:
buf: list[str] = []
c_size = 0
push = buf.append
while True:
try:
while c_size < size:
c = next(self._gen)
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
def enable_buffering(self, size: int = 5) -> None:
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError("buffer size too small")
self.buffered = True
self._next = partial(next, self._buffered_generator(size))
def __iter__(self) -> "TemplateStream":
return self
def __next__(self) -> str:
return self._next() # type: ignore
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
| TemplateStream |
python | pypa__hatch | tests/utils/test_platform.py | {
"start": 187,
"end": 1471
} | class ____:
def test_tag(self):
assert Platform().windows is True
def test_default_shell(self):
assert Platform().default_shell == os.environ.get("COMSPEC", "cmd")
def test_format_for_subprocess_list(self):
assert Platform().format_for_subprocess(["foo", "bar"], shell=False) == ["foo", "bar"]
def test_format_for_subprocess_list_shell(self):
assert Platform().format_for_subprocess(["foo", "bar"], shell=True) == ["foo", "bar"]
def test_format_for_subprocess_string(self):
assert Platform().format_for_subprocess("foo bar", shell=False) == "foo bar"
def test_format_for_subprocess_string_shell(self):
assert Platform().format_for_subprocess("foo bar", shell=True) == "foo bar"
def test_home(self):
platform = Platform()
assert platform.home == platform.home == Path(os.path.expanduser("~"))
def test_populate_default_popen_kwargs_executable(self):
platform = Platform()
kwargs = {}
platform.populate_default_popen_kwargs(kwargs, shell=True)
assert not kwargs
kwargs["executable"] = "foo"
platform.populate_default_popen_kwargs(kwargs, shell=True)
assert kwargs["executable"] == "foo"
@pytest.mark.requires_macos
| TestWindows |
python | realpython__materials | fastapi-python-web-apis/main.py | {
"start": 1233,
"end": 4144
} | class ____(BaseModel):
message: str
deleted_item: str
remaining_items_count: int
@app.get("/", tags=["Random Playground"])
def home():
return {"message": "Welcome to the Randomizer API"}
@app.get("/random/{max_value}", tags=["Random Playground"])
def get_random_number(max_value: int):
return {"max": max_value, "random_number": random.randint(1, max_value)}
@app.get("/random-between", tags=["Random Playground"])
def get_random_number_between(
min_value: Annotated[
int,
Query(
title="Minimum Value",
description="The minimum random number",
ge=1,
le=1000,
),
] = 1,
max_value: Annotated[
int,
Query(
title="Maximum Value",
description="The maximum random number",
ge=1,
le=1000,
),
] = 99,
):
if min_value > max_value:
raise HTTPException(
status_code=400, detail="min_value can't be greater than max_value"
)
return {
"min": min_value,
"max": max_value,
"random_number": random.randint(min_value, max_value),
}
@app.post(
"/items", response_model=ItemResponse, tags=["Random Items Management"]
)
def add_item(item: Item):
if item.name in items_db:
raise HTTPException(status_code=400, detail="Item already exists")
items_db.append(item.name)
return ItemResponse(message="Item added successfully", item=item.name)
@app.get(
"/items", response_model=ItemListResponse, tags=["Random Items Management"]
)
def get_randomized_items():
randomized = items_db.copy()
random.shuffle(randomized)
return ItemListResponse(
original_order=items_db,
randomized_order=randomized,
count=len(items_db),
)
@app.put(
"/items/{update_item_name}",
response_model=ItemUpdateResponse,
tags=["Random Items Management"],
)
def update_item(update_item_name: str, item: Item):
if update_item_name not in items_db:
raise HTTPException(status_code=404, detail="Item not found")
if item.name in items_db:
raise HTTPException(
status_code=409, detail="An item with that name already exists"
)
index = items_db.index(update_item_name)
items_db[index] = item.name
return ItemUpdateResponse(
message="Item updated successfully",
old_item=update_item_name,
new_item=item.name,
)
@app.delete(
"/items/{item}",
response_model=ItemDeleteResponse,
tags=["Random Items Management"],
)
def delete_item(item: str):
if item not in items_db:
raise HTTPException(status_code=404, detail="Item not found")
items_db.remove(item)
return ItemDeleteResponse(
message="Item deleted successfully",
deleted_item=item,
remaining_items_count=len(items_db),
)
| ItemDeleteResponse |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 105956,
"end": 108003
} | class ____(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(
self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None
):
super().__init__()
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
self.scale = 2 * math.pi if scale is None else scale
@compile_compatible_method_lru_cache(maxsize=1)
def forward(
self,
shape: torch.Size,
device: Union[torch.device, str],
dtype: torch.dtype,
mask: Optional[Tensor] = None,
) -> Tensor:
if mask is None:
mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool)
not_mask = (~mask).to(dtype)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
# Copied from transformers.models.maskformer.modeling_maskformer.PredictionBlock
| OneFormerSinePositionEmbedding |
python | aimacode__aima-python | games4e.py | {
"start": 15470,
"end": 15926
} | class ____(TicTacToe):
"""A TicTacToe-like game in which you can only make a move on the bottom
row, or in a square directly above an occupied square. Traditionally
played on a 7x6 board and requiring 4 in a row."""
def __init__(self, h=7, v=6, k=4):
TicTacToe.__init__(self, h, v, k)
def actions(self, state):
return [(x, y) for (x, y) in state.moves
if y == 1 or (x, y - 1) in state.board]
| ConnectFour |
python | ray-project__ray | python/ray/train/base_trainer.py | {
"start": 2353,
"end": 4480
} | class ____(RuntimeError):
"""An error indicating that training has failed."""
_RESTORE_MSG = (
"The Ray Train run failed. Please inspect the previous error messages for a "
"cause. After fixing the issue (assuming that the error is not caused by "
"your own application logic, but rather an error such as OOM), you can restart "
"the run from scratch or continue this run.\n"
"To continue this run, you can use: "
'`trainer = {trainer_cls_name}.restore("{path}")`.'
)
_FAILURE_CONFIG_MSG = (
"To start a new run that will retry on training failures, set "
"`train.RunConfig(failure_config=train.FailureConfig(max_failures))` "
"in the Trainer's `run_config` with `max_failures > 0`, or `max_failures = -1` "
"for unlimited retries."
)
def _train_coordinator_fn(
config: dict, trainer_cls: Type["BaseTrainer"], metadata: dict
):
"""This is the function that defines the logic of the Ray Train coordinator.
This is responsible for setting up a remote instance of the `trainer_cls`
(a different instance than the one calling `trainer.fit` on the driver!)
and running the training loop.
"""
assert metadata is not None, metadata
# Propagate user metadata from the Trainer constructor.
get_session().metadata = metadata
# config already contains merged values.
# Instantiate new Trainer in Trainable.
trainer = trainer_cls(**config)
# Get the checkpoint from Tune and pass it to workers later on.
checkpoint = ray.tune.get_checkpoint()
if checkpoint:
# Set `starting_checkpoint` for auto-recovery fault-tolerance
# as well as manual restoration.
trainer.starting_checkpoint = checkpoint
# else: Train will restore from the user-provided
# `resume_from_checkpoint` == `starting_checkpoint`.
# Evaluate datasets if they are wrapped in a factory.
trainer.datasets = {
k: d() if callable(d) else d for k, d in trainer.datasets.items()
}
trainer.setup()
trainer.training_loop()
@DeveloperAPI
| TrainingFailedError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1145333,
"end": 1146139
} | class ____(sgqlc.types.Type, Node):
"""An option for a discussion poll."""
__schema__ = github_schema
__field_names__ = ("option", "poll", "total_vote_count", "viewer_has_voted")
option = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="option")
"""The text for this option."""
poll = sgqlc.types.Field(DiscussionPoll, graphql_name="poll")
"""The discussion poll that this option belongs to."""
total_vote_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalVoteCount")
"""The total number of votes that have been cast for this option."""
viewer_has_voted = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerHasVoted")
"""Indicates if the viewer has voted for this option in the poll."""
| DiscussionPollOption |
python | dask__dask | dask/array/_array_expr/_blockwise.py | {
"start": 10906,
"end": 11597
} | class ____(Blockwise):
_parameters = ["array", "axes"]
func = staticmethod(np.transpose)
align_arrays = False
adjust_chunks = None
concatenate = None
token = "transpose"
@property
def new_axes(self):
return {}
@property
def name(self):
return self._name
@property
def _meta_provided(self):
return self.array._meta
@property
def dtype(self):
return self._meta.dtype
@property
def out_ind(self):
return self.axes
@property
def kwargs(self):
return {"axes": self.axes}
@property
def args(self):
return (self.array, tuple(range(self.array.ndim)))
| Transpose |
python | python-pillow__Pillow | src/PIL/SgiImagePlugin.py | {
"start": 5378,
"end": 6389
} | class ____(ImageFile.PyDecoder):
_pulls_fd = True
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
assert self.fd is not None
assert self.im is not None
rawmode, stride, orientation = self.args
pagesize = self.state.xsize * self.state.ysize
zsize = len(self.mode)
self.fd.seek(512)
for band in range(zsize):
channel = Image.new("L", (self.state.xsize, self.state.ysize))
channel.frombytes(
self.fd.read(2 * pagesize), "raw", "L;16B", stride, orientation
)
self.im.putband(channel.im, band)
return -1, 0
#
# registry
Image.register_decoder("SGI16", SGI16Decoder)
Image.register_open(SgiImageFile.format, SgiImageFile, _accept)
Image.register_save(SgiImageFile.format, _save)
Image.register_mime(SgiImageFile.format, "image/sgi")
Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"])
# End of file
| SGI16Decoder |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/loop_control_flow_illegal_cases_test.py | {
"start": 1560,
"end": 2621
} | class ____(reference_test_base.TestCase,
parameterized.TestCase):
@parameterized.parameters(*itertools.product(
(
[1],
[1, 2],
[1, 2, 3],
),
(
tf_break_in_py_for,
tf_return_in_py_for,
),
))
def test_tf_control_flow_in_py_for(self, l, target):
with self.assertRaisesRegex(NotImplementedError,
'not supported in Python for'):
tf.function(target)(l)
@parameterized.parameters(*itertools.product(
(
1,
2,
3,
),
(
tf_break_in_py_while,
tf_return_in_py_while,
),
))
def test_tf_control_flow_in_py_while(self, n, target):
with self.assertRaisesRegex(
NotImplementedError,
re.compile(
r'.*condition of while loop started as non\-Tensor,'
r' then changed to Tensor.*', re.DOTALL)):
tf.function(target)(n)
if __name__ == '__main__':
tf.test.main()
| LoopControlFlowIllegalCasesTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1025,
"end": 1161
} | class ____(sgqlc.types.Scalar):
"""A (potentially binary) string encoded using base64."""
__schema__ = github_schema
| Base64String |
python | scipy__scipy | scipy/io/tests/test_idl.py | {
"start": 12584,
"end": 15000
} | class ____:
# Test that pointers in arrays are correctly read in
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
assert_(np.all(s.array1d == np.float32(4.)))
assert_(np.all(vect_id(s.array1d) == id(s.array1d[0])))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
assert_(np.all(s.array2d == np.float32(4.)))
assert_(np.all(vect_id(s.array2d) == id(s.array2d[0,0])))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
assert_(np.all(s.array3d == np.float32(4.)))
assert_(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0])))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
assert_(np.all(s.array4d == np.float32(4.)))
assert_(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0])))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_(np.all(s.array5d == np.float32(4.)))
assert_(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0])))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
assert_(np.all(s.array6d == np.float32(4.)))
assert_(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0])))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
assert_(np.all(s.array7d == np.float32(4.)))
assert_(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0])))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
assert_(np.all(s.array8d == np.float32(4.)))
assert_(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0])))
| TestPointerArray |
python | Netflix__metaflow | test/core/tests/card_id_append.py | {
"start": 72,
"end": 3847
} | class ____(MetaflowTest):
"""
`current.card['myid']` should be accessible when cards have an `id` argument in decorator
- `current.card.append` should not work when there are no single default editable card.
- if a card has `ALLOW_USER_COMPONENTS=False` then it can still be edited via accessing it with `id` property.
"""
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('environment(vars={"METAFLOW_CARD_NO_WARNING": "True"})')
@tag('card(type="test_editable_card",id="xyz")')
@tag('card(type="test_editable_card",id="abc")')
@steps(0, ["start"])
def step_start(self):
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
self.random_number_2 = random.randint(0, 100)
current.card["abc"].append(TestStringComponent(str(self.random_number)))
# Below line should not work
current.card.append(TestStringComponent(str(self.random_number_2)))
@tag('card(type="test_non_editable_card",id="abc")')
@steps(0, ["end"])
def step_end(self):
# If the card is default non-editable, we can still access it via `current.card[id]`
from metaflow import current
from metaflow.plugins.cards.card_modules.test_cards import TestStringComponent
import random
self.random_number = random.randint(0, 100)
self.random_number_2 = random.randint(0, 100)
current.card["abc"].append(TestStringComponent(str(self.random_number)))
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
if run is None:
# This means CliCheck is in context.
for step in flow:
if step.name != "start" or step.name != "end":
continue
cli_check_dict = checker.artifact_dict(step.name, "random_number")
for task_pathspec in cli_check_dict:
task_id = task_pathspec.split("/")[-1]
number = cli_check_dict[task_pathspec]["random_number"]
checker.assert_card(
step.name,
task_id,
(
"test_editable_card"
if step.name == "start"
else "test_non_editable_card"
),
"%d" % number,
card_id="abc",
exact_match=True,
)
else:
# This means MetadataCheck is in context.
for step in flow:
if step.name != "start" or step.name != "end":
continue
meta_check_dict = checker.artifact_dict(step.name, "random_number")
for task_id in meta_check_dict:
random_number = meta_check_dict[task_id]["random_number"]
checker.assert_card(
step.name,
task_id,
(
"test_editable_card"
if step.name == "start"
else "test_non_editable_card"
),
"%d" % random_number,
card_id="abc",
exact_match=True,
)
| CardsWithIdTest |
python | django__django | django/contrib/auth/hashers.py | {
"start": 22409,
"end": 23750
} | class ____(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
self._check_encode_args(password, salt)
password = force_str(password)
salt = force_str(salt)
hash = hashlib.md5((salt + password).encode()).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def decode(self, encoded):
algorithm, salt, hash = encoded.split("$", 2)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"hash": hash,
"salt": salt,
}
def verify(self, password, encoded):
decoded = self.decode(encoded)
encoded_2 = self.encode(password, decoded["salt"])
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("salt"): mask_hash(decoded["salt"], show=2),
_("hash"): mask_hash(decoded["hash"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
return must_update_salt(decoded["salt"], self.salt_entropy)
def harden_runtime(self, password, encoded):
pass
| MD5PasswordHasher |
python | keon__algorithms | tests/test_maths.py | {
"start": 6352,
"end": 6894
} | class ____(unittest.TestCase):
"""[summary]
Test for the file modular_Exponential.py
Arguments:
unittest {[type]} -- [description]
"""
def test_modular_exponential(self):
self.assertEqual(1, modular_exponential(5, 117, 19))
self.assertEqual(pow(1243, 65321, 10 ** 9 + 7),
modular_exponential(1243, 65321, 10 ** 9 + 7))
self.assertEqual(1, modular_exponential(12, 0, 78))
self.assertRaises(ValueError, modular_exponential, 12, -2, 455)
| TestModularExponential |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol6.py | {
"start": 373,
"end": 735
} | class ____(Protocol):
def __call__(self, path: str) -> str: ...
def func1_1(path: str = "") -> str: ...
def func1_2(path: str) -> str: ...
val1_1: Callback1 = func1_1
# This should generate an error.
val1_2: Callback1 = func1_2
val2_1: Callback2 = func1_1
val2_2: Callback2 = func1_2
# Callback with keyword parameter with default arg value.
| Callback2 |
python | pypa__pip | src/pip/_vendor/rich/progress.py | {
"start": 23682,
"end": 24110
} | class ____(ProgressColumn):
"""Renders time elapsed."""
def render(self, task: "Task") -> Text:
"""Show time elapsed."""
elapsed = task.finished_time if task.finished else task.elapsed
if elapsed is None:
return Text("-:--:--", style="progress.elapsed")
delta = timedelta(seconds=max(0, int(elapsed)))
return Text(str(delta), style="progress.elapsed")
| TimeElapsedColumn |
python | bokeh__bokeh | src/bokeh/plotting/contour.py | {
"start": 10495,
"end": 10915
} | class ____:
''' Coordinates for filled contour polygons between a lower and upper level.
The first list contains a list for each polygon. The second list contains
a separate NumPy array for each boundary of that polygon; the first array
is always the outer boundary, subsequent arrays are holes.
'''
xs: list[list[np.ndarray]]
ys: list[list[np.ndarray]]
@dataclass(frozen=True)
| SingleFillCoords |
python | redis__redis-py | benchmarks/base.py | {
"start": 75,
"end": 1355
} | class ____:
ARGUMENTS = ()
def __init__(self):
self._client = None
def get_client(self, **kwargs):
# eventually make this more robust and take optional args from
# argparse
if self._client is None or kwargs:
defaults = {"db": 9}
defaults.update(kwargs)
pool = redis.ConnectionPool(**kwargs)
self._client = redis.Redis(connection_pool=pool)
return self._client
def setup(self, **kwargs):
pass
def run(self, **kwargs):
pass
def run_benchmark(self):
group_names = [group["name"] for group in self.ARGUMENTS]
group_values = [group["values"] for group in self.ARGUMENTS]
for value_set in itertools.product(*group_values):
pairs = list(zip(group_names, value_set))
arg_string = ", ".join(f"{p[0]}={p[1]}" for p in pairs)
sys.stdout.write(f"Benchmark: {arg_string}... ")
sys.stdout.flush()
kwargs = dict(pairs)
setup = functools.partial(self.setup, **kwargs)
run = functools.partial(self.run, **kwargs)
t = timeit.timeit(stmt=run, setup=setup, number=1000)
sys.stdout.write(f"{t:f}\n")
sys.stdout.flush()
| Benchmark |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/shape_ops_test.py | {
"start": 17002,
"end": 30170
} | class ____(test.TestCase, parameterized.TestCase):
def testScalar(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
tiled = array_ops.tile(a, [])
result = self.evaluate(tiled)
self.assertEqual(result.shape, ())
self.assertEqual([], tiled.get_shape())
self.assertEqual(7, result)
def testSimple(self):
# multiples could be int32 or int64
for dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 4))).all())
def testIdentityTileAndGrad(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [1, 1])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 1))
self.assertEqual([4, 1], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 1))).all())
def testEmpty(self):
with self.cached_session():
inp = np.random.rand(2, 3).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [5, 0])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (10, 0))
self.assertEqual([10, 0], tiled.get_shape())
@test_util.run_deprecated_v1
def testUnknownInputShape(self):
"""Importing can call _TileShape without shape of <multiples> known."""
with self.cached_session():
inp = array_ops.placeholder(dtypes.float32) # unknown shape
multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
tiled = array_ops.tile(inp, multiples)
gdef = tiled.graph.as_graph_def()
# Move the tile op to the start of the graph so that shapes of its inputs
# are not available when the shape function runs on import.
swapped = False
for i, n in enumerate(gdef.node):
if n.op == "Tile":
# Swap tile op to be first in gdef.node
assert i != 0
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(gdef.node[i])
gdef.node[i].CopyFrom(gdef.node[0])
gdef.node[0].CopyFrom(new_node)
swapped = True
assert swapped
tiled_imported, = importer.import_graph_def(
gdef, return_elements=[tiled.name])
self.assertEqual(4, tiled_imported.get_shape().ndims)
def testTypes(self):
types_to_test = {
"bool": (dtypes.bool, bool),
"bfloat16": (dtypes.bfloat16, float),
"float16": (dtypes.float16, float),
"float32": (dtypes.float32, float),
"float64": (dtypes.float64, float),
"complex64": (dtypes.complex64, complex),
"complex128": (dtypes.complex128, complex),
"uint8": (dtypes.uint8, int),
"int8": (dtypes.int8, int),
"int16": (dtypes.int16, int),
"int32": (dtypes.int32, int),
"int64": (dtypes.int64, int),
"uint32": (dtypes.uint32, int),
"uint64": (dtypes.uint64, int),
bytes: (dtypes.string, bytes)
}
for dtype_np, (dtype_tf, cast) in types_to_test.items():
with self.cached_session():
inp = np.random.rand(4, 1).astype(dtype_np)
a = constant_op.constant(
[cast(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtype_tf)
tiled = array_ops.tile(a, [1, 4])
result = self.evaluate(tiled)
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertAllEqual(result, np.tile(inp, (1, 4)))
@test_util.run_deprecated_v1
def testInvalidDim(self):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtypes.float32)
# Wrong length of multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [1, 4, 2])
# Wrong rank for multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [[2, 3], [3, 4]]).eval()
def _RunAndVerifyResult(self, rank, use_gpu):
with self.cached_session(use_gpu=use_gpu):
# Random dims of given rank
input_shape = np.random.randint(1, 4, size=rank)
inp = np.random.rand(*input_shape).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
multiples = np.random.randint(1, 4, size=rank).astype(np.int32)
tiled = array_ops.tile(a, multiples)
result = self.evaluate(tiled)
self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
result.shape)).all())
self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
self.assertShapeEqual(result, tiled)
def testRandom(self):
# test low rank, like 5
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(5, use_gpu=True)
# test high rank, like 10
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(10, use_gpu=True)
@parameterized.parameters(dtypes.int32, dtypes.int64)
@test_util.run_deprecated_v1
def testGradientSimpleReduction(self, multiples_dtype):
with self.cached_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
multiples = constant_op.constant([1, 4], dtype=multiples_dtype)
tiled = array_ops.tile(a, multiples)
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = self.evaluate(grad)
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@test_util.run_deprecated_v1
def testGradientStridedReduction(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = self.evaluate(grad)
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertTrue((np.abs(expected - result) < 1e-3).all())
@test_util.run_deprecated_v1
def testGradientSimpleReductionOnGPU(self):
with self.session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = self.evaluate(grad)
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
@test_util.run_deprecated_v1
def testGradientStridedReductionOnGPU(self):
with self.session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = self.evaluate(grad)
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertAllClose(expected, result, 1e-3)
def _RunAndVerifyGradientResult(self, input_shape, multiples):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
# Random values
inp = np.asarray(np.random.rand(*input_shape))
a = constant_op.constant(inp, dtype=dtypes.float64)
tiled = array_ops.tile(a, multiples)
grad_shape = list(np.array(multiples) * np.array(inp.shape))
err = gradient_checker.compute_gradient_error(
a, list(input_shape), tiled, grad_shape, x_init_value=inp)
print("tile(float) error = ", err)
self.assertLess(err, 1e-3)
@test_util.run_deprecated_v1
def testGradientRandomScalar(self):
self._RunAndVerifyGradientResult([], [])
@test_util.run_deprecated_v1
def testGradientRandom(self):
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 1, 1, 1, 1])
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
@test_util.run_deprecated_v1
def testGradientStridedReductionGC(self):
with self.cached_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
self.assertLess(err, 1e-3)
@parameterized.parameters(dtypes.int32, dtypes.int64)
@test_util.run_deprecated_v1
def testGradientWithSparseGradWithRank1(self, multiples_dtype):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
multiples = constant_op.constant([3], dtype=dtypes.int64)
outputs = array_ops.gather(array_ops.tile(inputs, multiples),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithSparseGradWithRank3(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
dtype=dtypes.float32)
inputs = array_ops.reshape(inputs, [-1, 1, 1])
outputs = array_ops.gather(array_ops.tile(inputs, [3, 4, 2]),
[1, 5, 9, 3, 7, 2, 2, 2])
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Unknown multiples shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, [2, 2, 2, 2])
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input and multiples shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertIs(None, tiled.get_shape().ndims)
# Known input and partially known multiples.
inp = constant_op.constant(0.0, shape=[1, 1])
tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
self.assertEqual([None, 7], tiled.get_shape().as_list())
# Mismatched input rank and multiples length.
inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
with self.assertRaises(ValueError):
tiled = array_ops.tile(
inp, array_ops.placeholder(
dtypes.int32, shape=[3]))
def testLargeTensor(self):
# Test case for GItHub issue 46911.
if test_util.is_xla_enabled():
# The following test fails with XLA enabled.
return
with self.assertRaises(errors_impl.InvalidArgumentError):
with self.cached_session():
tiled = array_ops.tile(
np.ones((1, 1, 1)), [100000000, 100000000, 100000000])
self.evaluate(tiled)
if __name__ == "__main__":
test.main()
| TileTest |
python | pydantic__pydantic | pydantic/v1/networks.py | {
"start": 17606,
"end": 18488
} | class ____(Representation):
__slots__ = 'name', 'email'
def __init__(self, name: str, email: str):
self.name = name
self.email = email
def __eq__(self, other: Any) -> bool:
return isinstance(other, NameEmail) and (self.name, self.email) == (other.name, other.email)
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='name-email')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
import_email_validator()
yield cls.validate
@classmethod
def validate(cls, value: Any) -> 'NameEmail':
if value.__class__ == cls:
return value
value = str_validator(value)
return cls(*validate_email(value))
def __str__(self) -> str:
return f'{self.name} <{self.email}>'
| NameEmail |
python | encode__django-rest-framework | tests/schemas/views.py | {
"start": 5677,
"end": 6002
} | class ____(generics.GenericAPIView):
serializer_class = ExampleSerializerModel
def get(self, *args, **kwargs):
from datetime import datetime
now = datetime.now()
serializer = self.get_serializer(data=now.date(), datetime=now)
return Response(serializer.data)
| ExampleGenericAPIViewModel |
python | jazzband__django-oauth-toolkit | oauth2_provider/apps.py | {
"start": 36,
"end": 244
} | class ____(AppConfig):
name = "oauth2_provider"
verbose_name = "Django OAuth Toolkit"
def ready(self):
# Import checks to ensure they run.
from . import checks # noqa: F401
| DOTConfig |
python | Textualize__textual | src/textual/app.py | {
"start": 7021,
"end": 7298
} | class ____(Exception):
"""Raised if suspending the application is not supported.
This exception is raised if [`App.suspend`][textual.app.App.suspend] is called while
the application is running in an environment where this isn't supported.
"""
| SuspendNotSupported |
python | getsentry__sentry | src/sentry/sentry_apps/metrics.py | {
"start": 248,
"end": 626
} | class ____(StrEnum):
"""Actions that Sentry Apps can do"""
# Webhook actions
PREPARE_WEBHOOK = "prepare_webhook"
SEND_WEBHOOK = "send_webhook"
# External Requests
EXTERNAL_REQUEST = "external_request"
# Authorizations
AUTHORIZATIONS = "authorizations"
# Managing Sentry Apps
MANAGEMENT = "management"
@dataclass
| SentryAppInteractionType |
python | numba__numba | numba/core/types/functions.py | {
"start": 13782,
"end": 18908
} | class ____(Callable, Opaque):
"""
A function with an implicit first argument (denoted as *this* below).
"""
def __init__(self, template, this):
# Create a derived template with an attribute *this*
newcls = type(template.__name__ + '.' + str(this), (template,),
dict(this=this))
self.template = newcls
self.typing_key = self.template.key
self.this = this
name = "%s(%s for %s)" % (self.__class__.__name__,
self.typing_key, self.this)
super(BoundFunction, self).__init__(name)
def unify(self, typingctx, other):
if (isinstance(other, BoundFunction) and
self.typing_key == other.typing_key):
this = typingctx.unify_pairs(self.this, other.this)
if this is not None:
# XXX is it right that both template instances are distinct?
return self.copy(this=this)
def copy(self, this):
return type(self)(self.template, this)
@property
def key(self):
# FIXME: With target-overload, the MethodTemplate can change depending
# on the target.
unique_impl = getattr(self.template, "_overload_func", None)
return self.typing_key, self.this, unique_impl
def get_impl_key(self, sig):
"""
Get the implementation key (used by the target context) for the
given signature.
"""
return self.typing_key
def get_call_type(self, context, args, kws):
template = self.template(context)
literal_e = None
nonliteral_e = None
out = None
choice = [True, False] if template.prefer_literal else [False, True]
for uselit in choice:
if uselit:
# Try with Literal
try:
out = template.apply(args, kws)
except Exception as exc:
if not isinstance(exc, errors.NumbaError):
raise exc
if isinstance(exc, errors.ForceLiteralArg):
raise exc
literal_e = exc
out = None
else:
break
else:
# if the unliteral_args and unliteral_kws are the same as the
# literal ones, set up to not bother retrying
unliteral_args = tuple([_unlit_non_poison(a) for a in args])
unliteral_kws = {k: _unlit_non_poison(v)
for k, v in kws.items()}
skip = unliteral_args == args and kws == unliteral_kws
# If the above template application failed and the non-literal
# args are different to the literal ones, try again with
# literals rewritten as non-literals
if not skip and out is None:
try:
out = template.apply(unliteral_args, unliteral_kws)
except Exception as exc:
if isinstance(exc, errors.ForceLiteralArg):
if template.prefer_literal:
# For template that prefers literal types,
# reaching here means that the literal types
# have failed typing as well.
raise exc
nonliteral_e = exc
else:
break
if out is None and (nonliteral_e is not None or literal_e is not None):
header = "- Resolution failure for {} arguments:\n{}\n"
tmplt = _termcolor.highlight(header)
if config.DEVELOPER_MODE:
indent = ' ' * 4
def add_bt(error):
if isinstance(error, BaseException):
# if the error is an actual exception instance, trace it
bt = traceback.format_exception(type(error), error,
error.__traceback__)
else:
bt = [""]
nd2indent = '\n{}'.format(2 * indent)
errstr = _termcolor.reset(nd2indent +
nd2indent.join(_bt_as_lines(bt)))
return _termcolor.reset(errstr)
else:
add_bt = lambda X: ''
def nested_msg(literalness, e):
estr = str(e)
estr = estr if estr else (str(repr(e)) + add_bt(e))
new_e = errors.TypingError(textwrap.dedent(estr))
return tmplt.format(literalness, str(new_e))
raise errors.TypingError(nested_msg('literal', literal_e) +
nested_msg('non-literal', nonliteral_e))
return out
def get_call_signatures(self):
sigs = getattr(self.template, 'cases', [])
is_param = hasattr(self.template, 'generic')
return sigs, is_param
| BoundFunction |
python | tiangolo__fastapi | docs_src/query_param_models/tutorial002_pv1_py310.py | {
"start": 120,
"end": 466
} | class ____(BaseModel):
class Config:
extra = "forbid"
limit: int = Field(100, gt=0, le=100)
offset: int = Field(0, ge=0)
order_by: Literal["created_at", "updated_at"] = "created_at"
tags: list[str] = []
@app.get("/items/")
async def read_items(filter_query: FilterParams = Query()):
return filter_query
| FilterParams |
python | huggingface__transformers | src/transformers/models/dinov2/modeling_dinov2.py | {
"start": 7812,
"end": 10203
} | class ____(nn.Module):
def __init__(self, config: Dinov2Config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(
self,
query_layer,
key_layer,
value_layer,
None,
is_causal=self.is_causal,
scaling=self.scaling,
dropout=0.0 if not self.training else self.dropout_prob,
)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return context_layer, attention_probs
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2
| Dinov2SelfAttention |
python | django__django | tests/validation/models.py | {
"start": 5539,
"end": 5677
} | class ____(Product):
class Meta:
required_db_features = {
"supports_table_check_constraints",
}
| ChildProduct |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_hourly_reports.py | {
"start": 2382,
"end": 7725
} | class ____(HourlyReportsTestWithStateChangesAfterMigration):
stream_name = "age_gender_audience_report_hourly"
report_file = "age_gender_audience_report_hourly"
records_number = 24
state_file = "hourly_reports_state"
incremental_report_file = "age_gender_audience_report_hourly_incremental"
report_file_with_records_further_start_date = "age_gender_audience_report_hourly_with_record_further_config_start_date"
state_file_legacy = "hourly_reports_state_legacy"
state_file_after_migration = "hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
incremental_report_file_with_records_further_cursor = "age_gender_audience_report_hourly_incremental_with_records_further_cursor"
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for second read
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
# for no config start date test
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Hourly", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
| TestAgeGenderAudienceReportHourlyStream |
python | google__pytype | pytype/tests/test_dataclasses.py | {
"start": 115,
"end": 24893
} | class ____(test_base.BaseTest):
"""Tests for @dataclass."""
def test_basic(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class Foo:
x: bool
y: int
z: str
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class Foo:
x: bool
y: int
z: str
def __init__(self, x: bool, y: int, z: str) -> None: ...
""",
)
def test_late_annotations(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class Foo:
x: 'Foo'
y: str
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class Foo:
x: Foo
y: str
def __init__(self, x: Foo, y: str) -> None: ...
""",
)
def test_redefine(self):
"""The first annotation should determine the order."""
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class Foo:
x: int
y: int
x: str = 'hello'
y = 10
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class Foo:
x: str = ...
y: int = ...
def __init__(self, x: str = ..., y: int = ...) -> None: ...
""",
)
def test_redefine_as_method(self):
ty, errors = self.InferWithErrors("""
import dataclasses
@dataclasses.dataclass
class Foo:
x: str = 'hello'
y: int = 10
def x(self): # annotation-type-mismatch[e]
return 10
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class Foo:
x: str = ...
y: int = ...
def __init__(self, x: str = ..., y: int = ...) -> None: ...
""",
)
self.assertErrorRegexes(
errors, {"e": r"Annotation: str.*Assignment: Callable"}
)
def test_no_init(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass(init=False)
class Foo:
x: bool
y: int
z: str
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class Foo:
x: bool
y: int
z: str
""",
)
def test_explicit_init(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass(init=True)
class Foo:
x: bool
y: int
def __init__(self, a: bool):
self.x = a
self.y = 0
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict
@dataclasses.dataclass
class Foo:
x: bool
y: int
def __init__(self, a: bool) -> None: ...
""",
)
def test_init_unknown_base(self):
self.CheckWithErrors("""
import dataclasses
from foo import Base # pytype: disable=import-error
@dataclasses.dataclass
class A(Base):
x: int
A(x=42)
A(x="wrong") # wrong-arg-types
A(x=42, y="from_base")
A(42, "from_base")
""")
def test_init_dynamic_base(self):
self.CheckWithErrors("""
import dataclasses
class Base:
_HAS_DYNAMIC_ATTRIBUTES = True
@dataclasses.dataclass
class A(Base):
x: int
A(x=42)
A(x="wrong") # wrong-arg-types
A(x=42, y="from_base")
A(42, "from_base")
""")
def test_field(self):
ty = self.Infer("""
from typing import List
import dataclasses
@dataclasses.dataclass()
class Foo:
x: bool = dataclasses.field(default=True)
y: List[int] = dataclasses.field(default_factory=list)
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, List, Union
@dataclasses.dataclass
class Foo:
x: bool = ...
y: List[int] = ...
def __init__(self, x: bool = ..., y: List[int] = ...) -> None: ...
""",
)
def test_type_mismatch(self):
self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass()
class Foo:
x: bool = 10 # annotation-type-mismatch
""")
def test_type_mismatch_on_none(self):
self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass()
class Foo:
x: int = None # annotation-type-mismatch
""")
def test_field_type_mismatch(self):
self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass()
class Foo:
x: bool = dataclasses.field(default=10) # annotation-type-mismatch
""")
def test_factory_type_mismatch(self):
self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass()
class Foo:
x: bool = dataclasses.field(default_factory=set) # annotation-type-mismatch
""")
def test_factory_type_mismatch_output(self):
err = self.CheckWithErrors("""
import dataclasses
from typing import Any, List, Union
def f() -> Union[int, str]:
if __random__:
return 1
else:
return "hello"
@dataclasses.dataclass
class Foo:
x: List[int] = dataclasses.field(default_factory=f) # annotation-type-mismatch[e]
""")
self.assertErrorRegexes(err, {"e": r"Union\[int, str\]"})
def test_field_no_init(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass()
class Foo:
x: bool = dataclasses.field(default=True)
y: int = dataclasses.field(init=False)
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict
@dataclasses.dataclass
class Foo:
x: bool = ...
y: int
def __init__(self, x: bool = ...) -> None: ...
""",
)
def test_field_init_no_default(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass()
class Foo:
x: bool = dataclasses.field()
y: int
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict
@dataclasses.dataclass
class Foo:
x: bool
y: int
def __init__(self, x: bool, y: int) -> None: ...
""",
)
def test_bad_default_param_order(self):
self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass() # invalid-function-definition>=3.11
class Foo: # invalid-function-definition<3.11
x: int = 10
y: str
""")
def test_any(self):
self.Check("""
import dataclasses
from typing import Any
@dataclasses.dataclass
class Foo:
foo: Any = None
""")
def test_instantiate_field_type(self):
self.Check("""
import dataclasses
@dataclasses.dataclass
class Foo:
def foo(self):
for field in dataclasses.fields(self):
field.type()
""")
def test_subclass(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass()
class Foo:
w: float
x: bool = dataclasses.field(default=True)
y: int = dataclasses.field(init=False)
class Bar(Foo):
def get_w(self):
return self.w
def get_x(self):
return self.x
def get_y(self):
return self.y
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class Foo:
w: float
x: bool = ...
y: int
def __init__(self, w: float, x: bool = ...) -> None: ...
class Bar(Foo):
def get_w(self) -> float: ...
def get_x(self) -> bool : ...
def get_y(self) -> int: ...
""",
)
def test_subclass_override(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class Foo:
w: float
x: bool = dataclasses.field(default=True)
y: int = dataclasses.field(init=False)
@dataclasses.dataclass
class Bar(Foo):
w: int
z: bool = dataclasses.field(default=True)
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class Foo:
w: float
x: bool = ...
y: int
def __init__(self, w: float, x: bool = ...) -> None: ...
@dataclasses.dataclass
class Bar(Foo):
w: int
z: bool = ...
def __init__(self, w: int, x: bool = ..., z: bool = ...) -> None: ...
""",
)
def test_multiple_inheritance(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class A:
a: int
@dataclasses.dataclass
class B:
b: str
@dataclasses.dataclass
class C(B, A):
c: int
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class A:
a: int
def __init__(self, a: int) -> None: ...
@dataclasses.dataclass
class B:
b: str
def __init__(self, b: str) -> None: ...
@dataclasses.dataclass
class C(B, A):
c: int
def __init__(self, a: int, b: str, c: int) -> None: ...
""",
)
def test_use_late_annotation(self):
self.Check("""
import dataclasses
from typing import Optional
@dataclasses.dataclass
class Foo:
foo: Optional['Foo'] = None
@dataclasses.dataclass
class Bar:
bar: Foo = dataclasses.field(default_factory=Foo)
""")
def test_union(self):
self.Check("""
import dataclasses
from typing import Optional
@dataclasses.dataclass
class Foo:
foo: Optional[str] = ''
""")
def test_union_late_annotation(self):
# This test is deliberately complicated to exercise various aspects of late
# initialization and method body analysis.
ty = self.Infer("""
import dataclasses
from typing import Optional, Union
@dataclasses.dataclass
class Tree:
children: 'Node'
def get_children(self) -> 'Node':
return self.children
def get_leaf(self) -> int:
if not isinstance(self.children, Tree):
return self.children.value
return 0
@dataclasses.dataclass
class Root(Tree):
pass
@dataclasses.dataclass
class IntLeaf:
value: int
@dataclasses.dataclass
class StrLeaf:
label: str
def get_value(x: Root):
ch = x.get_children()
if isinstance(ch, Tree):
return None
elif isinstance(ch, IntLeaf):
return ch.value
else:
return ch.label
Node = Union[Tree, IntLeaf, StrLeaf]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Optional, Union
import dataclasses
Node = Union[IntLeaf, StrLeaf, Tree]
@dataclasses.dataclass
class IntLeaf:
value: int
def __init__(self, value: int) -> None: ...
@dataclasses.dataclass
class StrLeaf:
label: str
def __init__(self, label: str) -> None: ...
@dataclasses.dataclass
class Tree:
children: Union[IntLeaf, StrLeaf, Tree]
def __init__(self, children: Union[IntLeaf, StrLeaf, Tree]) -> None: ...
def get_children(self) -> Union[IntLeaf, StrLeaf, Tree]: ...
def get_leaf(self) -> int: ...
@dataclasses.dataclass
class Root(Tree):
def __init__(self, children: Union[IntLeaf, StrLeaf, Tree]) -> None: ...
def get_value(x: Root) -> Optional[Union[int, str]]: ...
""",
)
def test_reuse_attribute_name(self):
self.Check("""
import dataclasses
from typing import Optional
@dataclasses.dataclass
class Foo:
x: Optional[str] = None
@dataclasses.dataclass
class Bar:
x: str
""")
def test_initvar(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class A:
x: dataclasses.InitVar[str]
y: int = 10
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class A:
y: int = ...
def __init__(self, x: str, y: int = ...) -> None: ...
""",
)
def test_initvar_default(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class A:
x: dataclasses.InitVar[str] = 'hello'
y: int = 10
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class A:
x: dataclasses.InitVar[str] = ...
y: int = ...
def __init__(self, x: str = ..., y: int = ...) -> None: ...
""",
)
def test_initvar_late(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class A:
w: dataclasses.InitVar['Foo']
x: dataclasses.InitVar['str'] = 'hello'
y: int = 10
class Foo:
pass
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class A:
x: dataclasses.InitVar[str] = ...
y: int = ...
def __init__(self, w: Foo, x: str = ..., y: int = ...) -> None: ...
class Foo: ...
""",
)
def test_initvar_inheritance(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class A:
x: dataclasses.InitVar[str]
y: int = 10
@dataclasses.dataclass
class B(A):
z: dataclasses.InitVar[int] = 42
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Union
@dataclasses.dataclass
class A:
y: int = ...
def __init__(self, x: str, y: int = ...) -> None: ...
@dataclasses.dataclass
class B(A):
z: dataclasses.InitVar[int] = ...
def __init__(self, x: str, y: int = ..., z: int = ...) -> None: ...
""",
)
def test_classvar(self):
ty = self.Infer("""
from typing import ClassVar
import dataclasses
@dataclasses.dataclass
class Foo:
x: ClassVar[int] = 10
y: str = 'hello'
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import ClassVar, Dict
@dataclasses.dataclass
class Foo:
y: str = ...
x: ClassVar[int]
def __init__(self, y: str = ...) -> None: ...
""",
)
def test_duplicate_inner_class(self):
ty = self.Infer("""
import dataclasses
class Foo:
@dataclasses.dataclass
class Inner:
a: int
class Bar:
@dataclasses.dataclass
class Inner:
b: str
Inner1 = Foo.Inner
Inner2 = Bar.Inner
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict
class Foo:
@dataclasses.dataclass
class Inner:
a: int
def __init__(self, a: int) -> None: ...
class Bar:
@dataclasses.dataclass
class Inner:
b: str
def __init__(self, b: str) -> None: ...
Inner1 = Foo.Inner
Inner2 = Bar.Inner
""",
)
def test_check_field_against_container(self):
self.Check("""
import dataclasses
from typing import List
@dataclasses.dataclass
class NHNetConfig:
passage_list: List[str] = dataclasses.field(
default_factory=lambda: [chr(i) for i in range(5)])
""")
def test_field_wrapper(self):
ty = self.Infer("""
import dataclasses
def field_wrapper(**kwargs):
return dataclasses.field(**kwargs)
@dataclasses.dataclass
class Foo:
x: int = dataclasses.field(default=0)
y: int = field_wrapper(default=1)
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Any, Dict
def field_wrapper(**kwargs) -> Any: ...
@dataclasses.dataclass
class Foo:
x: int = ...
y: int = ...
def __init__(self, x: int = ..., y: int = ...) -> None: ...
""",
)
def test_property(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class Foo:
x: bool
y: int
@property
def z(self) -> str:
return "hello world"
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Annotated, Dict
@dataclasses.dataclass
class Foo:
x: bool
y: int
z: Annotated[str, 'property']
def __init__(self, x: bool, y: int) -> None: ...
""",
)
def test_generic(self):
ty = self.Infer("""
import dataclasses
from typing import Generic, TypeVar
T = TypeVar('T')
@dataclasses.dataclass
class Foo(Generic[T]):
x: T
foo1 = Foo(x=0)
x1 = foo1.x
foo2 = Foo[str](x=__any_object__)
x2 = foo2.x
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import Dict, Generic, TypeVar
T = TypeVar('T')
@dataclasses.dataclass
class Foo(Generic[T]):
x: T
def __init__(self, x: T) -> None:
self = Foo[T]
foo1: Foo[int]
x1: int
foo2: Foo[str]
x2: str
""",
)
def test_dataclass_attribute_with_getattr(self):
# Tests that the type of the 'x' attribute is correct in Child.__init__
# (i.e., the __getattr__ return type shouldn't be used).
self.Check("""
import dataclasses
from typing import Dict, Sequence
class Base:
def __init__(self, x: str):
self.x = x
def __getattr__(self, name: str) -> 'Base':
return self
class Child(Base):
def __init__(self, x: str, children: Sequence['Child']):
super().__init__(x)
self._children: Dict[str, Child] = {}
for child in children:
self._children[child.x] = child
@dataclasses.dataclass
class Container:
child: Child
""")
@test_utils.skipBeforePy((3, 10), "kw_only parameter is new in 3.10")
def test_sticky_kwonly(self):
self.Check("""
import dataclasses
@dataclasses.dataclass
class A():
a1: int
_: dataclasses.KW_ONLY
a2: int = dataclasses.field(default_factory=lambda: 0)
@dataclasses.dataclass
class B(A):
b1: str
b = B(1, '1')
""")
@test_utils.skipBeforePy((3, 10), "kw_only parameter is new in 3.10")
def test_sticky_kwonly_error(self):
self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass # dataclass-error>=3.11
class A: # dataclass-error<3.11
a1: int
_a: dataclasses.KW_ONLY
a2: int = dataclasses.field(default_factory=lambda: 0)
_b: dataclasses.KW_ONLY
a3: int = 10
""")
@test_utils.skipBeforePy((3, 10), "kw_only parameter is new in 3.10")
def test_sticky_kwonly_override(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class A():
a1: int
_: dataclasses.KW_ONLY
a2: int = dataclasses.field(default_factory=lambda: 0)
a3: int = dataclasses.field(kw_only=False)
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
@dataclasses.dataclass
class A:
a1: int
a2: int = ...
a3: int
_: dataclasses.KW_ONLY
def __init__(self, a1: int, a3: int, *, a2: int = ...) -> None: ...
""",
)
@test_utils.skipBeforePy((3, 10), "KW_ONLY is new in 3.10")
def test_kwonly_and_nonfield_default(self):
ty = self.Infer("""
import dataclasses
@dataclasses.dataclass
class C:
_: dataclasses.KW_ONLY
x: int = 0
y: str
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
@dataclasses.dataclass
class C:
x: int = ...
y: str
_: dataclasses.KW_ONLY
def __init__(self, *, x: int = ..., y: str) -> None: ...
""",
)
@test_utils.skipBeforePy((3, 10), "KW_ONLY is new in 3.10")
def test_kwonly_and_kwargs(self):
self.Check("""
import dataclasses
@dataclasses.dataclass
class C:
_: dataclasses.KW_ONLY
x: int
def f(**kwargs):
return C(**kwargs)
""")
def test_star_import(self):
with self.DepTree([(
"foo.pyi",
"""
import dataclasses
""",
)]):
ty = self.Infer("""
import dataclasses
from foo import *
@dataclasses.dataclass
class X:
b: int
a: str = ...
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
@dataclasses.dataclass
class X:
b: int
a: str = ...
def __init__(self, b: int, a: str = ...) -> None: ...
""",
)
def test_replace_wrong_keyword_args(self):
self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass
class Test:
x: int
x = Test(1)
dataclasses.replace(x, y=1, z=2) # wrong-keyword-args
""")
def test_replace_not_a_dataclass(self):
self.CheckWithErrors("""
import dataclasses
class Test:
pass
dataclasses.replace(Test(), y=1, z=2) # wrong-arg-types
""")
def test_replace_late_annotation(self):
# Regression test: LateAnnotations (like `z: Z`) should behave
# like their underlying types once resolved. The dataclass overlay
# relies on this behavior.
self.Check("""
from __future__ import annotations
import dataclasses
@dataclasses.dataclass
class A:
z: Z
def do(self):
return dataclasses.replace(self.z, name="A")
@dataclasses.dataclass
class Z:
name: str
""")
def test_replace_as_method_with_kwargs(self):
# This is a weird case where replace is added as a method, then called
# with kwargs. This makes pytype unable to see that `self` is the object
# being modified, and also caused a crash when the dataclass overlay tries
# to unpack the object being modified from the args.
self.Check("""
import dataclasses
@dataclasses.dataclass
class WithKwargs:
replace = dataclasses.replace
def do(self, **kwargs):
return self.replace(**kwargs)
""")
def test_replace_subclass(self):
self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass
class Base:
name: str
@dataclasses.dataclass
class Sub(Base):
index: int
a = Sub(name="a", index=0)
dataclasses.replace(a, name="b", index=2)
dataclasses.replace(a, name="c", idx=3) # wrong-keyword-args
""")
def test_replace_wrong_type(self):
errors = self.CheckWithErrors("""
import dataclasses
@dataclasses.dataclass
class C:
name: str
dataclasses.replace(C('x'), name=42) # wrong-arg-types[e]
""")
self.assertErrorSequences(
errors, {"e": ["Expected", "str", "Actual", "int"]}
)
def test_replace_unknown_base(self):
self.CheckWithErrors("""
import dataclasses
from foo import Base # pytype: disable=import-error
@dataclasses.dataclass
class A(Base):
x: int
a = A(x=42)
dataclasses.replace(a, x="wrong") # wrong-arg-types
dataclasses.replace(a, y="from_base")
""")
def test_replace_dynamic_base(self):
self.CheckWithErrors("""
import dataclasses
class Base:
_HAS_DYNAMIC_ATTRIBUTES = True
@dataclasses.dataclass
class A(Base):
x: int
a = A(x=42)
dataclasses.replace(a, x="wrong") # wrong-arg-types
dataclasses.replace(a, y="from_base")
""")
| TestDataclass |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 931452,
"end": 931935
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of ReopenPullRequest"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "pull_request")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
"""The pull request that was reopened."""
| ReopenPullRequestPayload |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/interfaces.py | {
"start": 15374,
"end": 15718
} | class ____(TypedDict):
"""Dictionary representing the reflected comment corresponding to
the :attr:`_schema.Table.comment` attribute.
The :class:`.ReflectedTableComment` structure is returned by the
:meth:`.Inspector.get_table_comment` method.
"""
text: Optional[str]
"""text of the comment"""
| ReflectedTableComment |
python | kamyu104__LeetCode-Solutions | Python/subsequence-of-size-k-with-the-largest-even-sum.py | {
"start": 80,
"end": 1769
} | class ____(object):
def largestEvenSum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
nth_element(nums, k-1, compare=lambda a, b: a > b)
total = sum(nums[i] for i in xrange(k))
if total%2 == 0:
return total
min_k = [float("inf")]*2
for i in xrange(k):
min_k[nums[i]%2] = min(min_k[nums[i]%2], nums[i])
result = -1
for i in xrange(k, len(nums)):
result = max(result, total-min_k[not (nums[i]%2)]+nums[i])
return result
| Solution |
python | huggingface__transformers | src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py | {
"start": 2158,
"end": 4421
} | class ____(nn.Module):
def __init__(
self,
config: MobileNetV1Config,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: Optional[int] = 1,
groups: Optional[int] = 1,
bias: bool = False,
use_normalization: Optional[bool] = True,
use_activation: Optional[Union[bool, str]] = True,
) -> None:
super().__init__()
self.config = config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
self.convolution = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=bias,
padding_mode="zeros",
)
if use_normalization:
self.normalization = nn.BatchNorm2d(
num_features=out_channels,
eps=config.layer_norm_eps,
momentum=0.9997,
affine=True,
track_running_stats=True,
)
else:
self.normalization = None
if use_activation:
if isinstance(use_activation, str):
self.activation = ACT2FN[use_activation]
elif isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
else:
self.activation = None
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.config.tf_padding:
features = apply_tf_padding(features, self.convolution)
features = self.convolution(features)
if self.normalization is not None:
features = self.normalization(features)
if self.activation is not None:
features = self.activation(features)
return features
@auto_docstring
| MobileNetV1ConvLayer |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/cwise_ops_test.py | {
"start": 32754,
"end": 37191
} | class ____(test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with test_util.force_cpu():
inx = ops.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return self.evaluate(z)
def _computeLiteralAndTensor(self, x, y, dtype, func):
with test_util.force_cpu():
iny = ops.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return self.evaluate(z)
def _compareBinary(self, x, y, dtype, np_func, tf_func):
# astype and assertAllClose do not properly handle bfloat16 values
np_ans = np_func(x, y)
if np_func != np.true_divide:
# for true_divide the result is a float, event for integer args.
np_ans = np_ans.astype(np.float32 if dtype == dtypes_lib.bfloat16
else dtype.as_numpy_dtype)
rtol = 1e-2 if dtype in (dtypes_lib.bfloat16, dtypes_lib.float16) else 1e-6
self.assertAllClose(np_ans,
self._computeTensorAndLiteral(x, y, dtype, tf_func),
rtol=rtol)
self.assertAllClose(np_ans,
self._computeLiteralAndTensor(x, y, dtype, tf_func),
rtol=rtol)
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x).astype(dtype.as_numpy_dtype)
with test_util.force_cpu():
self.assertAllClose(
np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))
def testOverload(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.bfloat16,
dtypes_lib.uint8,
dtypes_lib.uint16,
dtypes_lib.uint32,
dtypes_lib.uint64,
dtypes_lib.int8,
dtypes_lib.int16,
dtypes_lib.int32,
dtypes_lib.int64,
dtypes_lib.complex64,
dtypes_lib.complex128,
]
funcs = [
(np.add, _ADD),
(np.subtract, _SUB),
(np.multiply, _MUL),
(np.power, _POW),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
(np.mod, _MOD),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
with self.subTest(dtype=dtype, np_func=np_func, tf_func=tf_func):
if dtype in (dtypes_lib.complex64,
dtypes_lib.complex128) and tf_func in (_FLOORDIV, _MOD):
continue # floordiv makes no sense for complex
if dtype in (dtypes_lib.uint8, dtypes_lib.uint16, dtypes_lib.uint32,
dtypes_lib.uint64) and tf_func == _POW:
continue # power not supported for unsigned types
self._compareBinary(10, 3, dtype, np_func, tf_func)
def testOverloadComparisons(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.uint8,
dtypes_lib.uint16,
dtypes_lib.uint32,
dtypes_lib.uint64,
dtypes_lib.int8,
dtypes_lib.int16,
dtypes_lib.int32,
dtypes_lib.int64,
]
funcs = [
(np.less, _LT),
(np.less_equal, _LE),
(np.greater, _GT),
(np.greater_equal, _GE),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
with self.subTest(dtype=dtype, np_func=np_func, tf_func=tf_func):
self._compareBinary(10, 5, dtype, np_func, tf_func)
logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
(np.logical_xor, _XOR), (np.equal, math_ops.equal),
(np.not_equal, math_ops.not_equal)]
for np_func, tf_func in logical_funcs:
with self.subTest(np_func=np_func, tf_func=tf_func):
self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
[True, False, True, False], dtypes_lib.bool,
np_func, tf_func)
self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)
| MathOpsOverloadTest |
python | spyder-ide__spyder | spyder/plugins/completion/plugin.py | {
"start": 1916,
"end": 51415
} | class ____(SpyderPluginV2):
"""
Spyder completion plugin.
This class provides a completion and linting plugin for the editor in
Spyder.
This plugin works by forwarding all the completion/linting requests to a
set of :class:`SpyderCompletionProvider` instances that are discovered
and registered via entrypoints.
This plugin can assume that `fallback`, `snippets` and `lsp`
completion providers are available, since they are included as part of
Spyder.
"""
NAME = 'completions'
CONF_SECTION = 'completions'
REQUIRES = [Plugins.Preferences]
OPTIONAL = [
Plugins.MainInterpreter,
Plugins.MainMenu,
Plugins.IPythonConsole,
Plugins.PythonpathManager,
Plugins.StatusBar,
]
CONF_FILE = False
# Additional configuration tabs for this plugin, this attribute is
# initialized dynamically based on the provider information.
ADDITIONAL_CONF_TABS = {}
# The configuration page is created dynamically based on the providers
# loaded
CONF_WIDGET_CLASS = None
# Container used to store graphical widgets
CONTAINER_CLASS = CompletionContainer
# ------------------------------- Signals ---------------------------------
sig_response_ready = Signal(str, int, dict)
"""
This signal is used to receive a response from a completion provider.
Parameters
----------
completion_client_name: str
Name of the completion client that produced this response.
request_seq: int
Sequence number for the request.
response: dict
Actual request corpus response.
"""
sig_provider_ready = Signal(str)
"""
This signal is used to indicate that a completion provider is ready
to handle requests.
Parameters
----------
completion_client_name: str
Name of the completion client.
"""
sig_pythonpath_changed = Signal(object, bool)
"""
This signal is used to receive changes on the Python path values handled
by Spyder.
Parameters
----------
new_path: list of str
New PythonPath settings.
prioritize
Whether to prioritize PYTHONPATH in sys.path
"""
_sig_interpreter_changed = Signal(str)
"""
This private signal is used to handle changes in the Python interpreter
done by other plugins.
Parameters
----------
path: str
Path to the new interpreter.
"""
sig_language_completions_available = Signal(dict, str)
"""
This signal is used to indicate that completion services are available
for a given programming language.
Parameters
----------
completion_capabilites: dict
Available configurations supported by the providers, it should conform
to `spyder.plugins.completion.api.SERVER_CAPABILITES`.
language: str
Name of the programming language whose completion capabilites are
available.
"""
sig_open_file = Signal(str)
"""
This signal is used to open a file in the editor.
Parameters
----------
path: str
Path to a file to open with the editor.
"""
sig_stop_completions = Signal(str)
"""
This signal is used to stop completion services on other Spyder plugins
that depend on them.
Parameters
----------
language: str
Name of the programming language whose completion services are not
available.
"""
# --------------------------- Other constants -----------------------------
RUNNING = 'running'
STOPPED = 'stopped'
SKIP_INTERMEDIATE_REQUESTS = {
CompletionRequestTypes.DOCUMENT_COMPLETION
}
AGGREGATE_RESPONSES = {
CompletionRequestTypes.DOCUMENT_COMPLETION
}
def __init__(self, parent, configuration=None):
super().__init__(parent, configuration)
# Available completion providers
self._available_providers = {}
# Instantiated completion providers
self.providers = {}
# Mapping that indicates if there are completion services available
# for a given language
self.language_status = {}
# Mapping that contains the ids and the current completion/linting
# requests in progress
self.requests = {}
# Current request sequence identifier
self.req_id = 0
# Lock to prevent concurrent access to requests mapping
self.collection_mutex = QRecursiveMutex()
# Completion request priority
self.source_priority = {}
# Completion provider speed: slow or fast
self.provider_speed = {}
# Timeout limit for a response to be received
self.wait_for_ms = self.get_conf('completions_wait_for_ms')
# Save application menus to create if/when MainMenu is available.
self.application_menus_to_create = []
# Save items to add to application menus if/when MainMenu is
# available.
self.items_to_add_to_application_menus = []
# Find and instantiate all completion providers registered via
# entrypoints
for entry_point in entry_points(group=COMPLETION_ENTRYPOINT):
try:
logger.debug(f'Loading entry point: {entry_point}')
Provider = entry_point.load()
self._instantiate_and_register_provider(Provider)
except Exception as e:
logger.warning('Failed to load completion provider from entry '
f'point {entry_point}')
raise e
# Register statusbar widgets
self.register_statusbar_widgets(plugin_loaded=False)
# Define configuration page and tabs
(conf_providers, conf_tabs) = self.gather_providers_and_configtabs()
self.CONF_WIDGET_CLASS = partialclass(
CompletionConfigPage, providers=conf_providers)
self.ADDITIONAL_CONF_TABS = {'completions': conf_tabs}
# ---- SpyderPluginV2 API
@staticmethod
def get_name() -> str:
return _('Completion and linting')
@staticmethod
def get_description() -> str:
return _('Handle code completion, analysis, formatting, introspection, '
'folding and more via the Language Server Protocol and other '
'providers.')
@classmethod
def get_icon(cls):
return cls.create_icon('completions')
def on_initialize(self):
self.start_all_providers()
@on_plugin_available(plugin=Plugins.Preferences)
def on_preferences_available(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.register_plugin_preferences(self)
@on_plugin_available(plugin=Plugins.MainInterpreter)
def on_maininterpreter_available(self):
maininterpreter = self.get_plugin(Plugins.MainInterpreter)
# This will allow people to change the interpreter used for completions
# if they disable the IPython console.
if not self.is_plugin_enabled(Plugins.IPythonConsole):
maininterpreter.sig_interpreter_changed.connect(
self._sig_interpreter_changed
)
@on_plugin_available(plugin=Plugins.StatusBar)
def on_statusbar_available(self):
container = self.get_container()
self.statusbar = self.get_plugin(Plugins.StatusBar)
for sb in container.all_statusbar_widgets():
self.statusbar.add_status_widget(sb)
@on_plugin_available(plugin=Plugins.MainMenu)
def on_mainmenu_available(self):
main_menu = self.get_plugin(Plugins.MainMenu)
# Create requested application menus.
for args, kwargs in self.application_menus_to_create:
main_menu.create_application_menu(*args, **kwargs)
# Add items to application menus.
for args, kwargs in self.items_to_add_to_application_menus:
main_menu.add_item_to_application_menu(*args, **kwargs)
@on_plugin_available(plugin=Plugins.PythonpathManager)
def on_pythonpath_manager_available(self):
pythonpath_manager = self.get_plugin(Plugins.PythonpathManager)
pythonpath_manager.sig_pythonpath_changed.connect(
self.sig_pythonpath_changed)
@on_plugin_available(plugin=Plugins.IPythonConsole)
def on_ipython_console_available(self):
ipyconsole = self.get_plugin(Plugins.IPythonConsole)
ipyconsole.sig_interpreter_changed.connect(
self._sig_interpreter_changed
)
@on_plugin_teardown(plugin=Plugins.Preferences)
def on_preferences_teardown(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.deregister_plugin_preferences(self)
@on_plugin_teardown(plugin=Plugins.MainInterpreter)
def on_maininterpreter_teardown(self):
maininterpreter = self.get_plugin(Plugins.MainInterpreter)
# We only connect to this signal if the IPython console is not enabled
if not self.is_plugin_enabled(Plugins.IPythonConsole):
maininterpreter.sig_interpreter_changed.disconnect(
self._sig_interpreter_changed
)
@on_plugin_teardown(plugin=Plugins.StatusBar)
def on_statusbar_teardown(self):
container = self.get_container()
self.statusbar = self.get_plugin(Plugins.StatusBar)
for sb in container.all_statusbar_widgets():
self.statusbar.remove_status_widget(sb.ID)
@on_plugin_teardown(plugin=Plugins.MainMenu)
def on_mainmenu_teardown(self):
main_menu = self.get_plugin(Plugins.MainMenu)
signature = inspect.signature(main_menu.add_item_to_application_menu)
for args, kwargs in self.application_menus_to_create:
menu_id = args[0]
main_menu.remove_application_menu(menu_id)
for args, kwargs in self.items_to_add_to_application_menus:
binding = signature.bind(*args, **kwargs)
binding.apply_defaults()
item = binding.arguments['item']
menu_id = binding.arguments['menu_id']
item_id = None
if hasattr(item, 'action_id'):
item_id = item.action_id
elif hasattr(item, 'menu_id'):
item_id = item.menu_id
if item_id is not None:
main_menu.remove_item_from_application_menu(
item_id, menu_id=menu_id)
@on_plugin_teardown(plugin=Plugins.PythonpathManager)
def on_pythonpath_manager_teardown(self):
pythonpath_manager = self.get_plugin(Plugins.PythonpathManager)
pythonpath_manager.sig_pythonpath_changed.disconnect(
self.sig_pythonpath_changed)
@on_plugin_teardown(plugin=Plugins.IPythonConsole)
def on_ipython_console_teardowm(self):
ipyconsole = self.get_plugin(Plugins.IPythonConsole)
ipyconsole.sig_interpreter_changed.disconnect(
self._sig_interpreter_changed
)
# ---- Public API
def stop_all_providers(self):
"""Stop all running completion providers."""
for provider_name in self.providers:
provider_info = self.providers[provider_name]
if provider_info['status'] == self.RUNNING:
# TODO: Remove status bar widgets
provider_info['instance'].shutdown()
def can_close(self) -> bool:
"""Check if any provider has any pending task."""
for provider_name in self.providers:
provider_info = self.providers[provider_name]
if provider_info['status'] == self.RUNNING:
provider = provider_info['instance']
if not provider.can_close():
return False
return True
def on_close(self, cancelable=False) -> bool:
"""Check if any provider has any pending task before closing."""
can_close = True
for provider_name in self.providers:
provider_info = self.providers[provider_name]
if provider_info['status'] == self.RUNNING:
provider = provider_info['instance']
provider_can_close = provider.can_close()
can_close = can_close and provider_can_close
logger.debug(
f"Provider {provider_name} can close: {provider_can_close}"
)
if provider_can_close:
provider.shutdown()
return can_close
def after_configuration_update(self, options: List[Union[tuple, str]]):
"""
Update plugin and/or provider configurations.
Settings are propagated from changes on the configuration page and/or
provider tabs.
"""
providers_to_update = set({})
for option in options:
if option == 'completions_wait_for_ms':
self.wait_for_ms = self.get_conf(
'completions_wait_for_ms')
elif isinstance(option, tuple):
option_name, provider_name, *__ = option
if option_name == 'enabled_providers':
provider_status = self.get_conf(
('enabled_providers', provider_name))
if provider_status:
self.start_provider_instance(provider_name)
self.register_statusbar_widget(provider_name)
else:
self.shutdown_provider_instance(provider_name)
self.unregister_statusbar(provider_name)
elif option_name == 'provider_configuration':
providers_to_update |= {provider_name}
def on_mainwindow_visible(self):
for provider_name in self.providers:
provider_info = self.providers[provider_name]
provider_info['instance'].on_mainwindow_visible()
# ---------------------------- Status bar widgets -------------------------
def register_statusbar_widgets(self, plugin_loaded=True):
"""
Register status bar widgets for all providers with the container.
Parameters
----------
plugin_loaded: bool
True if the plugin is already loaded in Spyder, False if it is
being loaded. This is needed to avoid adding statusbar widgets
multiple times at startup.
"""
for provider_key in self.providers:
provider_on = self.get_conf(
('enabled_providers', provider_key), True)
if provider_on:
self.register_statusbar_widget(
provider_key, plugin_loaded=plugin_loaded)
def register_statusbar_widget(self, provider_name, plugin_loaded=True):
"""
Register statusbar widgets for a given provider.
Parameters
----------
provider_name: str
Name of the provider that is going to create statusbar widgets.
plugin_loaded: bool
True if the plugin is already loaded in Spyder, False if it is
being loaded.
"""
container = self.get_container()
provider = self.providers[provider_name]['instance']
widgets_ids = container.register_statusbar_widgets(
provider.STATUS_BAR_CLASSES, provider_name)
if plugin_loaded:
for id_ in widgets_ids:
current_widget = container.statusbar_widgets[id_]
# Validation to check for status bar registration before trying
# to add a widget.
# See spyder-ide/spyder#16997
if id_ not in self.statusbar.get_status_widgets():
self.statusbar.add_status_widget(current_widget)
def unregister_statusbar(self, provider_name):
"""
Unregister statusbar widgets for a given provider.
Parameters
----------
provider_name: str
Name of the provider that is going to delete statusbar widgets.
"""
container = self.get_container()
provider_keys = self.get_container().get_provider_statusbar_keys(
provider_name)
for id_ in provider_keys:
# Validation to check for status bar registration before trying
# to remove a widget.
# See spyder-ide/spyder#16997
if id_ in container.statusbar_widgets:
self.get_container().remove_statusbar_widget(id_)
self.statusbar.remove_status_widget(id_)
# -------- Completion provider initialization redefinition wrappers -------
def gather_providers_and_configtabs(self):
"""
Gather and register providers and their configuration tabs.
This method iterates over all completion providers, takes their
corresponding configuration tabs, and patches the methods that
interact with writing/reading/removing configuration options to
consider provider options that are stored inside this plugin's
`provider_configuration` option, which makes providers unaware
of the CompletionPlugin existence.
"""
conf_providers = []
conf_tabs = []
widget_funcs = self.gather_create_ops()
for provider_key in self.providers:
provider = self.providers[provider_key]['instance']
for tab in provider.CONF_TABS:
# Add set_option/get_option/remove_option to tab definition
setattr(tab, 'get_option',
self.wrap_get_option(provider_key))
setattr(tab, 'set_option',
self.wrap_set_option(provider_key))
setattr(tab, 'remove_option',
self.wrap_remove_option(provider_key))
# Wrap apply_settings to return settings correctly
setattr(tab, 'apply_settings',
self.wrap_apply_settings(tab, provider_key))
# Wrap create_* methods to consider provider
for name, pos in widget_funcs:
setattr(tab, name,
self.wrap_create_op(name, pos, provider_key))
conf_tabs += provider.CONF_TABS
conf_providers.append((provider_key, provider.get_name()))
return conf_providers, conf_tabs
def gather_create_ops(self):
"""
Extract all the create_* methods declared in the
:class:`spyder.api.preferences.PluginConfigPage` class
"""
# Filter widget creation functions in ConfigPage
members = inspect.getmembers(CompletionConfigPage)
widget_funcs = []
for name, call in members:
if name.startswith('create_'):
sig = inspect.signature(call)
parameters = sig.parameters
if 'option' in sig.parameters:
pos = -1
for param in parameters:
if param == 'option':
break
pos += 1
widget_funcs.append((name, pos))
return widget_funcs
def wrap_get_option(self, provider):
    """
    Wrap `get_option` for a provider config tab so reads resolve inside
    the `provider_configuration` entry of this plugin's options.

    This keeps configuration tabs unaware that they live behind the
    completion plugin.
    """
    plugin = self

    def wrapper(self, option, default=NoDefault, section=None):
        if section is None:
            prefix = ('provider_configuration', provider, 'values')
            option = prefix + (option if isinstance(option, tuple)
                               else (option,))
        return plugin.get_conf(option, default, section)
    return wrapper
def wrap_set_option(self, provider):
    """
    Wrap `set_option` for a provider config tab so writes resolve inside
    the `provider_configuration` entry of this plugin's options.

    This keeps configuration tabs unaware that they live behind the
    completion plugin.
    """
    plugin = self

    def wrapper(self, option, value, section=None,
                recursive_notification=False):
        if section is None:
            prefix = ('provider_configuration', provider, 'values')
            option = prefix + (option if isinstance(option, tuple)
                               else (option,))
        return plugin.set_conf(
            option, value, section,
            recursive_notification=recursive_notification)
    return wrapper
def wrap_remove_option(self, provider):
    """
    Wrap `remove_option` for a provider config tab so removals resolve
    inside the `provider_configuration` entry of this plugin's options.

    This keeps configuration tabs unaware that they live behind the
    completion plugin.
    """
    plugin = self

    def wrapper(self, option, section=None):
        if section is None:
            prefix = ('provider_configuration', provider, 'values')
            option = prefix + (option if isinstance(option, tuple)
                               else (option,))
        return plugin.remove_conf(option, section)
    return wrapper
def wrap_create_op(self, create_name, opt_pos, provider):
    """
    Wrap a `create_*` widget factory of a provider config tab so the
    `option` argument (at positional index `opt_pos`) resolves inside
    the `provider_configuration` entry of this plugin's options.

    This keeps configuration tabs unaware that they live behind the
    completion plugin.
    """
    def wrapper(self, *args, **kwargs):
        if kwargs.get('section', None) is None:
            option = args[opt_pos]
            prefix = ('provider_configuration', provider, 'values')
            option = prefix + (option if isinstance(option, tuple)
                               else (option,))
            args = args[:opt_pos] + (option,) + args[opt_pos + 1:]
        call = getattr(self.parent, create_name)
        widget = call(*args, **kwargs)
        widget.setParent(self)
        return widget
    return wrapper
def wrap_apply_settings(self, Tab, provider):
    """
    Wrap `apply_settings` for a provider config tab so every option it
    reports is rewritten to live inside the `provider_configuration`
    entry of this plugin's options.

    This keeps configuration tabs unaware that they live behind the
    completion plugin.
    """
    prev_method = Tab.apply_settings

    def wrapper(self):
        prefix = ('provider_configuration', provider, 'values')
        return {
            prefix + (opt if isinstance(opt, tuple) else (opt,))
            for opt in prev_method(self)
        }
    return wrapper
# ---------- Completion provider registering/start/stop methods -----------
@staticmethod
def _merge_default_configurations(Provider: SpyderCompletionProvider,
                                  provider_name: str,
                                  provider_configurations: dict):
    """Merge `Provider`'s declared defaults into its stored config.

    Mutates `provider_configurations` in place and returns a tuple
    ``(version_str, values, defaults)`` with the merged result.
    """
    provider_defaults = dict(Provider.CONF_DEFAULTS)
    provider_conf_version = Provider.CONF_VERSION
    if provider_name not in provider_configurations:
        # First time this provider is seen: seed its stored config with
        # the declared defaults.
        # NOTE: 'values' and 'defaults' reference the *same* dict here,
        # so mutations below affect both until they diverge elsewhere.
        provider_config = {
            'version': provider_conf_version,
            'values': provider_defaults,
            'defaults': provider_defaults,
        }
        provider_configurations[provider_name] = provider_config

    # Check if there were any version changes between configurations
    provider_config = provider_configurations[provider_name]
    provider_conf_version = parse(Provider.CONF_VERSION)
    current_conf_version = parse(provider_config['version'])

    current_conf_values = provider_config['values']
    current_defaults = provider_config['defaults']

    # Check if there are new default values and copy them into both the
    # stored values and the stored defaults.
    new_keys = provider_defaults.keys() - current_conf_values.keys()
    for new_key in new_keys:
        current_conf_values[new_key] = provider_defaults[new_key]
        current_defaults[new_key] = provider_defaults[new_key]

    if provider_conf_version > current_conf_version:
        # Check if default values were changed between versions,
        # causing an overwrite of the current options.
        preserved_keys = current_defaults.keys() & provider_defaults.keys()
        for key in preserved_keys:
            if current_defaults[key] != provider_defaults[key]:
                current_defaults[key] = provider_defaults[key]
                current_conf_values[key] = provider_defaults[key]

        if provider_conf_version.major != current_conf_version.major:
            # On a major version bump, keys removed/renamed from the
            # declared defaults are dropped from the stored config too.
            deleted_keys = (
                current_defaults.keys() - provider_defaults.keys())
            for key in deleted_keys:
                current_defaults.pop(key)
                current_conf_values.pop(key)

    # str(...) normalizes the parsed version back to a string.
    return (str(provider_conf_version), current_conf_values,
            current_defaults)
def get_provider_configuration(self, Provider: SpyderCompletionProvider,
                               provider_name: str) -> dict:
    """Build, persist and return the merged config dict for a provider."""
    all_configs = self.get_conf(
        'provider_configuration')

    version, values, defaults = self._merge_default_configurations(
        Provider, provider_name, all_configs)

    merged_config = {
        'version': version,
        'values': values,
        'defaults': defaults,
    }
    all_configs[provider_name] = merged_config

    # Persist the updated configuration mapping.
    self.set_conf('provider_configuration', all_configs)
    return merged_config
def update_request_priorities(self, Provider: SpyderCompletionProvider,
                              provider_name: str):
    """Sort request priorities based on Provider declared order.

    Records ``Provider.DEFAULT_ORDER - 1`` as this provider's priority
    for every request type, caches the provider's speed flag, and
    persists the resulting priority table.
    """
    source_priorities = self.get_conf('request_priorities')
    provider_priority = Provider.DEFAULT_ORDER
    # The speed flag does not depend on the request type, so record it
    # once instead of re-assigning it on every loop iteration.
    self.provider_speed[provider_name] = Provider.SLOW
    for request in COMPLETION_REQUESTS:
        request_priorities = source_priorities.get(request, {})
        request_priorities[provider_name] = provider_priority - 1
        source_priorities[request] = request_priorities
    self.source_priority = source_priorities
    self.set_conf('request_priorities', source_priorities)
def connect_provider_signals(self, provider_instance):
    """Connect SpyderCompletionProvider signals to this plugin."""
    container = self.get_container()
    # (signal, slot) pairs; the last two flow from the plugin to the
    # provider, the rest from the provider to the plugin/container.
    connections = (
        (provider_instance.sig_provider_ready,
         self.provider_available),
        (provider_instance.sig_stop_completions,
         self.sig_stop_completions),
        (provider_instance.sig_response_ready,
         self.receive_response),
        (provider_instance.sig_exception_occurred,
         self.sig_exception_occurred),
        (provider_instance.sig_language_completions_available,
         self.sig_language_completions_available),
        (provider_instance.sig_disable_provider,
         self.shutdown_provider_instance),
        (provider_instance.sig_show_widget,
         container.show_widget),
        (provider_instance.sig_call_statusbar,
         container.statusbar_rpc),
        (provider_instance.sig_open_file,
         self.sig_open_file),
        (self.sig_pythonpath_changed,
         provider_instance.python_path_update),
        (self._sig_interpreter_changed,
         provider_instance.interpreter_changed),
    )
    for signal, slot in connections:
        signal.connect(slot)
def _instantiate_and_register_provider(
        self, Provider: SpyderCompletionProvider):
    """Create a single instance of `Provider` and register it.

    Merges the provider's default configuration with any stored one,
    records its request priorities, connects its signals and registers
    it as STOPPED, with services disabled for every known language.
    """
    provider_name = Provider.COMPLETION_PROVIDER_NAME
    if provider_name in self._available_providers:
        # Already registered; nothing to do.
        return
    self._available_providers[provider_name] = Provider

    logger.debug("Completion plugin: Registering {0}".format(
        provider_name))

    # Merge configuration settings between a provider defaults and
    # the existing ones
    provider_config = self.get_provider_configuration(
        Provider, provider_name)

    # Merge and update source priority order
    self.update_request_priorities(Provider, provider_name)

    # Instantiate provider with its merged configuration values
    provider_instance = Provider(self, provider_config['values'])

    # Signals must be connected before the provider is started
    self.connect_provider_signals(provider_instance)

    self.providers[provider_name] = {
        'instance': provider_instance,
        'status': self.STOPPED
    }

    # The new provider has not started services for any language yet.
    for language in self.language_status:
        server_status = self.language_status[language]
        server_status[provider_name] = False
def start_all_providers(self, force=False):
    """Start every registered provider that is enabled in the config.

    With ``force=True``, providers are started regardless of their
    current status.
    """
    for provider_name, provider_info in self.providers.items():
        if provider_info['status'] != self.STOPPED and not force:
            continue
        enabled = self.get_conf(
            ('enabled_providers', provider_name), True)
        if enabled:
            provider_info['instance'].start()
@Slot(str)
def provider_available(self, provider_name: str):
    """Mark the completion provider `provider_name` as running."""
    self.providers[provider_name]['status'] = self.RUNNING
    self.sig_provider_ready.emit(provider_name)
def start_completion_services_for_language(self, language: str) -> bool:
    """Start completion providers for a given programming language.

    Returns True if at least one running provider started services for
    `language`. Per-provider results are recorded in `language_status`.
    """
    language_providers = self.language_status.get(language, {})
    any_started = False
    for name, info in self.providers.items():
        if info['status'] != self.RUNNING:
            continue
        started = info['instance'].start_completion_services_for_language(
            language)
        any_started = any_started or started
        language_providers[name] = started
    self.language_status[language] = language_providers
    return any_started
def stop_completion_services_for_language(self, language: str):
    """Stop completion providers for a given programming language.

    Providers that are not running are skipped. The `language` entry is
    removed from `language_status`; stopping a language that was never
    started is a no-op instead of raising KeyError.
    """
    for provider_info in self.providers.values():
        if provider_info['status'] == self.RUNNING:
            provider_info['instance'].stop_completion_services_for_language(
                language)
    # Default avoids a KeyError for unknown/never-started languages.
    self.language_status.pop(language, None)
def get_provider(self, name: str) -> SpyderCompletionProvider:
    """Return the :class:`SpyderCompletionProvider` registered as `name`."""
    provider_info = self.providers[name]
    return provider_info['instance']
def is_provider_running(self, name: str) -> bool:
    """Return if provider is running.

    Looks the provider up in `self.providers`, which is where every
    other method of this plugin stores the ``{'instance', 'status'}``
    record (the previous ``self.clients`` lookup matched no attribute
    used anywhere else in this plugin).
    """
    status = self.providers.get(name, {}).get('status', self.STOPPED)
    return status == self.RUNNING
def available_providers_for_language(self, language: str) -> List[str]:
    """Return the list of providers available for a given language."""
    if language not in self.language_status:
        return []
    provider_status = self.language_status[language]
    return [name for name, available in provider_status.items()
            if available]
def is_fallback_only(self, language: str) -> bool:
    """
    Return if fallback and snippets are the only available providers for
    a given language.
    """
    available = set(self.available_providers_for_language(language))
    # True when nothing beyond the lightweight providers is available
    # (also True when no provider is available at all).
    return available <= {'snippets', 'fallback'}
def sort_providers_for_request(
        self, providers: List[str], req_type: str) -> List[str]:
    """Order `providers` by their configured priority for `req_type`."""
    priority = self.source_priority[req_type]
    return sorted(providers, key=priority.__getitem__)
def start_provider_instance(self, provider_name: str):
    """Start `provider_name` if it is currently stopped.

    Also (re)starts its completion services for every known language
    and records the per-language result in `language_status`.
    """
    provider_info = self.providers[provider_name]
    if provider_info['status'] != self.STOPPED:
        return
    instance = provider_info['instance']
    instance.start()
    for language, language_providers in self.language_status.items():
        language_providers[provider_name] = (
            instance.start_completion_services_for_language(language))
def shutdown_provider_instance(self, provider_name: str):
    """Shut down `provider_name` if it is currently running.

    Marks the provider as stopped and flags its services as unavailable
    for every language that knew about it.
    """
    provider_info = self.providers[provider_name]
    if provider_info['status'] != self.RUNNING:
        return
    provider_info['instance'].shutdown()
    provider_info['status'] = self.STOPPED
    for language_providers in self.language_status.values():
        if provider_name in language_providers:
            language_providers[provider_name] = False
# ---------- Methods to create/access graphical elements -----------
def create_action(self, *args, **kwargs):
    """Create an action through this plugin's container.

    The container is forced to be the action's parent.
    """
    container = self.get_container()
    kwargs['parent'] = container
    return container.create_action(*args, **kwargs)
def get_action(self, *args, **kwargs):
    """Look up a previously created action on this plugin's container."""
    container = self.get_container()
    return container.get_action(*args, **kwargs)
def get_application_menu(self, *args, **kwargs):
    """Return an application menu from the MainMenu plugin.

    Returns None when the MainMenu plugin is unavailable.
    """
    # TODO: Check if this method makes sense with the new plugin
    # registration mechanism.
    main_menu = self.get_plugin(Plugins.MainMenu)
    if main_menu:
        return main_menu.get_application_menu(*args, **kwargs)
def get_menu(self, *args, **kwargs):
    """Look up a previously created menu on this plugin's container."""
    container = self.get_container()
    return container.get_menu(*args, **kwargs)
def create_application_menu(self, *args, **kwargs):
    """Queue an application menu for deferred creation.

    The call arguments are stored in `application_menus_to_create`;
    presumably they are replayed once the MainMenu plugin is available —
    confirm against this plugin's setup code.
    """
    self.application_menus_to_create.append((args, kwargs))
def create_menu(self, *args, **kwargs):
    """Create a menu through this plugin's container."""
    container = self.get_container()
    return container.create_menu(*args, **kwargs)
def add_item_to_application_menu(self, *args, **kwargs):
    """Queue an application-menu item for deferred insertion.

    The call arguments are stored in `items_to_add_to_application_menus`;
    presumably they are replayed once the MainMenu plugin is available —
    confirm against this plugin's setup code.
    """
    self.items_to_add_to_application_menus.append((args, kwargs))
def remove_item_from_application_menu(self, *args, **kwargs):
    """Remove an item from an application menu via the MainMenu plugin.

    No-op when the MainMenu plugin is unavailable.
    """
    main_menu = self.get_plugin(Plugins.MainMenu)
    if main_menu:
        main_menu.remove_item_from_application_menu(*args, **kwargs)
def add_item_to_menu(self, *args, **kwargs):
    """Add an item to a menu owned by this plugin's container."""
    container = self.get_container()
    container.add_item_to_menu(*args, **kwargs)
# --------------- Public completion API request methods -------------------
def send_request(self, language: str, req_type: str, req: dict):
    """
    Send a completion or linting request to all available providers.

    The completion request `req_type` needs to have a response.

    Parameters
    ----------
    language: str
        Name of the programming language of the file that emits the
        request.
    req_type: str
        Type of request, one of
        :class:`spyder.plugins.completion.api.CompletionRequestTypes`
    req: dict
        Request body
        {
            'filename': str,
            **kwargs: request-specific parameters
        }
    """
    req_id = self.req_id
    self.req_id += 1
    # Only a weak reference to the requesting instance is kept so a
    # closed editor can be collected before the response arrives.
    self.requests[req_id] = {
        'language': language,
        'req_type': req_type,
        'response_instance': weakref.ref(req['response_instance']),
        'sources': {},
        'timed_out': False,
    }

    # Check if there are two or more slow completion providers
    # in order to start the timeout counter.
    # NOTE(review): the comment above says "two or more" but the guard
    # below fires only when the sum is strictly greater than 2 —
    # confirm which behavior is intended.
    providers = self.available_providers_for_language(language.lower())
    slow_provider_count = sum([self.provider_speed[p] for p in providers])

    # Start the timer on this request
    if req_type in self.AGGREGATE_RESPONSES and slow_provider_count > 2:
        if self.wait_for_ms > 0:
            QTimer.singleShot(self.wait_for_ms,
                              lambda: self.receive_timeout(req_id))
        else:
            # No timeout configured: mark the request as already timed
            # out so responses are delivered as they arrive.
            self.requests[req_id]['timed_out'] = True

    # Send request to all running completion providers
    for provider_name in providers:
        provider_info = self.providers[provider_name]
        provider_info['instance'].send_request(
            language, req_type, req, req_id)
def send_notification(
        self, language: str, notification_type: str, notification: dict):
    """
    Send a notification to all available completion providers.

    Parameters
    ----------
    language: str
        Name of the programming language of the file that emits the
        request.
    notification_type: str
        Type of request, one of
        :class:`spyder.plugins.completion.api.CompletionRequestTypes`
    notification: dict
        Request body
        {
            'filename': str,
            **kwargs: notification-specific parameters
        }
    """
    available = self.available_providers_for_language(language.lower())
    for provider_name in available:
        provider_info = self.providers[provider_name]
        if provider_info['status'] != self.RUNNING:
            continue
        provider_info['instance'].send_notification(
            language, notification_type, notification)
def broadcast_notification(self, req_type: str, req: dict):
    """
    Send a notification to all running completion providers for all
    programming languages.

    Parameters
    ----------
    req_type: str
        Type of request, one of
        :class:`spyder.plugins.completion.api.CompletionRequestTypes`.
    req: dict
        Request body:
        {
            'filename': str,
            **kwargs: notification-specific parameters
        }
    """
    for provider_info in self.providers.values():
        if provider_info['status'] == self.RUNNING:
            provider_info['instance'].broadcast_notification(
                req_type, req)
def project_path_update(self, project_path: str, update_kind='addition',
                        instance=None):
    """
    Forward a project path change to every running provider.

    Parameters
    ----------
    project_path: str
        Path to the project folder being added or removed.
    update_kind: str
        Path update kind, one of
        :class:`spyder.plugins.completion.WorkspaceUpdateKind`.
    instance: object
        Reference to :class:`spyder.plugins.projects.plugin.Projects`.
    """
    for provider_info in self.providers.values():
        if provider_info['status'] != self.RUNNING:
            continue
        provider_info['instance'].project_path_update(
            project_path, update_kind, instance
        )
@Slot(str, str)
def file_opened_closed_or_updated(self, filename: str, language: str):
    """
    Handle file modifications and file switching events, including when
    a new file is created.

    Parameters
    ----------
    filename: str
        Path to the file that was changed/opened/focused.
    language: str
        Name of the programming language of the file that was
        changed/opened/focused.
    """
    if filename is None or language is None:
        return
    for provider_info in self.providers.values():
        if provider_info['status'] == self.RUNNING:
            provider_info['instance'].file_opened_closed_or_updated(
                filename, language)
def register_file(self, language: str, filename: str, codeeditor):
    """
    Register a file with every running provider to perform completions.

    If a language client is not available for a given file, then this
    method should keep a queue, such that files can be initialized once
    a server is available.

    Parameters
    ----------
    language: str
        Programming language of the given file.
    filename: str
        Filename to register.
    codeeditor: spyder.plugins.editor.widgets.codeeditor.CodeEditor
        Codeeditor to send the client configurations.
    """
    for provider_info in self.providers.values():
        if provider_info['status'] == self.RUNNING:
            provider_info['instance'].register_file(
                language, filename, codeeditor
            )
# ----------------- Completion result processing methods ------------------
@Slot(str, int, dict)
def receive_response(
        self, completion_source: str, req_id: int, resp: dict):
    """Process request response from a completion provider."""
    logger.debug("Completion plugin: Request {0} Got response "
                 "from {1}".format(req_id, completion_source))

    if req_id not in self.requests:
        # The request was already answered or discarded; ignore late
        # responses.
        return

    # Serialize access to `self.requests` with the timeout handler.
    with QMutexLocker(self.collection_mutex):
        request_responses = self.requests[req_id]
        request_responses['sources'][completion_source] = resp
        self.match_and_reply(req_id)
@Slot(int)
def receive_timeout(self, req_id: int):
    """Collect all provider completions and reply on timeout."""
    # On timeout, collect all completions and return to the user
    if req_id not in self.requests:
        # The request was answered before the timer fired.
        return

    logger.debug("Completion plugin: Request {} timed out".format(req_id))

    # Serialize access to `self.requests` with `receive_response`.
    with QMutexLocker(self.collection_mutex):
        request_responses = self.requests[req_id]
        request_responses['timed_out'] = True
        self.match_and_reply(req_id)
def match_and_reply(self, req_id: int):
    """
    Decide how to send the responses corresponding to req_id to
    the instance that requested them.

    Aggregated request types reply once every available provider has
    answered (or, after a timeout, as soon as any non-empty response
    exists); non-aggregated ones reply on the first non-empty response.
    """
    if req_id not in self.requests:
        # Already replied to or discarded.
        return
    request_responses = self.requests[req_id]
    language = request_responses['language'].lower()
    req_type = request_responses['req_type']

    available_providers = self.available_providers_for_language(
        language)
    sorted_providers = self.sort_providers_for_request(
        available_providers, req_type)

    if req_type in self.AGGREGATE_RESPONSES:
        # Wait only for the available providers for the given request
        timed_out = request_responses['timed_out']
        all_returned = all(source in request_responses['sources']
                           for source in sorted_providers)

        if not timed_out:
            # Before the timeout: only reply once everyone answered.
            if all_returned:
                self.skip_and_reply(req_id)
        else:
            # After the timeout: reply with whatever non-empty
            # responses have arrived so far.
            any_nonempty = any(request_responses['sources'].get(source)
                               for source in sorted_providers)

            if all_returned or any_nonempty:
                self.skip_and_reply(req_id)
    else:
        # Any empty response will be discarded and the completion
        # loop will wait for the next non-empty response.
        # This should fix the scenario where a provider does not have a
        # response for a non-aggregated request but the LSP does.
        any_nonempty = any(request_responses['sources'].get(source)
                           for source in sorted_providers)

        if any_nonempty:
            self.skip_and_reply(req_id)
def skip_and_reply(self, req_id: int):
    """
    Skip intermediate responses coming from the same CodeEditor
    instance for some types of requests, and send the last one to
    it.
    """
    request_responses = self.requests[req_id]
    req_type = request_responses['req_type']
    # Identity of the requesting editor; the weakref is dereferenced,
    # so a dead reference yields id(None) for grouping purposes.
    response_instance = id(request_responses['response_instance']())
    do_send = True

    # This is necessary to prevent sending completions for old requests
    # See spyder-ide/spyder#10798
    if req_type in self.SKIP_INTERMEDIATE_REQUESTS:
        # Only the newest pending request of this type for this editor
        # is answered; `or [-1]` guards against an empty candidate list.
        max_req_id = max(
            [key for key, item in self.requests.items()
             if item['req_type'] == req_type
             and id(item['response_instance']()) == response_instance]
            or [-1])
        do_send = (req_id == max_req_id)

    logger.debug("Completion plugin: Request {} removed".format(req_id))
    del self.requests[req_id]

    # Send only recent responses
    if do_send:
        self.gather_and_reply(request_responses)
def gather_and_reply(self, request_responses: dict):
    """
    Merge the per-provider responses of a request and hand the result
    to the CodeEditor instance that issued it.
    """
    req_type = request_responses['req_type']
    sources = request_responses['sources']
    editor = request_responses['response_instance']()

    logger.debug('Gather responses for {0}'.format(req_type))

    if req_type == CompletionRequestTypes.DOCUMENT_COMPLETION:
        responses = self.gather_completions(sources)
    else:
        responses = self.gather_responses(req_type, sources)

    try:
        if editor:
            editor.handle_response(req_type, responses)
    except RuntimeError:
        # This is triggered when a codeeditor instance has been
        # removed before the response can be processed.
        pass
def gather_completions(self, req_id_responses: dict):
    """Gather completion responses from providers.

    Providers are visited in priority order (lower value == higher
    priority). Entries whose stripped label was already produced by a
    higher-priority provider are dropped, and each surviving entry has
    its `sortText` prefixed with the provider's priority so
    higher-priority completions sort first on the client.
    """
    priority_map = self.source_priority[
        CompletionRequestTypes.DOCUMENT_COMPLETION]
    # sorted() accepts the dict directly; no intermediate list needed.
    sources = sorted(priority_map, key=priority_map.get)

    merge_stats = {source: 0 for source in req_id_responses}
    responses = []
    dedupe_set = set()
    for priority, source in enumerate(sources):
        if source not in req_id_responses:
            continue
        # `or []` also guards against an explicit {'params': None}.
        for response in req_id_responses[source].get('params') or []:
            dedupe_key = response['label'].strip()
            if dedupe_key in dedupe_set:
                continue
            dedupe_set.add(dedupe_key)
            response['sortText'] = (priority, response['sortText'])
            responses.append(response)
            merge_stats[source] += 1

    logger.debug('Responses statistics: {0}'.format(merge_stats))
    responses = {'params': responses}
    return responses
def gather_responses(self, req_type: int, responses: dict):
    """Gather responses other than completions from providers.

    Returns the first truthy 'params' payload following the configured
    priority order for `req_type` (lower value == higher priority),
    matching how `gather_completions` orders providers instead of
    relying on dict insertion order.
    """
    priority_map = self.source_priority[req_type]
    response = None
    for source in sorted(priority_map, key=priority_map.get):
        if source in responses:
            response = responses[source].get('params', None)
            if response:
                break
    return {'params': response}
| CompletionPlugin |
python | huggingface__transformers | src/transformers/models/gemma/modeling_gemma.py | {
"start": 22195,
"end": 22300
} | class ____(GenericForSequenceClassification, GemmaPreTrainedModel):
pass
| GemmaForSequenceClassification |
python | davidhalter__jedi | jedi/inference/value/instance.py | {
"start": 5923,
"end": 6760
} | class ____(AbstractInstanceValue):
# This is not really a compiled class, it's just an instance from a
# compiled class.
def __init__(self, inference_state, parent_context, class_value, arguments):
super().__init__(inference_state, parent_context, class_value)
self._arguments = arguments
def get_filters(self, origin_scope=None, include_self_names=True):
class_value = self.get_annotated_class_object()
class_filters = class_value.get_filters(
origin_scope=origin_scope,
is_instance=True,
)
for f in class_filters:
yield CompiledInstanceClassFilter(self, f)
@property
def name(self):
return compiled.CompiledValueName(self, self.class_value.name.string_name)
def is_stub(self):
return False
| CompiledInstance |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/visitors/query_expression.py | {
"start": 1831,
"end": 2767
} | class ____(QueryExpressionVisitor[QueryExpression]):
"""
Visitor that recursively injects a `ConditionGroup` into all `Timeseries`.
"""
def __init__(self, condition_group: ConditionGroup):
self._condition_group = condition_group
def _visit_formula(self, formula: Formula) -> QueryExpression:
if self._condition_group:
current_filters = formula.filters if formula.filters else []
current_filters.extend(self._condition_group)
return formula.set_filters(current_filters)
return formula
def _visit_timeseries(self, timeseries: Timeseries) -> QueryExpression:
if self._condition_group:
current_filters = timeseries.filters if timeseries.filters else []
current_filters.extend(self._condition_group)
return timeseries.set_filters(current_filters)
return timeseries
| TimeseriesConditionInjectionVisitor |
python | doocs__leetcode | solution/0600-0699/0635.Design Log Storage System/Solution.py | {
"start": 0,
"end": 681
} | class ____:
def __init__(self):
self.logs = []
self.d = {
"Year": 4,
"Month": 7,
"Day": 10,
"Hour": 13,
"Minute": 16,
"Second": 19,
}
def put(self, id: int, timestamp: str) -> None:
self.logs.append((id, timestamp))
def retrieve(self, start: str, end: str, granularity: str) -> List[int]:
i = self.d[granularity]
return [id for id, ts in self.logs if start[:i] <= ts[:i] <= end[:i]]
# Your LogSystem object will be instantiated and called as such:
# obj = LogSystem()
# obj.put(id,timestamp)
# param_2 = obj.retrieve(start,end,granularity)
| LogSystem |
python | wandb__wandb | wandb/vendor/pygments/lexers/smalltalk.py | {
"start": 474,
"end": 5359
} | class ____(RegexLexer):
"""
For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
.. versionadded:: 0.10
"""
name = 'Smalltalk'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak', 'st']
mimetypes = ['text/x-smalltalk']
tokens = {
'root': [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
# temporaries
(r'[\]({}.;!]', Text),
],
'method definition': [
# Not perfect can't allow whitespaces at the beginning and the
# without breaking everything
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables': [
include('whitespaces'),
(r'(:)(\s*)(\w+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '#pop'),
default('#pop'), # else pop
],
'literals': [
(r"'(''|[^'])*'", String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper': [
include('whitespaces'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
# literals
(r"'(''|[^'])*'", String),
(r'\$.', String.Char),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth': [
# This state is a bit tricky since
# we can't just pop this state
(r'\)', String.Symbol, ('root', 'afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces': [
# skip whitespace and comments
(r'\s+', Text),
(r'"(""|[^"])*"', Comment),
],
'objects': [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject': [
(r'! !$', Keyword, '#pop'), # squeak chunk delimiter
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout': [
# Squeak fileout format (optional)
(r'^"(""|[^"])*"!', Keyword),
(r"^'(''|[^'])*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
| SmalltalkLexer |
python | django__django | tests/postgres_tests/models.py | {
"start": 3648,
"end": 3788
} | class ____(PostgreSQLModel):
line = models.ForeignKey("Line", models.CASCADE)
query = models.CharField(max_length=100)
| LineSavedSearch |
python | walkccc__LeetCode | solutions/624. Maximum Distance in Arrays/624-2.py | {
"start": 0,
"end": 539
} | class ____:
def maxDistance(self, arrays: list[list[int]]) -> int:
min1, index_min1 = min((A[0], i) for i, A in enumerate(arrays))
max1, index_max1 = max((A[-1], i) for i, A in enumerate(arrays))
if index_min1 != index_max1:
return max1 - min1
min2, index_min2 = min((A[0], i)
for i, A in enumerate(arrays) if i != index_min1)
max2, index_min2 = max((A[-1], i)
for i, A in enumerate(arrays) if i != index_max1)
return max(max1 - min2, max2 - min1)
| Solution |
python | google__pytype | pytype/tests/test_typed_dict.py | {
"start": 11080,
"end": 14286
} | class ____(test_base.BaseTest):
"""Tests for typing.TypedDict functional constructor."""
def test_constructor(self):
self.CheckWithErrors("""
from typing_extensions import TypedDict
A = TypedDict("A", {"x": int, "y": str})
B = TypedDict("B", "b") # wrong-arg-types
C = TypedDict("C") # wrong-arg-count
""")
def test_init(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
A = TypedDict("A", {"x": int, "y": str})
a = A(x=1, y='2')
b = A(x=1, y=2) # wrong-arg-types[e1]
c = A(x=1) # missing-parameter[e2]
d = A(y='1') # missing-parameter
e = A(1, '2') # missing-parameter
""")
self.assertErrorSequences(
err,
{
"e1": ["Expected", "(*, x, y: str)", "Actual", "(x, y: int)"],
"e2": ["Expected", "(*, x, y)", "Actual", "(x)"],
},
)
def test_annotation(self):
err = self.CheckWithErrors("""
from typing_extensions import TypedDict
A = TypedDict("A", {"x": int, "y": str})
a: A = {'x': '10', 'z': 20} # annotation-type-mismatch[e]
""")
self.assertErrorSequences(
err,
{
"e": [
"Annotation: A",
"extra keys",
"z",
"type errors",
"{'x': ...}",
"expected int",
"got str",
]
},
)
def test_keyword_field_name(self):
with self.DepTree([(
"foo.py",
"""
from typing_extensions import TypedDict
A = TypedDict("A", {"in": int})
""",
)]):
self.Check("""
import foo
a: foo.A
assert_type(a["in"], int)
""")
def test_colon_field_name(self):
with self.DepTree([(
"foo.py",
"""
from typing_extensions import TypedDict
XMLDict = TypedDict("XMLDict", {"xml:name": str})
""",
)]):
self.Check("""
import foo
d: foo.XMLDict
assert_type(d["xml:name"], str)
""")
def test_total(self):
ty = self.Infer("""
from typing_extensions import TypedDict
X = TypedDict('X', {'name': str}, total=False)
X()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TypedDict
class X(TypedDict, total=False):
name: str
""",
)
self.Check("""
from typing import Any, Generic, TypedDict, TypeVar, is_typeddict
T = TypeVar('T')
class Foo(TypedDict, Generic[T]):
foo: str
bar: T
x: Foo
assert_type(x['foo'], str)
assert_type(x['bar'], Any) # TODO(b/328744430): Properly support generics.
""")
def test_ambiguous_field_type(self):
self.CheckWithErrors("""
from typing_extensions import TypedDict
if __random__:
v = str
else:
v = int
X = TypedDict('X', {'k': v}) # invalid-annotation
""")
_SINGLE = """
from typing import TypedDict
class A(TypedDict):
x: int
y: str
"""
_MULTIPLE = """
from typing import TypedDict
class A(TypedDict):
x: int
y: str
class B(A):
z: int
"""
| TypedDictFunctionalTest |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 56306,
"end": 56650
} | class ____(BaseModel):
usage: Optional["Usage"] = Field(default=None, description="")
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional[str] = Field(default=None, description="")
result: Optional["ScrollResult"] = Field(default=None, description="")
| InlineResponse20015 |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/conv_ops_test.py | {
"start": 123593,
"end": 125452
} | class ____(test.TestCase):
def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that DeepConv2D and Conv2D produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
with self.cached_session(use_gpu=False):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
os.environ["TF_USE_DEEP_CONV2D"] = "0"
values_expect = self.evaluate([conv])
os.environ["TF_USE_DEEP_CONV2D"] = "1"
values_test = self.evaluate([conv])
self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)
def _RunTestCases(self, conv_strides, padding):
input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
[2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
[3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
for input_shape, filter_shape in zip(input_sizes, filter_sizes):
self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)
def testConv2D3x3FilterStride1x1Valid(self):
self._RunTestCases([1, 1], "VALID")
def testConv2D3x3FilterStride1x1Same(self):
self._RunTestCases([1, 1], "SAME")
| DeepConv2DTest |
python | python__mypy | mypy/nodes.py | {
"start": 25034,
"end": 26342
} | class ____(Node):
"""A single argument in a FuncItem."""
__slots__ = ("variable", "type_annotation", "initializer", "kind", "pos_only")
__match_args__ = ("variable", "type_annotation", "initializer", "kind", "pos_only")
def __init__(
self,
variable: Var,
type_annotation: mypy.types.Type | None,
initializer: Expression | None,
kind: ArgKind,
pos_only: bool = False,
) -> None:
super().__init__()
self.variable = variable
self.type_annotation = type_annotation
self.initializer = initializer
self.kind = kind # must be an ARG_* constant
self.pos_only = pos_only
def set_line(
self,
target: Context | int,
column: int | None = None,
end_line: int | None = None,
end_column: int | None = None,
) -> None:
super().set_line(target, column, end_line, end_column)
if self.initializer and self.initializer.line < 0:
self.initializer.set_line(self.line, self.column, self.end_line, self.end_column)
self.variable.set_line(self.line, self.column, self.end_line, self.end_column)
# These specify the kind of a TypeParam
TYPE_VAR_KIND: Final = 0
PARAM_SPEC_KIND: Final = 1
TYPE_VAR_TUPLE_KIND: Final = 2
| Argument |
python | kamyu104__LeetCode-Solutions | Python/number-of-operations-to-make-network-connected.py | {
"start": 544,
"end": 981
} | class ____(object):
def makeConnected(self, n, connections):
"""
:type n: int
:type connections: List[List[int]]
:rtype: int
"""
if len(connections) < n-1:
return -1
union_find = UnionFind(n)
for i, j in connections:
union_find.union_set(i, j)
return union_find.count - 1
# Time: O(|E| + |V|)
# Space: O(|V|)
import collections
| Solution |
python | huggingface__transformers | src/transformers/models/glm46v/modular_glm46v.py | {
"start": 5298,
"end": 5490
} | class ____(Glm4vProcessor):
def replace_frame_token_id(self, timestamp_sec):
return f"<|begin_of_image|>{self.image_token}<|end_of_image|>{timestamp_sec:.1f} seconds"
| Glm46VProcessor |
python | getsentry__sentry | src/sentry/seer/autofix/constants.py | {
"start": 1031,
"end": 1160
} | class ____(enum.Enum):
ISSUE_DETAILS = "issue_details"
ALERT = "alert"
POST_PROCESS = "post_process"
| SeerAutomationSource |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/unbatch_test.py | {
"start": 9969,
"end": 11246
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def build_dataset(self,
multiplier=15.0,
tensor_slice_len=2,
batch_size=2,
options=None):
components = (np.arange(tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(tensor_slice_len))
dataset = dataset_ops.Dataset.from_tensor_slices(components).batch(
batch_size).unbatch()
if options:
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def test(self, verify_fn, symbolic_checkpoint):
tensor_slice_len = 8
batch_size = 2
num_outputs = tensor_slice_len
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(
self,
lambda: self.build_dataset(15.0, tensor_slice_len, batch_size, options),
num_outputs)
| UnbatchCheckpointTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.