language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | plotly__plotly.py | plotly/graph_objs/sankey/legendgrouptitle/_font.py | {
"start": 233,
"end": 9921
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sankey.legendgrouptitle"
_path_str = "sankey.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sankey.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | redis__redis-py | tests/test_asyncio/test_pubsub.py | {
"start": 36510,
"end": 39586
} | class ____:
@pytest.mark.skipif(
sys.version_info < (3, 8), reason="requires python 3.8 or higher"
)
async def test_outer_timeout(self, r: redis.Redis):
"""
Using asyncio_timeout manually outside the inner method timeouts works.
This works on Python versions 3.8 and greater, at which time asyncio.
CancelledError became a BaseException instead of an Exception before.
"""
pubsub = r.pubsub()
await pubsub.subscribe("foo")
assert pubsub.connection.is_connected
async def get_msg_or_timeout(timeout=0.1):
async with async_timeout(timeout):
# blocking method to return messages
while True:
response = await pubsub.parse_response(block=True)
message = await pubsub.handle_message(
response, ignore_subscribe_messages=False
)
if message is not None:
return message
# get subscribe message
msg = await get_msg_or_timeout(10)
assert msg is not None
# timeout waiting for another message which never arrives
assert pubsub.connection.is_connected
with pytest.raises(asyncio.TimeoutError):
await get_msg_or_timeout()
# the timeout on the read should not cause disconnect
assert pubsub.connection.is_connected
@pytest.mark.skipif(
sys.version_info < (3, 8), reason="requires python 3.8 or higher"
)
async def test_base_exception(self, r: redis.Redis):
"""
Manually trigger a BaseException inside the parser's .read_response method
and verify that it isn't caught
"""
pubsub = r.pubsub()
await pubsub.subscribe("foo")
assert pubsub.connection.is_connected
async def get_msg():
# blocking method to return messages
while True:
response = await pubsub.parse_response(block=True)
message = await pubsub.handle_message(
response, ignore_subscribe_messages=False
)
if message is not None:
return message
# get subscribe message
msg = await get_msg()
assert msg is not None
# timeout waiting for another message which never arrives
assert pubsub.connection.is_connected
with (
patch("redis._parsers._AsyncRESP2Parser.read_response") as mock1,
patch("redis._parsers._AsyncHiredisParser.read_response") as mock2,
patch("redis._parsers._AsyncRESP3Parser.read_response") as mock3,
):
mock1.side_effect = BaseException("boom")
mock2.side_effect = BaseException("boom")
mock3.side_effect = BaseException("boom")
with pytest.raises(BaseException):
await get_msg()
# the timeout on the read should not cause disconnect
assert pubsub.connection.is_connected
| TestBaseException |
python | h5py__h5py | h5py/_hl/base.py | {
"start": 4662,
"end": 6821
} | class ____:
"""
Mixin class that allows sharing information between objects which
reside in the same HDF5 file. Requires that the host class have
a ".id" attribute which returns a low-level ObjectID subclass.
Also implements Unicode operations.
"""
@property
def _lapl(self):
""" Fetch the link access property list appropriate for this object
"""
return dlapl
@property
def _lcpl(self):
""" Fetch the link creation property list appropriate for this object
"""
return dlcpl
def _e(self, name, lcpl=None):
""" Encode a name according to the current file settings.
Returns name, or 2-tuple (name, lcpl) if lcpl is True
- Binary strings are always passed as-is, h5t.CSET_ASCII
- Unicode strings are encoded utf8, h5t.CSET_UTF8
If name is None, returns either None or (None, None) appropriately.
"""
def get_lcpl(coding):
""" Create an appropriate link creation property list """
lcpl = self._lcpl.copy()
lcpl.set_char_encoding(coding)
return lcpl
if name is None:
return (None, None) if lcpl else None
if isinstance(name, bytes):
coding = h5t.CSET_ASCII
elif isinstance(name, str):
try:
name = name.encode('ascii')
coding = h5t.CSET_ASCII
except UnicodeEncodeError:
name = name.encode('utf8')
coding = h5t.CSET_UTF8
else:
raise TypeError(f"A name should be string or bytes, not {type(name)}")
if lcpl:
return name, get_lcpl(coding)
return name
def _d(self, name):
""" Decode a name according to the current file settings.
- Try to decode utf8
- Failing that, return the byte string
If name is None, returns None.
"""
if name is None:
return None
try:
return name.decode('utf8')
except UnicodeDecodeError:
pass
return name
| CommonStateObject |
python | astropy__astropy | astropy/stats/histogram.py | {
"start": 11325,
"end": 13554
} | class ____:
r"""Class which implements the function minimized by knuth_bin_width.
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data: ArrayLike) -> None:
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M: int) -> NDArray:
"""Return the bin edges given M number of bins."""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M: int) -> float:
return self.eval(M)
def eval(self, M: int) -> float:
"""Evaluate the Knuth function.
Parameters
----------
M : int
Number of bins
Returns
-------
F : float
evaluation of the negative Knuth loglikelihood function:
smaller values indicate a better fit.
"""
if not np.isscalar(M):
M = M[0]
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(
self.n * np.log(M)
+ self.gammaln(0.5 * M)
- M * self.gammaln(0.5)
- self.gammaln(self.n + 0.5 * M)
+ np.sum(self.gammaln(nk + 0.5))
)
| _KnuthF |
python | ApeWorX__ape | tests/functional/test_dependencies.py | {
"start": 17471,
"end": 18478
} | class ____:
NAME = "testlocaldep"
VERSION = "1.0.0"
PATH = Path.cwd()
@pytest.fixture
def dependency(self):
return LocalDependency(local=self.PATH, name=self.NAME, version=self.VERSION)
@property
def clean_path(self) -> str:
return str(self.PATH).replace(str(Path.home()), "$HOME")
def test_repr(self, dependency):
actual = repr(dependency)
expected = f"<LocalDependency local={self.clean_path}, version={self.VERSION}>"
assert actual == expected
def test_name(self, dependency):
assert dependency.name == self.NAME
def test_version(self, dependency):
assert dependency.version == self.VERSION
def test_uri(self, dependency):
assert dependency.uri == self.PATH.as_uri()
def test_local_expands_user(self):
path = "~/path/to/dep"
dependency = LocalDependency(local=path, name=self.NAME, version=self.VERSION)
assert "~" not in f"{dependency.local}"
| TestLocalDependency |
python | huggingface__transformers | src/transformers/models/edgetam_video/modular_edgetam_video.py | {
"start": 31239,
"end": 31303
} | class ____(Sam2VideoFeedForward):
pass
| EdgeTamVideoFeedForward |
python | doocs__leetcode | solution/0500-0599/0565.Array Nesting/Solution.py | {
"start": 0,
"end": 443
} | class ____:
def arrayNesting(self, nums: List[int]) -> int:
n = len(nums)
vis = [False] * n
res = 0
for i in range(n):
if vis[i]:
continue
cur, m = nums[i], 1
vis[cur] = True
while nums[cur] != nums[i]:
cur = nums[cur]
m += 1
vis[cur] = True
res = max(res, m)
return res
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/graph_stores/types.py | {
"start": 2709,
"end": 3271
} | class ____(BaseModel):
"""A relation connecting two entities in a graph."""
label: str
source_id: str
target_id: str
properties: Dict[str, Any] = Field(default_factory=dict)
def __str__(self) -> str:
"""Return the string representation of the relation."""
if self.properties:
return f"{self.label} ({self.properties})"
return self.label
@property
def id(self) -> str:
"""Get the relation id."""
return self.label
Triplet = Tuple[LabelledNode, Relation, LabelledNode]
| Relation |
python | tensorflow__tensorflow | tensorflow/python/module/module_test.py | {
"start": 7402,
"end": 8046
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def test_not_memoized_in_tf1(self):
if tf2.enabled():
self.skipTest("Requires TF1")
mod = module.Module(name="name")
name_scope_1 = mod.name_scope
name_scope_2 = mod.name_scope
self.assertIsNot(name_scope_1, name_scope_2)
self.assertEqual(name_scope_1.name, name_scope_2.name)
def test_memoized_in_tf2(self):
if not tf2.enabled():
self.skipTest("Requires TF2")
mod = module.Module(name="name")
name_scope_1 = mod.name_scope
name_scope_2 = mod.name_scope
self.assertIs(name_scope_1, name_scope_2)
| NameScopeTest |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-cometapi/llama_index/llms/cometapi/base.py | {
"start": 478,
"end": 2593
} | class ____(OpenAILike):
"""
CometAPI LLM.
CometAPI provides access to various state-of-the-art LLM models including GPT series,
Claude series, Gemini series, and more. To use CometAPI, you need to obtain an API key
from https://api.cometapi.com/console/token.
Examples:
`pip install llama-index-llms-cometapi`
```python
from llama_index.llms.cometapi import CometAPI
llm = CometAPI(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="gpt-4o-mini",
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model: str = Field(
description="The CometAPI model to use. See https://api.cometapi.com/pricing for available models."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "COMETAPI_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "COMETAPI_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "CometAPI_LLM"
| CometAPI |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_simple01.py | {
"start": 342,
"end": 3342
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("simple01.xlsx")
def test_create_file(self):
"""Test the creation of a simple workbook."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string(0, 0, "Hello")
worksheet.write_number(1, 0, 123)
workbook.close()
self.assertExcelEqual()
def test_create_file_A1(self):
"""Test the creation of a simple workbook with A1 notation."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string("A1", "Hello")
worksheet.write_number("A2", 123)
workbook.close()
self.assertExcelEqual()
def test_create_file_write(self):
"""Test the creation of a simple workbook using write()."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "Hello")
worksheet.write(1, 0, 123)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_statement(self):
"""Test the creation of a simple workbook using `with` statement."""
with Workbook(self.got_filename) as workbook:
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "Hello")
worksheet.write(1, 0, 123)
self.assertExcelEqual()
def test_create_file_write_A1(self):
"""Test the creation of a simple workbook using write() with A1."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Hello")
worksheet.write("A2", 123)
workbook.close()
self.assertExcelEqual()
def test_create_file_kwargs(self):
"""Test the creation of a simple workbook with keyword args."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string(row=0, col=0, string="Hello")
worksheet.write_number(row=1, col=0, number=123)
workbook.close()
self.assertExcelEqual()
def test_create_file_write_date_default(self):
"""Test writing a datetime without a format. Issue #33"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Hello")
worksheet.write("A2", date(1900, 5, 2))
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test the creation of a simple workbook."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
worksheet.write_string(0, 0, "Hello")
worksheet.write_number(1, 0, 123)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | spack__spack | lib/spack/spack/solver/input_analysis.py | {
"start": 19109,
"end": 20979
} | class ____(MinimalDuplicatesCounter):
def possible_packages_facts(self, gen, fn):
counter = collections.Counter(
list(self._link_run) + list(self._total_build) + list(self._direct_build)
)
gen.h2("Maximum number of nodes")
for pkg, count in sorted(counter.items(), key=lambda x: (x[1], x[0])):
count = min(count, 2)
gen.fact(fn.max_dupes(pkg, count))
gen.newline()
gen.h2("Build unification sets ")
build_tools = set()
for current_tag in ("build-tools", "compiler"):
build_tools.update(spack.repo.PATH.packages_with_tags(current_tag))
for name in sorted(self.possible_dependencies() & build_tools):
gen.fact(fn.multiple_unification_sets(name))
gen.newline()
gen.h2("Possible package in link-run subDAG")
for name in sorted(self._link_run):
gen.fact(fn.possible_in_link_run(name))
gen.newline()
counter = collections.Counter(
list(self._link_run_virtuals) + list(self._possible_virtuals)
)
gen.h2("Maximum number of virtual nodes")
for pkg, count in sorted(counter.items(), key=lambda x: (x[1], x[0])):
gen.fact(fn.max_dupes(pkg, count))
gen.newline()
def create_counter(
specs: List[spack.spec.Spec],
tests: spack.concretize.TestsType,
possible_graph: PossibleDependencyGraph,
) -> Counter:
strategy = spack.config.CONFIG.get("concretizer:duplicates:strategy", "none")
if strategy == "full":
return FullDuplicatesCounter(specs, tests=tests, possible_graph=possible_graph)
if strategy == "minimal":
return MinimalDuplicatesCounter(specs, tests=tests, possible_graph=possible_graph)
return NoDuplicatesCounter(specs, tests=tests, possible_graph=possible_graph)
| FullDuplicatesCounter |
python | explosion__spaCy | spacy/lang/kmr/__init__.py | {
"start": 218,
"end": 318
} | class ____(Language):
lang = "kmr"
Defaults = KurmanjiDefaults
__all__ = ["Kurmanji"]
| Kurmanji |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 1257,
"end": 1476
} | class ____(graphene.ObjectType):
schema = graphene.NonNull(GrapheneTableSchema)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "TableSchemaMetadataEntry"
| GrapheneTableSchemaMetadataEntry |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/executors/ecs/utils.py | {
"start": 3031,
"end": 3154
} | class ____(Exception):
"""Thrown when something unexpected has occurred within the ECS ecosystem."""
| EcsExecutorException |
python | apache__airflow | providers/standard/tests/unit/standard/operators/test_python.py | {
"start": 4160,
"end": 8165
} | class ____:
"""Base test class for TestPythonOperator and TestPythonSensor classes"""
opcls: type[BaseOperator]
dag_id: str
task_id: str
run_id: str
dag: DAG
ds_templated: str
default_date: datetime = DEFAULT_DATE
@pytest.fixture(autouse=True)
def base_tests_setup(self, request, create_serialized_task_instance_of_operator, dag_maker):
self.dag_id = f"dag_{slugify(request.cls.__name__)}"
self.task_id = f"task_{slugify(request.node.name, max_length=40)}"
self.run_id = f"run_{slugify(request.node.name, max_length=40)}"
self.ds_templated = self.default_date.date().isoformat()
self.ti_maker = create_serialized_task_instance_of_operator
self.dag_maker = dag_maker
self.dag_non_serialized = self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH).dag
# We need to entre the context in order to the factory to create things
with self.dag_maker:
...
clear_db_runs()
yield
clear_db_runs()
@staticmethod
def assert_expected_task_states(dag_run: DagRun, expected_states: dict):
"""Helper function that asserts `TaskInstances` of a given `task_id` are in a given state."""
asserts = []
for ti in dag_run.get_task_instances():
try:
expected = expected_states[ti.task_id]
except KeyError:
asserts.append(f"Unexpected task id {ti.task_id!r} found, expected {expected_states.keys()}")
continue
if ti.state != expected:
asserts.append(f"Task {ti.task_id!r} has state {ti.state!r} instead of expected {expected!r}")
if asserts:
pytest.fail("\n".join(asserts))
@staticmethod
def default_kwargs(**kwargs):
"""Default arguments for specific Operator."""
return kwargs
def create_dag_run(self) -> DagRun:
from airflow.models.serialized_dag import SerializedDagModel
# Update the serialized DAG with any tasks added after initial dag was created
if AIRFLOW_V_3_1_PLUS:
self.dag_maker.serialized_model = SerializedDagModel(
LazyDeserializedDAG.from_dag(self.dag_non_serialized)
)
else:
self.dag_maker.serialized_model = SerializedDagModel(self.dag_non_serialized)
return self.dag_maker.create_dagrun(
state=DagRunState.RUNNING,
start_date=self.dag_maker.start_date,
session=self.dag_maker.session,
logical_date=self.default_date,
run_type=DagRunType.MANUAL,
data_interval=(self.default_date, self.default_date),
)
def create_ti(self, fn, **kwargs) -> TI:
"""Create TaskInstance for class defined Operator."""
return self.ti_maker(
self.opcls,
python_callable=fn,
**self.default_kwargs(**kwargs),
dag_id=self.dag_id,
task_id=self.task_id,
logical_date=self.default_date,
)
def run_as_operator(self, fn, **kwargs):
"""Run task by direct call ``run`` method."""
clear_db_runs()
with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True):
task = self.opcls(task_id=self.task_id, python_callable=fn, **self.default_kwargs(**kwargs))
dr = self.dag_maker.create_dagrun()
self.dag_maker.run_ti(self.task_id, dr)
clear_db_runs()
return task
def run_as_task(self, fn, return_ti=False, **kwargs):
"""Create TaskInstance and run it."""
ti = self.create_ti(fn, **kwargs)
assert ti.task is not None
ti.run()
if return_ti:
return ti
return ti.task
def render_templates(self, fn, **kwargs):
"""Create TaskInstance and render templates without actual run."""
return self.create_ti(fn, **kwargs).render_templates()
| BasePythonTest |
python | huggingface__transformers | src/transformers/models/hgnet_v2/modeling_hgnet_v2.py | {
"start": 1859,
"end": 2318
} | class ____(nn.Module):
def __init__(self, scale_value: float = 1.0, bias_value: float = 0.0):
super().__init__()
self.scale = nn.Parameter(torch.tensor([scale_value]), requires_grad=True)
self.bias = nn.Parameter(torch.tensor([bias_value]), requires_grad=True)
def forward(self, hidden_state: Tensor) -> Tensor:
hidden_state = self.scale * hidden_state + self.bias
return hidden_state
| HGNetV2LearnableAffineBlock |
python | getsentry__sentry | src/sentry/integrations/bitbucket/client.py | {
"start": 1685,
"end": 7529
} | class ____(ApiClient, RepositoryClient):
"""
The API Client for the Bitbucket Integration
NOTE: repo is the fully qualified slug containing 'username/repo_slug'
"""
integration_name = IntegrationProviderSlug.BITBUCKET.value
def __init__(self, integration: RpcIntegration | Integration):
self.base_url = integration.metadata["base_url"]
self.shared_secret = integration.metadata["shared_secret"]
# subject is probably the clientKey
self.subject = integration.external_id
super().__init__(
integration_id=integration.id,
verify_ssl=True,
logging_context=None,
)
def finalize_request(self, prepared_request: PreparedRequest) -> PreparedRequest:
assert prepared_request.url is not None
assert prepared_request.method is not None
path = prepared_request.url[len(self.base_url) :]
url_params = dict(parse_qs(urlsplit(path).query))
path = path.split("?")[0]
jwt_payload = {
"iss": BITBUCKET_KEY,
"iat": datetime.datetime.utcnow(),
"exp": datetime.datetime.utcnow() + datetime.timedelta(seconds=5 * 60),
"qsh": get_query_hash(
uri=path, method=prepared_request.method.upper(), query_params=url_params
),
"sub": self.subject,
}
encoded_jwt = jwt.encode(jwt_payload, self.shared_secret)
prepared_request.headers["Authorization"] = f"JWT {encoded_jwt}"
return prepared_request
def get_issue(self, repo, issue_id):
return self.get(BitbucketAPIPath.issue.format(repo=repo, issue_id=issue_id))
def create_issue(self, repo, data):
return self.post(path=BitbucketAPIPath.issues.format(repo=repo), data=data)
def search_issues(self, repo: str, query: str) -> dict[str, Any]:
# Query filters can be found here:
# https://developer.atlassian.com/bitbucket/api/2/reference/meta/filtering#supp-endpoints
return self.get(path=BitbucketAPIPath.issues.format(repo=repo), params={"q": query})
def create_comment(self, repo, issue_id, data):
# Call the method as below:
# client.create_comment('repo', '1', {"content": {"raw": "Whatever you're commenting."}})
# https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Busername%7D/%7Brepo_slug%7D/issues/%7Bissue_id%7D/comments#post
return self.post(
path=BitbucketAPIPath.issue_comments.format(repo=repo, issue_id=issue_id), data=data
)
def get_repo(self, repo):
return self.get(BitbucketAPIPath.repository.format(repo=repo))
def get_repos(self, username):
return self.get(BitbucketAPIPath.repositories.format(username=username))
def search_repositories(self, username, query):
return self.get(
path=BitbucketAPIPath.repositories.format(username=username), params={"q": query}
)
def create_hook(self, repo, data):
return self.post(path=BitbucketAPIPath.repository_hooks.format(repo=repo), data=data)
def get_hooks(self, repo):
return self.get(path=BitbucketAPIPath.repository_hooks.format(repo=repo))
def delete_hook(self, repo, hook_id):
return self.delete(path=BitbucketAPIPath.repository_hook.format(repo=repo, uid=hook_id))
def get_commit_filechanges(self, repo, sha):
resp = self.get(
BitbucketAPIPath.repository_diff.format(repo=repo, spec=sha), allow_text=True
)
return patch_to_file_changes(resp.text)
def zip_commit_data(self, repo, commit_list):
for commit in commit_list:
commit.update({"patch_set": self.get_commit_filechanges(repo, commit["hash"])})
return commit_list
def get_last_commits(self, repo, end_sha):
# return api request that fetches last ~30 commits
# see https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Busername%7D/%7Brepo_slug%7D/commits/%7Brevision%7D
# using end_sha as parameter
data = self.get(BitbucketAPIPath.repository_commits.format(repo=repo, revision=end_sha))
return self.zip_commit_data(repo, data["values"])
def compare_commits(self, repo, start_sha, end_sha):
# where start_sha is oldest and end_sha is most recent
# see
# https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Busername%7D/%7Brepo_slug%7D/commits/%7Brevision%7D
commits: list[dict[str, Any]] = []
done = False
url = BitbucketAPIPath.repository_commits.format(repo=repo, revision=end_sha)
while not done and len(commits) < 90:
data = self.get(url)
for commit in data["values"]:
if commit["hash"] == start_sha:
done = True
break
commits.append(commit)
# move page forward
try:
url = data["next"]
except KeyError:
break
return self.zip_commit_data(repo, commits)
def check_file(self, repo: Repository, path: str, version: str | None) -> object | None:
return self.head_cached(
path=BitbucketAPIPath.source.format(
repo=repo.name,
sha=version,
path=path,
),
)
def get_file(
self, repo: Repository, path: str, ref: str | None, codeowners: bool = False
) -> str:
response = self.get_cached(
path=BitbucketAPIPath.source.format(
repo=repo.name,
sha=ref,
path=path,
),
allow_redirects=True,
raw_response=True,
)
return response.text
| BitbucketApiClient |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 148305,
"end": 156054
} | class ____:
def test_load_flow_from_source_on_flow_function(self):
assert hasattr(flow, "from_source")
class TestSync:
def test_load_flow_from_source_with_storage(self):
storage = MockStorage()
loaded_flow: Flow = Flow.from_source(
entrypoint="flows.py:test_flow", source=storage
)
# Check that the loaded flow is indeed an instance of Flow and has the expected name
assert isinstance(loaded_flow, Flow)
assert loaded_flow.name == "test-flow"
assert loaded_flow() == 1
def test_loaded_flow_to_deployment_has_storage(self):
storage = MockStorage()
loaded_flow = Flow.from_source(
entrypoint="flows.py:test_flow", source=storage
)
deployment = loaded_flow.to_deployment(name="test")
assert deployment.storage == storage
def test_loaded_flow_can_be_updated_with_options(self):
storage = MockStorage()
storage.set_base_path(Path.cwd())
loaded_flow = Flow.from_source(
entrypoint="flows.py:test_flow", source=storage
)
flow_with_options = loaded_flow.with_options(name="with_options")
deployment = flow_with_options.to_deployment(name="test")
assert deployment.storage == storage
def test_load_flow_from_source_with_url(self, monkeypatch: pytest.MonkeyPatch):
def mock_create_storage_from_source(url):
return MockStorage()
monkeypatch.setattr(
"prefect.runner.storage.create_storage_from_source",
mock_create_storage_from_source,
)
loaded_flow = Flow.from_source(
source="https://github.com/org/repo.git",
entrypoint="flows.py:test_flow",
)
# Check that the loaded flow is indeed an instance of Flow and has the expected name
assert isinstance(loaded_flow, Flow)
assert loaded_flow.name == "test-flow"
assert loaded_flow() == 1
def test_accepts_storage_blocks(self):
class FakeStorageBlock(Block):
_block_type_slug = "fake-storage-block"
code: str = dedent(
"""\
from prefect import flow
@flow
def test_flow():
return 1
"""
)
async def get_directory(self, local_path: str):
(Path(local_path) / "flows.py").write_text(self.code)
block = FakeStorageBlock()
loaded_flow = Flow.from_source(
entrypoint="flows.py:test_flow", source=block
)
assert loaded_flow() == 1
def test_raises_on_unsupported_type(self):
class UnsupportedType:
what_i_do_here = "who knows?"
with pytest.raises(TypeError, match="Unsupported source type"):
Flow.from_source(
entrypoint="flows.py:test_flow", source=UnsupportedType()
)
async def test_raises_on_unsupported_type_async(self):
class UnsupportedType:
what_i_do_here = "who knows?"
with pytest.raises(TypeError, match="Unsupported source type"):
await Flow.afrom_source(
entrypoint="flows.py:test_flow", source=UnsupportedType()
)
def test_no_pull_for_local_storage(self, monkeypatch: pytest.MonkeyPatch):
from prefect.runner.storage import LocalStorage
storage = LocalStorage(path="/tmp/test")
mock_load_flow = MagicMock(return_value=MagicMock(spec=Flow))
monkeypatch.setattr(
"prefect.flows.load_flow_from_entrypoint", mock_load_flow
)
pull_code_spy = AsyncMock()
monkeypatch.setattr(LocalStorage, "pull_code", pull_code_spy)
Flow.from_source(entrypoint="flows.py:test_flow", source=storage)
pull_code_spy.assert_not_called()
class TestAsync:
async def test_load_flow_from_source_with_storage(self):
storage = MockStorage()
loaded_flow: Flow = await Flow.from_source(
entrypoint="flows.py:test_flow", source=storage
)
# Check that the loaded flow is indeed an instance of Flow and has the expected name
assert isinstance(loaded_flow, Flow)
assert loaded_flow.name == "test-flow"
assert loaded_flow() == 1
async def test_loaded_flow_to_deployment_has_storage(self):
storage = MockStorage()
loaded_flow = await Flow.from_source(
entrypoint="flows.py:test_flow", source=storage
)
deployment = await loaded_flow.ato_deployment(name="test")
assert deployment.storage == storage
async def test_loaded_flow_can_be_updated_with_options(self):
storage = MockStorage()
storage.set_base_path(Path.cwd())
loaded_flow = await Flow.afrom_source(
entrypoint="flows.py:test_flow", source=storage
)
flow_with_options = loaded_flow.with_options(name="with_options")
deployment = await flow_with_options.ato_deployment(name="test")
assert deployment.storage == storage
async def test_load_flow_from_source_with_url(
self, monkeypatch: pytest.MonkeyPatch
):
def mock_create_storage_from_source(url):
return MockStorage()
monkeypatch.setattr(
"prefect.runner.storage.create_storage_from_source",
mock_create_storage_from_source,
)
loaded_flow = await Flow.afrom_source(
source="https://github.com/org/repo.git",
entrypoint="flows.py:test_flow",
)
# Check that the loaded flow is indeed an instance of Flow and has the expected name
assert isinstance(loaded_flow, Flow)
assert loaded_flow.name == "test-flow"
assert loaded_flow() == 1
async def test_accepts_storage_blocks(self):
class FakeStorageBlock(Block):
_block_type_slug = "fake-storage-block"
code: str = dedent(
"""\
from prefect import flow
@flow
def test_flow():
return 1
"""
)
async def get_directory(self, local_path: str):
(Path(local_path) / "flows.py").write_text(self.code)
block = FakeStorageBlock()
loaded_flow = await Flow.afrom_source(
entrypoint="flows.py:test_flow", source=block
)
assert loaded_flow() == 1
async def test_no_pull_for_local_storage(self, monkeypatch: pytest.MonkeyPatch):
from prefect.runner.storage import LocalStorage
storage = LocalStorage(path="/tmp/test")
mock_load_flow = AsyncMock(return_value=MagicMock(spec=Flow))
monkeypatch.setattr(
"prefect.flows.load_flow_from_entrypoint", mock_load_flow
)
pull_code_spy = AsyncMock()
monkeypatch.setattr(LocalStorage, "pull_code", pull_code_spy)
await Flow.afrom_source(entrypoint="flows.py:test_flow", source=storage)
pull_code_spy.assert_not_called()
| TestFlowFromSource |
python | ray-project__ray | python/ray/llm/_internal/serve/engines/vllm/kv_transfer/base.py | {
"start": 201,
"end": 2911
} | class ____(abc.ABC):
def __init__(self, llm_config: "LLMConfig"):
"""Base class for connector backends.
Args:
llm_config: The llm configuration for this engine
"""
self.llm_config = llm_config
@property
def kv_transfer_config(self) -> Dict[str, Any]:
engine_kwargs = self.llm_config.engine_kwargs
kv_transfer_config = engine_kwargs.get("kv_transfer_config")
assert (
kv_transfer_config is not None
), "In Connector backend, kv_transfer_config is not set"
return kv_transfer_config
def _get_unique_suffix(self, len: int = 6) -> str:
"""Generates unique alphanumeric suffix.
Args:
len: Length of the suffix to generate.
Returns:
A unique alphanumeric suffix string of specified length.
"""
return "".join(random.choices(string.ascii_letters + string.digits, k=len))
def _compute_port_offset(self) -> int:
"""Compute a deterministic port offset for this replica.
Uses data_parallel_rank if DP case, otherwise falls back to
the replica rank assigned by Ray Serve (TP/PP case).
For TP/PP cases, multiply by num_devices (tp × pp) to reserve
sufficient port space, since each worker needs a unique port.
Each TP worker adds its tp_rank (0, 1, ..., tp_size-1) to the
base port at bind time, and PP stages also need separate ports.
Returns:
Non-negative integer offset to add to a base port.
"""
# Prefer explicit DP rank when available
dp_rank = self.llm_config.engine_kwargs.get("data_parallel_rank")
if isinstance(dp_rank, int) and dp_rank >= 0:
# vLLM already accounts for TP spacing in DP offset calculation
# (data_parallel_rank × tp_size), don't multiply here
return dp_rank
# Fall back to Serve replica rank for TP/PP cases
try:
rc = serve.get_replica_context()
if rc and hasattr(rc, "rank"):
# Use num_devices (tp × pp) to reserve ports for all workers
# Each replica spawns num_devices workers, each needing a unique port
engine_config = self.llm_config.get_engine_config()
num_devices = engine_config.num_devices
return rc.rank * num_devices
except Exception:
# Best-effort fallback; avoid introducing failures in setup paths
pass
return 0
def setup(self) -> None:
"""Setup the connector backend.
This method is called to setup the connector backend.
"""
pass
| BaseConnectorBackend |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 145195,
"end": 149908
} | class ____(DataplexCatalogBaseOperator):
"""
Update an AspectType resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogUpdateAspectTypeOperator`
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param location: Required. The ID of the Google Cloud region that the task belongs to.
:param update_mask: Optional. Names of fields whose values to overwrite on an entry group.
If this parameter is absent or empty, all modifiable fields are overwritten. If such
fields are non-required and omitted in the request body, their values are emptied.
:param aspect_type_id: Required. ID of the AspectType to update.
:param aspect_type_configuration: Required. The updated configuration body of the AspectType.
For more details please see API documentation:
https://cloud.google.com/dataplex/docs/reference/rest/v1/projects.locations.aspectTypes#AspectType
:param validate_only: Optional. The service validates the request without performing any mutations.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param gcp_conn_id: Optional. The connection ID to use when fetching connection info.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"aspect_type_id", "aspect_type_configuration", "update_mask"}
| set(DataplexCatalogBaseOperator.template_fields)
)
operator_extra_links = (DataplexCatalogAspectTypeLink(),)
def __init__(
self,
aspect_type_id: str,
aspect_type_configuration: dict | AspectType,
update_mask: list[str] | FieldMask | None = None,
validate_request: bool | None = False,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.aspect_type_id = aspect_type_id
self.aspect_type_configuration = aspect_type_configuration
self.update_mask = update_mask
self.validate_request = validate_request
@property
def extra_links_params(self) -> dict[str, Any]:
return {
**super().extra_links_params,
"aspect_type_id": self.aspect_type_id,
}
def execute(self, context: Context):
DataplexCatalogAspectTypeLink.persist(context=context)
if self.validate_request:
self.log.info("Validating an Update Dataplex Catalog AspectType request.")
else:
self.log.info(
"Updating Dataplex Catalog AspectType %s.",
self.aspect_type_id,
)
try:
operation = self.hook.update_aspect_type(
location=self.location,
project_id=self.project_id,
aspect_type_id=self.aspect_type_id,
aspect_type_configuration=self.aspect_type_configuration,
update_mask=self.update_mask,
validate_only=self.validate_request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
aspect_type = self.hook.wait_for_operation(timeout=self.timeout, operation=operation)
except NotFound as ex:
self.log.info("Specified AspectType was not found.")
raise AirflowException(ex)
except Exception as exc:
raise AirflowException(exc)
else:
result = AspectType.to_dict(aspect_type) if not self.validate_request else None
if not self.validate_request:
self.log.info("AspectType %s was successfully updated.", self.aspect_type_id)
return result
| DataplexCatalogUpdateAspectTypeOperator |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 5666,
"end": 5708
} | class ____(Opcode):
__slots__ = ()
| POP_TOP |
python | tiangolo__fastapi | tests/test_response_model_data_filter_no_inheritance.py | {
"start": 374,
"end": 1746
} | class ____(BaseModel):
name: str
owner: User
@app.post("/users/", response_model=User)
async def create_user(user: UserCreate):
return user
@app.get("/pets/{pet_id}", response_model=PetOut)
async def read_pet(pet_id: int):
user = UserDB(
email="johndoe@example.com",
hashed_password="secrethashed",
)
pet = PetDB(name="Nibbler", owner=user)
return pet
@app.get("/pets/", response_model=List[PetOut])
async def read_pets():
user = UserDB(
email="johndoe@example.com",
hashed_password="secrethashed",
)
pet1 = PetDB(name="Nibbler", owner=user)
pet2 = PetDB(name="Zoidberg", owner=user)
return [pet1, pet2]
client = TestClient(app)
def test_filter_top_level_model():
response = client.post(
"/users", json={"email": "johndoe@example.com", "password": "secret"}
)
assert response.json() == {"email": "johndoe@example.com"}
def test_filter_second_level_model():
response = client.get("/pets/1")
assert response.json() == {
"name": "Nibbler",
"owner": {"email": "johndoe@example.com"},
}
def test_list_of_models():
response = client.get("/pets/")
assert response.json() == [
{"name": "Nibbler", "owner": {"email": "johndoe@example.com"}},
{"name": "Zoidberg", "owner": {"email": "johndoe@example.com"}},
]
| PetOut |
python | mlflow__mlflow | tests/resources/mlflow-test-plugin/mlflow_test_plugin/sqlalchemy_store.py | {
"start": 96,
"end": 327
} | class ____(SqlAlchemyStore):
def __init__(self, store_uri=None):
path = urllib.parse.urlparse(store_uri).path if store_uri else None
self.is_plugin = True
super().__init__(path)
| PluginRegistrySqlAlchemyStore |
python | numpy__numpy | numpy/ma/tests/test_mrecords.py | {
"start": 15469,
"end": 19837
} | class ____:
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array([b'one', b'two', b'three'],
mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(b'99999', b'99999.',
b'N/A'))
nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
data = (mrec, nrec, ddtype)
def test_fromarrays(self):
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
(mrec, nrec, _) = self.data
for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)):
assert_equal(getattr(mrec, f)._mask, l._mask)
# One record only
_x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0], dtype=object)
assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0])
def test_fromrecords(self):
# Test construction from records.
(mrec, nrec, ddtype) = self.data
# ......
palist = [(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)]
pa = recfromrecords(palist, names='c1, c2, c3, c4')
mpa = fromrecords(palist, names='c1, c2, c3, c4')
assert_equal_records(pa, mpa)
# .....
_mrec = fromrecords(nrec)
assert_equal(_mrec.dtype, mrec.dtype)
for field in _mrec.dtype.names:
assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
_mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')
assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')])
for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')):
assert_equal(getattr(_mrec, f), getattr(mrec._data, n))
_mrec = fromrecords(mrec)
assert_equal(_mrec.dtype, mrec.dtype)
assert_equal_records(_mrec._data, mrec.filled())
assert_equal_records(_mrec._mask, mrec._mask)
def test_fromrecords_wmask(self):
# Tests construction from records w/ mask.
(mrec, nrec, ddtype) = self.data
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,])
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
_mrec = fromrecords(nrec.tolist(), dtype=ddtype,
mask=mrec._mask.tolist())
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
def test_fromtextfile(self):
# Tests reading from a text file.
fcontent = (
"""#
'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'
'strings',1,1.0,'mixed column',,1
'with embedded "double quotes"',2,2.0,1.0,,1
'strings',3,3.0E5,3,,1
'strings',4,-1e-10,,,1
""")
with temppath() as path:
with open(path, 'w') as f:
f.write(fcontent)
mrectxt = fromtextfile(path, delimiter=',', varnames='ABCDEFG')
assert_(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])
def test_addfield(self):
# Tests addfield
(mrec, nrec, ddtype) = self.data
(d, m) = ([100, 200, 300], [1, 0, 0])
mrec = addfield(mrec, ma.array(d, mask=m))
assert_equal(mrec.f3, d)
assert_equal(mrec.f3._mask, m)
def test_record_array_with_object_field():
# Trac #1839
y = ma.masked_array(
[(1, '2'), (3, '4')],
mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', object)])
# getting an item used to fail
y[1]
| TestMRecordsImport |
python | streamlit__streamlit | lib/tests/streamlit/elements/json_test.py | {
"start": 912,
"end": 4043
} | class ____(DeltaGeneratorTestCase):
"""Test Public Streamlit Public APIs."""
def test_st_json(self):
"""Test st.json."""
st.json('{"some": "json"}')
el = self.get_delta_from_queue().new_element
assert el.json.body == '{"some": "json"}'
assert el.json.expanded is True
assert el.json.HasField("max_expand_depth") is False
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
# Test that an object containing non-json-friendly keys can still
# be displayed. Resultant json body will be missing those keys.
n = np.array([1, 2, 3, 4, 5])
data = {n[0]: "this key will not render as JSON", "array": n}
st.json(data)
el = self.get_delta_from_queue().new_element
assert el.json.body == '{"array": "array([1, 2, 3, 4, 5])"}'
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
def test_expanded_param(self):
"""Test expanded parameter for `st.json`"""
st.json(
{
"level1": {"level2": {"level3": {"a": "b"}}, "c": "d"},
},
expanded=2,
)
el = self.get_delta_from_queue().new_element
assert el.json.expanded is True
assert el.json.max_expand_depth == 2
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
with pytest.raises(TypeError):
st.json(
{
"level1": {"level2": {"level3": {"a": "b"}}, "c": "d"},
},
expanded=["foo"], # type: ignore
)
def test_st_json_with_width_pixels(self):
"""Test st.json with width in pixels."""
st.json('{"some": "json"}', width=500)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.width_config.pixel_width == 500
def test_st_json_with_width_stretch(self):
"""Test st.json with stretch width."""
st.json('{"some": "json"}', width="stretch")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
@parameterized.expand(
[
"invalid",
-100,
0,
100.5,
None,
]
)
def test_st_json_with_invalid_width(self, width):
"""Test st.json with invalid width values."""
with pytest.raises(StreamlitInvalidWidthError) as e:
st.json('{"some": "json"}', width=width)
assert "Invalid width" in str(e.value)
| StJsonAPITest |
python | huggingface__transformers | src/transformers/activations.py | {
"start": 7483,
"end": 7780
} | class ____(nn.Module):
"""
Applies the relu^2 activation introduced in https://huggingface.co/papers/2109.08668v2
"""
def forward(self, input):
relu_applied = nn.functional.relu(input)
squared = torch.square(relu_applied)
return squared
| ReLUSquaredActivation |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/task_instances.py | {
"start": 2817,
"end": 3004
} | class ____(BaseModel):
"""Task Instance Collection serializer for responses."""
task_instances: Iterable[TaskInstanceResponse]
total_entries: int
| TaskInstanceCollectionResponse |
python | run-llama__llama_index | llama-index-integrations/graph_stores/llama-index-graph-stores-neptune/llama_index/graph_stores/neptune/base_property_graph.py | {
"start": 601,
"end": 13770
} | class ____(PropertyGraphStore):
supports_structured_queries: bool = True
text_to_cypher_template: PromptTemplate = DEFAULT_CYPHER_TEMPALTE
schema = None
structured_schema = None
def __init__() -> None:
pass
@property
def client(self) -> Any:
return self._client
def get(
self, properties: Dict = None, ids: List[str] = None, exact_match: bool = True
) -> List[LabelledNode]:
"""
Get the nodes from the graph.
Args:
properties (Dict | None, optional): The properties to retrieve. Defaults to None.
ids (List[str] | None, optional): A list of ids to find in the graph. Defaults to None.
exact_match (bool, optional): Whether to do exact match on properties. Defaults to True.
Returns:
List[LabelledNode]: A list of nodes returned
"""
cypher_statement = "MATCH (e) "
params = {}
if properties or ids:
cypher_statement += "WHERE "
if ids:
if exact_match:
cypher_statement += "e.id IN $ids "
else:
cypher_statement += "WHERE size([x IN $ids where toLower(e.id) CONTAINS toLower(x)]) > 0 "
params["ids"] = ids
if properties:
prop_list = []
for i, prop in enumerate(properties):
prop_list.append(f"e.`{prop}` = $property_{i}")
params[f"property_{i}"] = properties[prop]
cypher_statement += " AND ".join(prop_list)
return_statement = (
"""
WITH e
RETURN e.id AS name,
[l in labels(e) WHERE l <> '"""
+ BASE_ENTITY_LABEL
+ """' | l][0] AS type,
e{.* , embedding: Null, id: Null} AS properties
"""
)
cypher_statement += return_statement
response = self.structured_query(cypher_statement, param_map=params)
response = response if response else []
nodes = []
for record in response:
# text indicates a chunk node
# none on the type indicates an implicit node, likely a chunk node
if "text" in record["properties"] or record["type"] is None:
text = record["properties"].pop("text", "")
nodes.append(
ChunkNode(
id_=record["name"],
text=text,
properties=remove_empty_values(record["properties"]),
)
)
else:
nodes.append(
EntityNode(
name=record["name"],
label=record["type"],
properties=remove_empty_values(record["properties"]),
)
)
return nodes
def get_triplets(
self,
entity_names: Optional[List[str]] = None,
relation_names: Optional[List[str]] = None,
properties: Optional[dict] = None,
ids: Optional[List[str]] = None,
) -> List[Triplet]:
"""
Get the triplets of the entities in the graph.
Args:
entity_names (Optional[List[str]], optional): The entity names to find. Defaults to None.
relation_names (Optional[List[str]], optional): The relation names to follow. Defaults to None.
properties (Optional[dict], optional): The properties to return. Defaults to None.
ids (Optional[List[str]], optional): The ids to search on. Defaults to None.
Returns:
List[Triplet]: A list of triples
"""
cypher_statement = f"MATCH (e:`{BASE_ENTITY_LABEL}`) "
params = {}
if entity_names or properties or ids:
cypher_statement += "WHERE "
if entity_names:
cypher_statement += "e.name in $entity_names "
params["entity_names"] = entity_names
if ids:
cypher_statement += "e.id in $ids "
params["ids"] = ids
if properties:
prop_list = []
for i, prop in enumerate(properties):
prop_list.append(f"e.`{prop}` = $property_{i}")
params[f"property_{i}"] = properties[prop]
cypher_statement += " AND ".join(prop_list)
return_statement = f"""
WITH e
MATCH (e)-[r{":`" + "`|`".join(relation_names) + "`" if relation_names else ""}]->(t:{BASE_ENTITY_LABEL})
RETURN e.name AS source_id, [l in labels(e) WHERE l <> '{BASE_ENTITY_LABEL}' | l][0] AS source_type,
e{{.* , embedding: Null, name: Null}} AS source_properties,
type(r) AS type,
r{{.*}} AS rel_properties,
t.name AS target_id, [l in labels(t) WHERE l <> '{BASE_ENTITY_LABEL}' | l][0] AS target_type,
t{{.* , embedding: Null, name: Null}} AS target_properties"""
cypher_statement += return_statement
data = self.structured_query(cypher_statement, param_map=params)
data = data if data else []
triples = []
for record in data:
source = EntityNode(
name=record["source_id"],
label=record["source_type"],
properties=remove_empty_values(record["source_properties"]),
)
target = EntityNode(
name=record["target_id"],
label=record["target_type"],
properties=remove_empty_values(record["target_properties"]),
)
rel = Relation(
source_id=record["source_id"],
target_id=record["target_id"],
label=record["type"],
properties=remove_empty_values(record["rel_properties"]),
)
triples.append([source, rel, target])
return triples
def get_rel_map(
self,
graph_nodes: List[LabelledNode],
depth: int = 2,
limit: int = 30,
ignore_rels: List[str] = None,
) -> List[Tuple[Any]]:
"""
Get a depth aware map of relations.
Args:
graph_nodes (List[LabelledNode]): The nodes
depth (int, optional): The depth to traverse. Defaults to 2.
limit (int, optional): The limit of numbers to return. Defaults to 30.
ignore_rels (List[str] | None, optional): Relations to ignore. Defaults to None.
Returns:
List[Tuple[LabelledNode | Relation]]: The node/relationship pairs
"""
triples = []
ids = [node.id for node in graph_nodes]
# Needs some optimization
if len(ids) > 0:
logger.debug(f"get_rel_map() ids: {ids}")
response = self.structured_query(
f"""
WITH $ids AS id_list
UNWIND range(0, size(id_list) - 1) AS idx
MATCH (e:`{BASE_ENTITY_LABEL}`)
WHERE e.id = id_list[idx]
MATCH p=(e)-[r*1..{depth}]-(other)
WHERE size([rel in relationships(p) WHERE type(rel) <> 'MENTIONS']) = size(relationships(p))
UNWIND relationships(p) AS rel
WITH distinct rel, idx
WITH startNode(rel) AS source,
type(rel) AS type,
endNode(rel) AS endNode,
idx
LIMIT {limit}
RETURN source.id AS source_id, [l in labels(source) WHERE l <> '{BASE_ENTITY_LABEL}' | l][0] AS source_type,
source{{.* , embedding: Null, id: Null}} AS source_properties,
type,
endNode.id AS target_id, [l in labels(endNode) WHERE l <> '{BASE_ENTITY_LABEL}' | l][0] AS target_type,
endNode{{.* , embedding: Null, id: Null}} AS target_properties,
idx
ORDER BY idx
LIMIT {limit}
""",
param_map={"ids": ids},
)
else:
response = []
response = response if response else []
ignore_rels = ignore_rels or []
for record in response:
if record["type"] in ignore_rels:
continue
source = EntityNode(
name=record["source_id"],
label=record["source_type"],
properties=remove_empty_values(record["source_properties"]),
)
target = EntityNode(
name=record["target_id"],
label=record["target_type"],
properties=remove_empty_values(record["target_properties"]),
)
rel = Relation(
source_id=record["source_id"],
target_id=record["target_id"],
label=record["type"],
)
triples.append([source, rel, target])
return triples
@abstractmethod
def upsert_nodes(self, nodes: List[LabelledNode]) -> None:
raise NotImplementedError
def upsert_relations(self, relations: List[Relation]) -> None:
"""
Upsert relations in the graph.
Args:
relations (List[Relation]): Relations to upsert
"""
for r in relations:
self.structured_query(
"""
WITH $data AS row
MERGE (source {id: row.source_id})
ON CREATE SET source:Chunk
MERGE (target {id: row.target_id})
ON CREATE SET target:Chunk
MERGE (source)-[r:`"""
+ r.label
+ """`]->(target)
SET r+= removeKeyFromMap(row.properties, '')
RETURN count(*)
""",
param_map={"data": r.dict()},
)
def delete(
self,
entity_names: List[str] = None,
relation_names: List[str] = None,
properties: Dict = None,
ids: List[str] = None,
) -> None:
"""
Delete data matching the criteria.
Args:
entity_names (List[str] | None, optional): The entity names to delete. Defaults to None.
relation_names (List[str] | None, optional): The relation names to delete. Defaults to None.
properties (Dict | None, optional): The properties to remove. Defaults to None.
ids (List[str] | None, optional): The ids to remove. Defaults to None.
"""
if entity_names:
self.structured_query(
"MATCH (n) WHERE n.name IN $entity_names DETACH DELETE n",
param_map={"entity_names": entity_names},
)
if ids:
self.structured_query(
"MATCH (n) WHERE n.id IN $ids DETACH DELETE n",
param_map={"ids": ids},
)
if relation_names:
for rel in relation_names:
self.structured_query(f"MATCH ()-[r:`{rel}`]->() DELETE r")
if properties:
cypher = "MATCH (e) WHERE "
prop_list = []
params = {}
for i, prop in enumerate(properties):
prop_list.append(f"e.`{prop}` = $property_{i}")
params[f"property_{i}"] = properties[prop]
cypher += " AND ".join(prop_list)
self.structured_query(cypher + " DETACH DELETE e", param_map=params)
@abstractmethod
def structured_query(self, query: str, param_map: Dict[str, Any] = None) -> Any:
raise NotImplementedError
def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
"""
Run the query.
Args:
query (str): The query to run
params (dict, optional): The query parameters. Defaults to {}.
Returns:
Dict[str, Any]: The query results
"""
return self.structured_query(query, params)
@abstractmethod
def vector_query(self, query: VectorStoreQuery, **kwargs: Any) -> Tuple[List[Any]]:
raise NotImplementedError
@abstractmethod
def _get_summary(self) -> Dict:
raise NotImplementedError
def get_schema(self, refresh: bool = False) -> Any:
"""Get the schema of the graph store."""
if refresh or not self.schema:
schema = refresh_schema(self.query, self._get_summary())
self.schema = schema["schema_str"]
self.structured_schema = schema["structured_schema"]
return self.structured_schema
def get_schema_str(self, refresh: bool = False) -> str:
"""
Get the schema as a string.
Args:
refresh (bool, optional): True to force refresh of the schema. Defaults to False.
Returns:
str: A string description of the schema
"""
if refresh or not self.schema:
schema = refresh_schema(self.query, self._get_summary())
self.schema = schema["schema_str"]
self.structured_schema = schema["structured_schema"]
return self.schema
| NeptuneBasePropertyGraph |
python | falconry__falcon | tests/test_headers.py | {
"start": 7953,
"end": 8044
} | class ____:
def items(self):
return [('test-header', 'test-value')]
| CustomHeaders |
python | google__flatbuffers | python/flatbuffers/flexbuffers.py | {
"start": 4770,
"end": 8240
} | class ____(enum.IntEnum):
"""Supported types of encoded data.
These are used as the upper 6 bits of a type field to indicate the actual
type.
"""
NULL = 0
INT = 1
UINT = 2
FLOAT = 3
# Types above stored inline, types below store an offset.
KEY = 4
STRING = 5
INDIRECT_INT = 6
INDIRECT_UINT = 7
INDIRECT_FLOAT = 8
MAP = 9
VECTOR = 10 # Untyped.
VECTOR_INT = 11 # Typed any size (stores no type table).
VECTOR_UINT = 12
VECTOR_FLOAT = 13
VECTOR_KEY = 14
# DEPRECATED, use VECTOR or VECTOR_KEY instead.
# Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
VECTOR_STRING_DEPRECATED = 15
VECTOR_INT2 = 16 # Typed tuple (no type table, no size field).
VECTOR_UINT2 = 17
VECTOR_FLOAT2 = 18
VECTOR_INT3 = 19 # Typed triple (no type table, no size field).
VECTOR_UINT3 = 20
VECTOR_FLOAT3 = 21
VECTOR_INT4 = 22 # Typed quad (no type table, no size field).
VECTOR_UINT4 = 23
VECTOR_FLOAT4 = 24
BLOB = 25
BOOL = 26
VECTOR_BOOL = 36 # To do the same type of conversion of type to vector type
@staticmethod
def Pack(type_, bit_width):
return (int(type_) << 2) | bit_width
@staticmethod
def Unpack(packed_type):
return 1 << (packed_type & 0b11), Type(packed_type >> 2)
@staticmethod
def IsInline(type_):
return type_ <= Type.FLOAT or type_ == Type.BOOL
@staticmethod
def IsTypedVector(type_):
return (
Type.VECTOR_INT <= type_ <= Type.VECTOR_STRING_DEPRECATED
or type_ == Type.VECTOR_BOOL
)
@staticmethod
def IsTypedVectorElementType(type_):
return Type.INT <= type_ <= Type.STRING or type_ == Type.BOOL
@staticmethod
def ToTypedVectorElementType(type_):
if not Type.IsTypedVector(type_):
raise ValueError('must be typed vector type')
return Type(type_ - Type.VECTOR_INT + Type.INT)
@staticmethod
def IsFixedTypedVector(type_):
return Type.VECTOR_INT2 <= type_ <= Type.VECTOR_FLOAT4
@staticmethod
def IsFixedTypedVectorElementType(type_):
return Type.INT <= type_ <= Type.FLOAT
@staticmethod
def ToFixedTypedVectorElementType(type_):
if not Type.IsFixedTypedVector(type_):
raise ValueError('must be fixed typed vector type')
# 3 types each, starting from length 2.
fixed_type = type_ - Type.VECTOR_INT2
return Type(fixed_type % 3 + Type.INT), fixed_type // 3 + 2
@staticmethod
def ToTypedVector(element_type, fixed_len=0):
"""Converts element type to corresponding vector type.
Args:
element_type: vector element type
fixed_len: number of elements: 0 for typed vector; 2, 3, or 4 for fixed
typed vector.
Returns:
Typed vector type or fixed typed vector type.
"""
if fixed_len == 0:
if not Type.IsTypedVectorElementType(element_type):
raise ValueError('must be typed vector element type')
else:
if not Type.IsFixedTypedVectorElementType(element_type):
raise ValueError('must be fixed typed vector element type')
offset = element_type - Type.INT
if fixed_len == 0:
return Type(offset + Type.VECTOR_INT) # TypedVector
elif fixed_len == 2:
return Type(offset + Type.VECTOR_INT2) # FixedTypedVector
elif fixed_len == 3:
return Type(offset + Type.VECTOR_INT3) # FixedTypedVector
elif fixed_len == 4:
return Type(offset + Type.VECTOR_INT4) # FixedTypedVector
else:
raise ValueError('unsupported fixed_len: %s' % fixed_len)
| Type |
python | coleifer__peewee | peewee.py | {
"start": 264830,
"end": 268603
} | class ____(DictCursorWrapper):
def __init__(self, cursor, model, columns):
super(BaseModelCursorWrapper, self).__init__(cursor)
self.model = model
self.select = columns or []
def _initialize_columns(self):
combined = self.model._meta.combined
table = self.model._meta.table
description = self.cursor.description
self.ncols = len(self.cursor.description)
self.columns = []
self.converters = converters = [None] * self.ncols
self.fields = fields = [None] * self.ncols
for idx, description_item in enumerate(description):
column = orig_column = description_item[0]
# Try to clean-up messy column descriptions when people do not
# provide an alias. The idea is that we take something like:
# SUM("t1"."price") -> "price") -> price
dot_index = column.rfind('.')
if dot_index != -1:
column = column[dot_index + 1:]
column = column.strip('()"`')
self.columns.append(column)
# Now we'll see what they selected and see if we can improve the
# column-name being returned - e.g. by mapping it to the selected
# field's name.
try:
raw_node = self.select[idx]
except IndexError:
if column in combined:
raw_node = node = combined[column]
else:
continue
else:
node = raw_node.unwrap()
# If this column was given an alias, then we will use whatever
# alias was returned by the cursor.
is_alias = raw_node.is_alias()
if is_alias:
self.columns[idx] = orig_column
# Heuristics used to attempt to get the field associated with a
# given SELECT column, so that we can accurately convert the value
# returned by the database-cursor into a Python object.
if isinstance(node, Field):
if raw_node._coerce:
converters[idx] = node.python_value
fields[idx] = node
if not is_alias:
self.columns[idx] = node.name
elif isinstance(node, ColumnBase) and raw_node._converter:
converters[idx] = raw_node._converter
elif isinstance(node, Function) and node._coerce:
if node._python_value is not None:
converters[idx] = node._python_value
elif node.arguments and isinstance(node.arguments[0], Node):
# If the first argument is a field or references a column
# on a Model, try using that field's conversion function.
# This usually works, but we use "safe_python_value()" so
# that if a TypeError or ValueError occurs during
# conversion we can just fall-back to the raw cursor value.
first = node.arguments[0].unwrap()
if isinstance(first, Entity):
path = first._path[-1] # Try to look-up by name.
first = combined.get(path)
if isinstance(first, Field):
converters[idx] = safe_python_value(first.python_value)
elif column in combined:
if node._coerce:
converters[idx] = combined[column].python_value
if isinstance(node, Column) and node.source == table:
fields[idx] = combined[column]
initialize = _initialize_columns
def process_row(self, row):
raise NotImplementedError
| BaseModelCursorWrapper |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_stackdriver.py | {
"start": 9147,
"end": 9882
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.stackdriver.StackdriverHook")
def test_execute(self, mock_hook):
operator = StackdriverUpsertNotificationChannelOperator(
task_id=TEST_TASK_ID,
channels=json.dumps({"channels": [TEST_NOTIFICATION_CHANNEL_1, TEST_NOTIFICATION_CHANNEL_2]}),
)
operator.execute(context=mock.MagicMock())
mock_hook.return_value.upsert_channel.assert_called_once_with(
channels=json.dumps({"channels": [TEST_NOTIFICATION_CHANNEL_1, TEST_NOTIFICATION_CHANNEL_2]}),
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestStackdriverUpsertChannelOperator |
python | sympy__sympy | sympy/physics/quantum/identitysearch.py | {
"start": 20242,
"end": 27607
} | class ____(Basic):
"""Wrapper class for circuits that reduce to a scalar value.
A gate identity is a quantum circuit such that the product
of the gates in the circuit is equal to a scalar value.
For example, XYZ = i, where X, Y, Z are the Pauli gates and
i is the imaginary value, is considered a gate identity.
Parameters
==========
args : Gate tuple
A variable length tuple of Gates that form an identity.
Examples
========
Create a GateIdentity and look at its attributes:
>>> from sympy.physics.quantum.identitysearch import GateIdentity
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> an_identity = GateIdentity(x, y, z)
>>> an_identity.circuit
X(0)*Y(0)*Z(0)
>>> an_identity.equivalent_ids
{(X(0), Y(0), Z(0)), (X(0), Z(0), Y(0)), (Y(0), X(0), Z(0)),
(Y(0), Z(0), X(0)), (Z(0), X(0), Y(0)), (Z(0), Y(0), X(0))}
"""
def __new__(cls, *args):
# args should be a tuple - a variable length argument list
obj = Basic.__new__(cls, *args)
obj._circuit = Mul(*args)
obj._rules = generate_gate_rules(args)
obj._eq_ids = generate_equivalent_ids(args)
return obj
@property
def circuit(self):
return self._circuit
@property
def gate_rules(self):
return self._rules
@property
def equivalent_ids(self):
return self._eq_ids
@property
def sequence(self):
return self.args
def __str__(self):
"""Returns the string of gates in a tuple."""
return str(self.circuit)
def is_degenerate(identity_set, gate_identity):
"""Checks if a gate identity is a permutation of another identity.
Parameters
==========
identity_set : set
A Python set with GateIdentity objects.
gate_identity : GateIdentity
The GateIdentity to check for existence in the set.
Examples
========
Check if the identity is a permutation of another identity:
>>> from sympy.physics.quantum.identitysearch import (
... GateIdentity, is_degenerate)
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> an_identity = GateIdentity(x, y, z)
>>> id_set = {an_identity}
>>> another_id = (y, z, x)
>>> is_degenerate(id_set, another_id)
True
>>> another_id = (x, x)
>>> is_degenerate(id_set, another_id)
False
"""
# For now, just iteratively go through the set and check if the current
# gate_identity is a permutation of an identity in the set
for an_id in identity_set:
if (gate_identity in an_id.equivalent_ids):
return True
return False
def is_reducible(circuit, nqubits, begin, end):
"""Determines if a circuit is reducible by checking
if its subcircuits are scalar values.
Parameters
==========
circuit : Gate tuple
A tuple of Gates representing a circuit. The circuit to check
if a gate identity is contained in a subcircuit.
nqubits : int
The number of qubits the circuit operates on.
begin : int
The leftmost gate in the circuit to include in a subcircuit.
end : int
The rightmost gate in the circuit to include in a subcircuit.
Examples
========
Check if the circuit can be reduced:
>>> from sympy.physics.quantum.identitysearch import is_reducible
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> is_reducible((x, y, z), 1, 0, 3)
True
Check if an interval in the circuit can be reduced:
>>> is_reducible((x, y, z), 1, 1, 3)
False
>>> is_reducible((x, y, y), 1, 1, 3)
True
"""
current_circuit = ()
# Start from the gate at "end" and go down to almost the gate at "begin"
for ndx in reversed(range(begin, end)):
next_gate = circuit[ndx]
current_circuit = (next_gate,) + current_circuit
# If a circuit as a matrix is equivalent to a scalar value
if (is_scalar_matrix(current_circuit, nqubits, False)):
return True
return False
def bfs_identity_search(gate_list, nqubits, max_depth=None,
identity_only=False):
"""Constructs a set of gate identities from the list of possible gates.
Performs a breadth first search over the space of gate identities.
This allows the finding of the shortest gate identities first.
Parameters
==========
gate_list : list, Gate
A list of Gates from which to search for gate identities.
nqubits : int
The number of qubits the quantum circuit operates on.
max_depth : int
The longest quantum circuit to construct from gate_list.
identity_only : bool
True to search for gate identities that reduce to identity;
False to search for gate identities that reduce to a scalar.
Examples
========
Find a list of gate identities:
>>> from sympy.physics.quantum.identitysearch import bfs_identity_search
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> bfs_identity_search([x], 1, max_depth=2)
{GateIdentity(X(0), X(0))}
>>> bfs_identity_search([x, y, z], 1)
{GateIdentity(X(0), X(0)), GateIdentity(Y(0), Y(0)),
GateIdentity(Z(0), Z(0)), GateIdentity(X(0), Y(0), Z(0))}
Find a list of identities that only equal to 1:
>>> bfs_identity_search([x, y, z], 1, identity_only=True)
{GateIdentity(X(0), X(0)), GateIdentity(Y(0), Y(0)),
GateIdentity(Z(0), Z(0))}
"""
if max_depth is None or max_depth <= 0:
max_depth = len(gate_list)
id_only = identity_only
# Start with an empty sequence (implicitly contains an IdentityGate)
queue = deque([()])
# Create an empty set of gate identities
ids = set()
# Begin searching for gate identities in given space.
while (len(queue) > 0):
current_circuit = queue.popleft()
for next_gate in gate_list:
new_circuit = current_circuit + (next_gate,)
# Determines if a (strict) subcircuit is a scalar matrix
circuit_reducible = is_reducible(new_circuit, nqubits,
1, len(new_circuit))
# In many cases when the matrix is a scalar value,
# the evaluated matrix will actually be an integer
if (is_scalar_matrix(new_circuit, nqubits, id_only) and
not is_degenerate(ids, new_circuit) and
not circuit_reducible):
ids.add(GateIdentity(*new_circuit))
elif (len(new_circuit) < max_depth and
not circuit_reducible):
queue.append(new_circuit)
return ids
def random_identity_search(gate_list, numgates, nqubits):
"""Randomly selects numgates from gate_list and checks if it is
a gate identity.
If the circuit is a gate identity, the circuit is returned;
Otherwise, None is returned.
"""
gate_size = len(gate_list)
circuit = ()
for i in range(numgates):
next_gate = gate_list[randint(0, gate_size - 1)]
circuit = circuit + (next_gate,)
is_scalar = is_scalar_matrix(circuit, nqubits, False)
return circuit if is_scalar else None
| GateIdentity |
python | django__django | django/forms/widgets.py | {
"start": 12328,
"end": 12437
} | class ____(Input):
input_type = "search"
template_name = "django/forms/widgets/search.html"
| SearchInput |
python | numba__numba | numba/core/types/scalars.py | {
"start": 5803,
"end": 6095
} | class ____(EnumClass):
"""
Type class for IntEnum classes.
"""
basename = "IntEnum class"
@cached_property
def member_type(self):
"""
The type of this class' members.
"""
return IntEnumMember(self.instance_class, self.dtype)
| IntEnumClass |
python | TheAlgorithms__Python | dynamic_programming/bitmask.py | {
"start": 427,
"end": 3125
} | class ____:
def __init__(self, task_performed, total):
self.total_tasks = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
self.dp = [
[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
]
self.task = defaultdict(list) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
self.final_mask = (1 << len(task_performed)) - 1
def count_ways_until(self, mask, task_no):
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
total_ways_until = self.count_ways_until(mask, task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_until += self.count_ways_until(mask | (1 << p), task_no + 1)
# save the value.
self.dp[mask][task_no] = total_ways_until
return self.dp[mask][task_no]
def count_no_of_ways(self, task_performed):
# Store the list of persons for each task
for i in range(len(task_performed)):
for j in task_performed[i]:
self.task[j].append(i)
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0, 1)
if __name__ == "__main__":
total_tasks = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
"""
For the particular example the tasks can be distributed as
(1,2,3), (1,2,4), (1,5,3), (1,5,4), (3,1,4),
(3,2,4), (3,5,4), (4,1,3), (4,2,3), (4,5,3)
total 10
"""
| AssignmentUsingBitmask |
python | pallets__jinja | src/jinja2/utils.py | {
"start": 13155,
"end": 21321
} | class ____:
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self._mapping: dict[t.Any, t.Any] = {}
self._queue: deque[t.Any] = deque()
self._postinit()
def _postinit(self) -> None:
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
self._remove = self._queue.remove
self._wlock = Lock()
self._append = self._queue.append
def __getstate__(self) -> t.Mapping[str, t.Any]:
return {
"capacity": self.capacity,
"_mapping": self._mapping,
"_queue": self._queue,
}
def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
self.__dict__.update(d)
self._postinit()
def __getnewargs__(self) -> tuple[t.Any, ...]:
return (self.capacity,)
def copy(self) -> "te.Self":
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue.extend(self._queue)
return rv
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
try:
return self[key]
except KeyError:
self[key] = default
return default
def clear(self) -> None:
"""Clear the cache."""
with self._wlock:
self._mapping.clear()
self._queue.clear()
def __contains__(self, key: t.Any) -> bool:
"""Check if a key exists in this cache."""
return key in self._mapping
def __len__(self) -> int:
"""Return the current size of the cache."""
return len(self._mapping)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self._mapping!r}>"
def __getitem__(self, key: t.Any) -> t.Any:
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
with self._wlock:
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except ValueError:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv
def __setitem__(self, key: t.Any, value: t.Any) -> None:
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
with self._wlock:
if key in self._mapping:
self._remove(key)
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
def __delitem__(self, key: t.Any) -> None:
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
with self._wlock:
del self._mapping[key]
try:
self._remove(key)
except ValueError:
pass
def items(self) -> t.Iterable[tuple[t.Any, t.Any]]:
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def values(self) -> t.Iterable[t.Any]:
"""Return a list of all values."""
return [x[1] for x in self.items()]
def keys(self) -> t.Iterable[t.Any]:
"""Return a list of all keys ordered by most recent usage."""
return list(self)
def __iter__(self) -> t.Iterator[t.Any]:
return reversed(tuple(self._queue))
def __reversed__(self) -> t.Iterator[t.Any]:
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
__copy__ = copy
def select_autoescape(
enabled_extensions: t.Collection[str] = ("html", "htm", "xml"),
disabled_extensions: t.Collection[str] = (),
default_for_string: bool = True,
default: bool = False,
) -> t.Callable[[str | None], bool]:
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
If you want to enable it for all templates created from strings or
for all templates with `.html` and `.xml` extensions::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
enabled_extensions=('html', 'xml'),
default_for_string=True,
))
Example configuration to turn it on at all times except if the template
ends with `.txt`::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
disabled_extensions=('txt',),
default_for_string=True,
default=True,
))
The `enabled_extensions` is an iterable of all the extensions that
autoescaping should be enabled for. Likewise `disabled_extensions` is
a list of all templates it should be disabled for. If a template is
loaded from a string then the default from `default_for_string` is used.
If nothing matches then the initial value of autoescaping is set to the
value of `default`.
For security reasons this function operates case insensitive.
.. versionadded:: 2.9
"""
enabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in enabled_extensions)
disabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in disabled_extensions)
def autoescape(template_name: str | None) -> bool:
if template_name is None:
return default_for_string
template_name = template_name.lower()
if template_name.endswith(enabled_patterns):
return True
if template_name.endswith(disabled_patterns):
return False
return default
return autoescape
def htmlsafe_json_dumps(
obj: t.Any, dumps: t.Callable[..., str] | None = None, **kwargs: t.Any
) -> markupsafe.Markup:
"""Serialize an object to a string of JSON with :func:`json.dumps`,
then replace HTML-unsafe characters with Unicode escapes and mark
the result safe with :class:`~markupsafe.Markup`.
This is available in templates as the ``|tojson`` filter.
The following characters are escaped: ``<``, ``>``, ``&``, ``'``.
The returned string is safe to render in HTML documents and
``<script>`` tags. The exception is in HTML attributes that are
double quoted; either use single quotes or the ``|forceescape``
filter.
:param obj: The object to serialize to JSON.
:param dumps: The ``dumps`` function to use. Defaults to
``env.policies["json.dumps_function"]``, which defaults to
:func:`json.dumps`.
:param kwargs: Extra arguments to pass to ``dumps``. Merged onto
``env.policies["json.dumps_kwargs"]``.
.. versionchanged:: 3.0
The ``dumper`` parameter is renamed to ``dumps``.
.. versionadded:: 2.9
"""
if dumps is None:
dumps = json.dumps
return markupsafe.Markup(
dumps(obj, **kwargs)
.replace("<", "\\u003c")
.replace(">", "\\u003e")
.replace("&", "\\u0026")
.replace("'", "\\u0027")
)
| LRUCache |
python | sqlalchemy__sqlalchemy | test/perf/compiled_extensions/base.py | {
"start": 336,
"end": 3573
} | class ____:
"""Base test case. Mark test cases with ``test_case``"""
IMPLEMENTATIONS = {}
"Keys are the impl name, values are callable to load it"
NUMBER = 1_000_000
_CASES = []
def __init__(self, impl):
self.impl = impl
self.init_objects()
def __init_subclass__(cls):
if not cls.__name__.startswith("_"):
Case._CASES.append(cls)
def init_objects(self):
pass
@classmethod
def init_class(cls):
pass
@classmethod
def _load(cls, fn):
try:
return fn()
except Exception as e:
print(f"Error loading {fn}: {e!r}")
@classmethod
def import_impl(cls):
impl = []
for name, fn in cls.IMPLEMENTATIONS.items():
obj = cls._load(fn)
if obj:
impl.append((name, obj))
return impl
@classmethod
def _divide_results(cls, results, num, div, name):
"utility method to create ratios of two implementation"
avg_str = "> mean of values"
if div in results and num in results:
num_dict = results[num]
div_dict = results[div]
assert avg_str not in num_dict and avg_str not in div_dict
assert num_dict.keys() == div_dict.keys()
results[name] = {m: num_dict[m] / div_dict[m] for m in div_dict}
not_na = [v for v in results[name].values() if not math.isnan(v)]
avg = sum(not_na) / len(not_na)
results[name][avg_str] = avg
@classmethod
def update_results(cls, results):
pass
@classmethod
def run_case(cls, factor, filter_):
objects = cls.import_impl()
cls.init_class()
number = max(1, int(cls.NUMBER * factor))
stack = [c for c in cls.mro() if c not in {object, Case}]
methods = []
while stack:
curr = stack.pop(0)
# dict keeps the definition order, dir is instead sorted
methods += [
m
for m, fn in curr.__dict__.items()
if hasattr(fn, "__test_case__")
]
if filter_:
methods = [m for m in methods if re.search(filter_, m)]
results = defaultdict(dict)
for name, impl in objects:
print(f"Running {name:<10} ", end="", flush=True)
impl_case = cls(impl)
fails = []
for m in methods:
call = getattr(impl_case, m)
try:
t_num = number
fn_num = getattr(call, "__number__", None)
if fn_num is not None:
t_num = max(1, int(fn_num * factor))
value = timeit(call, number=t_num)
print(".", end="", flush=True)
except Exception as e:
fails.append(f"{name}::{m} error: {e}")
print("x", end="", flush=True)
value = float("nan")
results[name][m] = value
print(" Done")
for f in fails:
print("\t", f)
cls.update_results(results)
return results, [name for name, _ in objects]
| Case |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/with2.py | {
"start": 1197,
"end": 1504
} | class ____(AbstractContextManager[Any]):
def __exit__(self, exc_type: Any, exc_value: Any, tb: Any) -> Literal[True]:
return True
def test3(val: str | None):
val = None
with Class5():
val = ""
raise Exception
reveal_type(val, expected_text="Literal[''] | None")
| Class5 |
python | PrefectHQ__prefect | src/prefect/settings/models/server/flow_run_graph.py | {
"start": 210,
"end": 1146
} | class ____(PrefectBaseSettings):
"""
Settings for controlling behavior of the flow run graph
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("server", "flow_run_graph")
)
max_nodes: int = Field(
default=10000,
description="The maximum size of a flow run graph on the v2 API",
validation_alias=AliasChoices(
AliasPath("max_nodes"),
"prefect_server_flow_run_graph_max_nodes",
"prefect_api_max_flow_run_graph_nodes",
),
)
max_artifacts: int = Field(
default=10000,
description="The maximum number of artifacts to show on a flow run graph on the v2 API",
validation_alias=AliasChoices(
AliasPath("max_artifacts"),
"prefect_server_flow_run_graph_max_artifacts",
"prefect_api_max_flow_run_graph_artifacts",
),
)
| ServerFlowRunGraphSettings |
python | django__django | tests/fixtures_regress/models.py | {
"start": 922,
"end": 1039
} | class ____(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ("id",)
| Parent |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_mixed.py | {
"start": 154,
"end": 4837
} | class ____:
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, figure, width, height, dpi, vector_renderer,
raster_renderer_class=None,
bbox_inches_restore=None):
"""
Parameters
----------
figure : `~matplotlib.figure.Figure`
The figure instance.
width : float
The width of the canvas in logical units
height : float
The height of the canvas in logical units
dpi : float
The dpi of the canvas
vector_renderer : `~matplotlib.backend_bases.RendererBase`
An instance of a subclass of
`~matplotlib.backend_bases.RendererBase` that will be used for the
vector drawing.
raster_renderer_class : `~matplotlib.backend_bases.RendererBase`
The renderer class to use for the raster drawing. If not provided,
this will use the Agg backend (which is currently the only viable
option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
self._vector_renderer = vector_renderer
self._raster_renderer = None
# A reference to the figure is needed as we need to change
# the figure dpi before and after the rasterization. Although
# this looks ugly, I couldn't find a better solution. -JJL
self.figure = figure
self._figdpi = figure.dpi
self._bbox_inches_restore = bbox_inches_restore
self._renderer = vector_renderer
def __getattr__(self, attr):
# Proxy everything that hasn't been overridden to the base
# renderer. Things that *are* overridden can call methods
# on self._renderer directly, but must not cache/store
# methods (because things like RendererAgg change their
# methods on the fly in order to optimise proxying down
# to the underlying C implementation).
return getattr(self._renderer, attr)
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
`stop_rasterizing` is called) will be drawn with the raster backend.
"""
# change the dpi of the figure temporarily.
self.figure.dpi = self.dpi
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._renderer = self._raster_renderer
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore,
self._raster_renderer)
self._bbox_inches_restore = r
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last `start_rasterizing` call will be copied to the
vector backend by calling draw_image.
"""
self._renderer = self._vector_renderer
height = self._height * self.dpi
img = np.asarray(self._raster_renderer.buffer_rgba())
slice_y, slice_x = cbook._get_nonzero_slices(img[..., 3])
cropped_img = img[slice_y, slice_x]
if cropped_img.size:
gc = self._renderer.new_gc()
# TODO: If the mixedmode resolution differs from the figure's
# dpi, the image must be scaled (dpi->_figdpi). Not all
# backends support this.
self._renderer.draw_image(
gc,
slice_x.start * self._figdpi / self.dpi,
(height - slice_y.stop) * self._figdpi / self.dpi,
cropped_img[::-1])
self._raster_renderer = None
# restore the figure dpi.
self.figure.dpi = self._figdpi
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore,
self._vector_renderer,
self._figdpi)
self._bbox_inches_restore = r
| MixedModeRenderer |
python | milvus-io__pymilvus | tests/test_prepare.py | {
"start": 22621,
"end": 22988
} | class ____:
def test_alter_collection_request(self):
req = Prepare.alter_collection_request('foo', {'collection.ttl.seconds': 1800})
assert req.collection_name == 'foo'
assert len(req.properties) == 1
assert req.properties[0].key == 'collection.ttl.seconds'
assert req.properties[0].value == '1800'
| TestAlterCollectionRequest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol43.py | {
"start": 159,
"end": 551
} | class ____(Protocol[A]):
def __add__(self: A, other: A) -> A: ...
T1 = TypeVar("T1", bound=HasAdd1)
def merge_element_lists1(a: Sequence[T1], b: Sequence[T1]) -> Sequence[T1]:
retval: Sequence[T1] = []
for a_elem in a:
for b_elem in b:
retval.append(a_elem + b_elem)
return retval
# This is similar to HasAdd1 except that the class isn't generic.
| HasAdd1 |
python | dagster-io__dagster | examples/airlift-migration-tutorial/tutorial_example/snippets/custom_operator_examples/custom_dag_level_proxy.py | {
"start": 282,
"end": 1413
} | class ____(BaseProxyDAGToDagsterOperator):
def get_dagster_session(self, context: Context) -> requests.Session: # pyright: ignore[reportIncompatibleMethodOverride]
if "var" not in context:
raise ValueError("No variables found in context")
api_key = context["var"]["value"].get("my_api_key")
session = requests.Session()
session.headers.update({"Authorization": f"Bearer {api_key}"})
return session
def get_dagster_url(self, context: Context) -> str: # pyright: ignore[reportIncompatibleMethodOverride]
return "https://dagster.example.com/"
# This method controls how the operator is built from the dag.
@classmethod
def build_from_dag(cls, dag: DAG):
return CustomProxyToDagsterOperator(dag=dag, task_id="OVERRIDDEN")
dag = DAG(
dag_id="custom_dag_level_proxy_example",
)
# At the end of your dag file
proxying_to_dagster(
global_vars=globals(),
proxied_state=load_proxied_state_from_yaml(Path(__file__).parent / "proxied_state"),
build_from_dag_fn=CustomProxyToDagsterOperator.build_from_dag,
)
| CustomProxyToDagsterOperator |
python | python-markdown__markdown | tests/test_syntax/inline/test_links.py | {
"start": 781,
"end": 6455
} | class ____(TestCase):
def test_nested_square_brackets(self):
self.assertMarkdownRenders(
"""[Text[[[[[[[]]]]]]][]](http://link.com) more text""",
"""<p><a href="http://link.com">Text[[[[[[[]]]]]]][]</a> more text</p>"""
)
def test_nested_round_brackets(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/(((((((()))))))())) more text""",
"""<p><a href="http://link.com/(((((((()))))))())">Text</a> more text</p>"""
)
def test_nested_escaped_brackets(self):
self.assertMarkdownRenders(
R"""[Text](/url\(test\) "title").""",
"""<p><a href="/url(test)" title="title">Text</a>.</p>"""
)
def test_nested_escaped_brackets_and_angles(self):
self.assertMarkdownRenders(
R"""[Text](</url\(test\)> "title").""",
"""<p><a href="/url(test)" title="title">Text</a>.</p>"""
)
def test_nested_unescaped_brackets(self):
self.assertMarkdownRenders(
R"""[Text](/url(test) "title").""",
"""<p><a href="/url(test)" title="title">Text</a>.</p>"""
)
def test_nested_unescaped_brackets_and_angles(self):
self.assertMarkdownRenders(
R"""[Text](</url(test)> "title").""",
"""<p><a href="/url(test)" title="title">Text</a>.</p>"""
)
def test_uneven_brackets_with_titles1(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/("title") more text""",
"""<p><a href="http://link.com/(" title="title">Text</a> more text</p>"""
)
def test_uneven_brackets_with_titles2(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/('"title") more text""",
"""<p><a href="http://link.com/('" title="title">Text</a> more text</p>"""
)
def test_uneven_brackets_with_titles3(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/("title)") more text""",
"""<p><a href="http://link.com/(" title="title)">Text</a> more text</p>"""
)
def test_uneven_brackets_with_titles4(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/( "title") more text""",
"""<p><a href="http://link.com/(" title="title">Text</a> more text</p>"""
)
def test_uneven_brackets_with_titles5(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/( "title)") more text""",
"""<p><a href="http://link.com/(" title="title)">Text</a> more text</p>"""
)
def test_mixed_title_quotes1(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/'"title") more text""",
"""<p><a href="http://link.com/'" title="title">Text</a> more text</p>"""
)
def test_mixed_title_quotes2(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/"'title') more text""",
"""<p><a href="http://link.com/"" title="title">Text</a> more text</p>"""
)
def test_mixed_title_quotes3(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/with spaces'"and quotes" 'and title') more text""",
"""<p><a href="http://link.com/with spaces" title=""and quotes" 'and title">"""
"""Text</a> more text</p>"""
)
def test_mixed_title_quotes4(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/with spaces'"and quotes" 'and title") more text""",
"""<p><a href="http://link.com/with spaces'" title="and quotes" 'and title">Text</a> more text</p>"""
)
def test_mixed_title_quotes5(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/with spaces '"and quotes" 'and title') more text""",
"""<p><a href="http://link.com/with spaces" title=""and quotes" 'and title">"""
"""Text</a> more text</p>"""
)
def test_mixed_title_quotes6(self):
self.assertMarkdownRenders(
"""[Text](http://link.com/with spaces "and quotes" 'and title') more text""",
"""<p><a href="http://link.com/with spaces "and quotes"" title="and title">"""
"""Text</a> more text</p>"""
)
def test_single_quote(self):
self.assertMarkdownRenders(
"""[test](link"notitle)""",
"""<p><a href="link"notitle">test</a></p>"""
)
def test_angle_with_mixed_title_quotes(self):
self.assertMarkdownRenders(
"""[Text](<http://link.com/with spaces '"and quotes"> 'and title') more text""",
"""<p><a href="http://link.com/with spaces '"and quotes"" title="and title">"""
"""Text</a> more text</p>"""
)
def test_amp_in_url(self):
"""Test amp in URLs."""
self.assertMarkdownRenders(
'[link](http://www.freewisdom.org/this&that)',
'<p><a href="http://www.freewisdom.org/this&that">link</a></p>'
)
self.assertMarkdownRenders(
'[title](http://example.com/?a=1&b=2)',
'<p><a href="http://example.com/?a=1&b=2">title</a></p>'
)
self.assertMarkdownRenders(
'[title](http://example.com/?a=1&b=2)',
'<p><a href="http://example.com/?a=1&b=2">title</a></p>'
)
def test_angles_and_nonsense_url(self):
self.assertMarkdownRenders(
'[test nonsense](<?}]*+|&)>).',
'<p><a href="?}]*+|&)">test nonsense</a>.</p>'
)
| TestInlineLinks |
python | gevent__gevent | src/greentest/3.10/test_threading.py | {
"start": 55887,
"end": 55986
} | class ____(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
| ConditionTests |
python | huggingface__transformers | src/transformers/models/idefics/vision.py | {
"start": 15035,
"end": 19053
} | class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`IdeficsVisionEncoderLayer`].
Args:
config: IdeficsVisionConfig
"""
def __init__(self, config: IdeficsVisionConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer
| IdeficsVisionEncoder |
python | sympy__sympy | sympy/polys/polyerrors.py | {
"start": 294,
"end": 811
} | class ____(BasePolynomialError):
def __init__(self, f, g, dom=None):
self.f, self.g, self.dom = f, g, dom
def __str__(self): # pragma: no cover
from sympy.printing.str import sstr
if self.dom is None:
return "%s does not divide %s" % (sstr(self.g), sstr(self.f))
else:
return "%s does not divide %s in %s" % (sstr(self.g), sstr(self.f), sstr(self.dom))
def new(self, f, g):
return self.__class__(f, g, self.dom)
@public
| ExactQuotientFailed |
python | numba__numba | numba/tests/test_target_extension.py | {
"start": 1882,
"end": 2121
} | class ____(GPU):
...
# register the dpu target hierarchy token in the target registry, this
# permits lookup and reference in userspace by the string "dpu"
target_registry["dpu"] = DPU
# Create a JIT DPU codegen for the DPU target
| DPU |
python | Netflix__metaflow | test/core/tests/run_id_file.py | {
"start": 67,
"end": 1220
} | class ____(MetaflowTest):
"""
Resuming and initial running of a flow should write run id file early (prior to execution)
"""
RESUME = True
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@steps(0, ["singleton-start"], required=True)
def step_start(self):
import os
from metaflow import current
# Whether we are in "run" or "resume" mode, --run-id-file must be written prior to execution
assert os.path.isfile(
"run-id"
), "run id file should exist before resume execution"
with open("run-id", "r") as f:
run_id_from_file = f.read()
assert run_id_from_file == current.run_id
# Test both regular run and resume paths
if not is_resumed():
raise ResumeFromHere()
@steps(2, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
pass
| RunIdFileTest |
python | apache__airflow | providers/airbyte/src/airflow/providers/airbyte/triggers/airbyte.py | {
"start": 1086,
"end": 4971
} | class ____(BaseTrigger):
"""
Triggers Airbyte Sync, makes an asynchronous HTTP call to get the status via a job ID.
This trigger is designed to initiate and monitor the status of Airbyte Sync jobs. It
makes use of asynchronous communication to check the progress of a job run over time.
:param conn_id: The connection identifier for connecting to Airbyte.
:param job_id: The ID of an Airbyte Sync job.
:param end_time: Time in seconds to wait for a job run to reach a terminal status. Defaults to 7 days.
:param poll_interval: polling period in seconds to check for the status.
"""
def __init__(
self,
job_id: int,
conn_id: str,
end_time: float,
poll_interval: float,
):
super().__init__()
self.job_id = job_id
self.conn_id = conn_id
self.end_time = end_time
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize AirbyteSyncTrigger arguments and classpath."""
return (
"airflow.providers.airbyte.triggers.airbyte.AirbyteSyncTrigger",
{
"job_id": self.job_id,
"conn_id": self.conn_id,
"end_time": self.end_time,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make async connection to Airbyte, polls for the pipeline run status."""
hook = AirbyteHook(airbyte_conn_id=self.conn_id)
try:
while await self.is_still_running(hook):
if self.end_time < time.time():
yield TriggerEvent(
{
"status": "error",
"message": f"Job run {self.job_id} has not reached a terminal status after "
f"{self.end_time} seconds.",
"job_id": self.job_id,
}
)
return
await asyncio.sleep(self.poll_interval)
job_run_status = hook.get_job_status(self.job_id)
if job_run_status == JobStatusEnum.SUCCEEDED:
yield TriggerEvent(
{
"status": "success",
"message": f"Job run {self.job_id} has completed successfully.",
"job_id": self.job_id,
}
)
elif job_run_status == JobStatusEnum.CANCELLED:
yield TriggerEvent(
{
"status": "cancelled",
"message": f"Job run {self.job_id} has been cancelled.",
"job_id": self.job_id,
}
)
else:
yield TriggerEvent(
{
"status": "error",
"message": f"Job run {self.job_id} has failed.",
"job_id": self.job_id,
}
)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e), "job_id": self.job_id})
async def is_still_running(self, hook: AirbyteHook) -> bool:
"""
Async function to check whether the job is submitted via async API.
If job is in running state returns True if it is still running else return False
"""
job_run_status = hook.get_job_status(self.job_id)
if job_run_status in (JobStatusEnum.RUNNING, JobStatusEnum.PENDING, JobStatusEnum.INCOMPLETE):
self.log.debug(
"Job run status is: %s with context: %s", job_run_status, hook.get_job_details(self.job_id)
)
return True
return False
| AirbyteSyncTrigger |
python | numpy__numpy | numpy/_core/tests/test_conversion_utils.py | {
"start": 3470,
"end": 3749
} | class ____(StringConverterTestCase):
""" Tests of PyArray_SelectkindConverter """
conv = mt.run_selectkind_converter
case_insensitive = False
exact_match = True
def test_valid(self):
self._check('introselect', 'NPY_INTROSELECT')
| TestSelectkindConverter |
python | dask__distributed | distributed/tests/test_active_memory_manager.py | {
"start": 42132,
"end": 44434
} | class ____:
def __init__(self):
self.n = 0
def increment(self):
self.n += 1
@gen_cluster(client=True, config=demo_config("drop"))
async def test_dont_drop_actors(c, s, a, b):
x = c.submit(Counter, key="x", actor=True, workers=[a.address])
y = c.submit(lambda cnt: cnt.increment(), x, key="y", workers=[b.address])
await wait([x, y])
assert len(s.tasks["x"].who_has) == 2
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 2
@gen_cluster(client=True, config=demo_config("replicate"))
async def test_dont_replicate_actors(c, s, a, b):
x = c.submit(Counter, key="x", actor=True)
await wait(x)
assert len(s.tasks["x"].who_has) == 1
s.extensions["amm"].run_once()
await asyncio.sleep(0.2)
assert len(s.tasks["x"].who_has) == 1
@pytest.mark.parametrize("has_proxy", [False, True])
@gen_cluster(client=True, config=NO_AMM)
async def test_RetireWorker_with_actor(c, s, a, b, has_proxy):
"""A worker holding one or more original actor objects cannot be retired"""
x = c.submit(Counter, key="x", actor=True, workers=[a.address])
await wait(x)
assert "x" in a.state.actors
if has_proxy:
y = c.submit(
lambda cnt: cnt.increment().result(), x, key="y", workers=[b.address]
)
await wait(y)
assert "x" in b.data
assert "y" in b.data
with captured_logger("distributed.active_memory_manager", logging.WARNING) as log:
out = await c.retire_workers([a.address])
assert not out
assert "it holds actor(s)" in log.getvalue()
assert "x" in a.state.actors
if has_proxy:
assert "x" in b.data
assert "y" in b.data
@gen_cluster(client=True, config=NO_AMM)
async def test_RetireWorker_with_actor_proxy(c, s, a, b):
"""A worker holding an Actor proxy object can be retired as normal."""
x = c.submit(Counter, key="x", actor=True, workers=[a.address])
y = c.submit(lambda cnt: cnt.increment().result(), x, key="y", workers=[b.address])
await wait(y)
assert "x" in a.state.actors
assert "x" in b.data
assert "y" in b.data
out = await c.retire_workers([b.address])
assert b.address in out
assert "x" in a.state.actors
assert "y" in a.data
| Counter |
python | huggingface__transformers | tests/models/electra/test_modeling_electra.py | {
"start": 12961,
"end": 23009
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
ElectraModel,
ElectraForPreTraining,
ElectraForMaskedLM,
ElectraForCausalLM,
ElectraForMultipleChoice,
ElectraForTokenClassification,
ElectraForSequenceClassification,
ElectraForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": ElectraModel,
"fill-mask": ElectraForMaskedLM,
"question-answering": ElectraForQuestionAnswering,
"text-classification": ElectraForSequenceClassification,
"text-generation": ElectraForCausalLM,
"token-classification": ElectraForTokenClassification,
"zero-shot": ElectraForSequenceClassification,
}
if is_torch_available()
else {}
)
# Overwriting to add `is_decoder` flag
def prepare_config_and_inputs_for_generate(self, batch_size=2):
config, inputs = super().prepare_config_and_inputs_for_generate(batch_size)
config.is_decoder = True
return config, inputs
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = ElectraModelTester(self)
self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_electra_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_model(*config_and_inputs)
def test_electra_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_electra_model_as_decoder(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_token_classification(*config_and_inputs)
def test_for_pre_training(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_pretraining(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_sequence_classification(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_question_answering(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_electra_for_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "google/electra-small-generator"
model = ElectraModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_electra_for_causal_lm(*config_and_inputs)
def attention_mask_padding_matches_padding_free_with_position_ids(
self, attn_implementation: str, fa_kwargs: bool = False
):
"""
Overwritten to account for the embeddings that rely on position ids.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
max_new_tokens = 30
support_flag = {
"sdpa": "_supports_sdpa",
"flash_attention_2": "_supports_flash_attn",
"flash_attention_3": "_supports_flash_attn",
}
for model_class in self.all_generative_model_classes:
if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]):
self.skipTest(f"{model_class.__name__} does not support {attn_implementation}")
# can't infer if new attn mask API is supported by assume that only model with attention backend support it
if not model_class._supports_attention_backend:
self.skipTest(f"{model_class.__name__} does not support new attention mask API")
if model_class._is_stateful: # non-transformer models most probably have no packing support
self.skipTest(f"{model_class.__name__} doesn't support packing!")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if config.is_encoder_decoder:
self.skipTest("Model is an encoder-decoder")
if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict:
self.skipTest("Model dummy inputs should contain padding in their attention mask")
if "input_ids" not in inputs_dict or inputs_dict["input_ids"].ndim != 2:
self.skipTest("Model dummy inputs should contain text input ids")
# make sure that all models have enough positions for generation
dummy_input_ids = inputs_dict["input_ids"]
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input_ids.shape[1] + 1
model = model_class(config)
if "position_ids" not in inspect.signature(model.forward).parameters:
self.skipTest("Model does not support position_ids")
if (not fa_kwargs) and "position_ids" not in inspect.signature(model.forward).parameters:
continue # this model doesn't accept position ids as input
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# Drop all keys except for the minimal set. Hard to manipulate with multimodals etc
inputs_dict = {k: v for k, v in inputs_dict.items() if k in ["input_ids", "attention_mask"]}
# Ensure left padding, to adapt for some models
if 0 in inputs_dict["attention_mask"][:, -1]:
inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1)
dummy_attention_mask = inputs_dict["attention_mask"]
dummy_input_ids[~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id
# Main difference to other models, we need to prepare position ids according to the attention mask
# as we use it to extract embeddings that rely on the correct position - naively increasing sequences do
# not suffice anymore atp. The solution here calculates an increasing sequences for all 1s and puts 0s else.
inputs_dict["position_ids"] = ((inputs_dict["attention_mask"] == 1).long().cumsum(dim=1) - 1) * (
inputs_dict["attention_mask"] == 1
).long()
model = (
model_class.from_pretrained(
tmpdirname,
dtype=torch.bfloat16,
attn_implementation=attn_implementation,
)
.to(torch_device)
.eval()
)
if fa_kwargs:
# flatten
features = [
{"input_ids": i[a.bool()].tolist()} for i, a in zip(dummy_input_ids, dummy_attention_mask)
]
# add position_ids + fa_kwargs
data_collator = DataCollatorWithFlattening(return_tensors="pt", return_flash_attn_kwargs=True)
batch = data_collator(features)
padfree_inputs_dict = {
k: t.to(torch_device) if torch.is_tensor(t) else t for k, t in batch.items()
}
else:
# create packed position_ids
position_ids = (
torch.cat([torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()])
.long()
.unsqueeze(0)
.to(torch_device)
)
padfree_inputs_dict = {
"input_ids": dummy_input_ids[dummy_attention_mask.bool()].unsqueeze(0),
"position_ids": position_ids,
}
# We need to do simple forward without cache in order to trigger packed SDPA/flex/eager attention path
res_padded = model(**inputs_dict, use_cache=False)
res_padfree = model(**padfree_inputs_dict, use_cache=False)
logits_padded = res_padded.logits[dummy_attention_mask.bool()]
logits_padfree = res_padfree.logits[0]
# acceptable numerical instability
tol = torch.finfo(torch.bfloat16).eps
torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
@require_torch
| ElectraModelTest |
python | scrapy__scrapy | tests/test_utils_iterators.py | {
"start": 447,
"end": 9907
} | class ____(ABC):
@abstractmethod
def xmliter(
self, obj: Response | str | bytes, nodename: str, *args: Any
) -> Iterator[Selector]:
raise NotImplementedError
def test_xmliter(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<products xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="someschmea.xsd">
<product id="001">
<type>Type 1</type>
<name>Name 1</name>
</product>
<product id="002">
<type>Type 2</type>
<name>Name 2</name>
</product>
</products>
"""
response = XmlResponse(url="http://example.com", body=body)
attrs = [
(
x.attrib["id"],
x.xpath("name/text()").getall(),
x.xpath("./type/text()").getall(),
)
for x in self.xmliter(response, "product")
]
assert attrs == [
("001", ["Name 1"], ["Type 1"]),
("002", ["Name 2"], ["Type 2"]),
]
def test_xmliter_unusual_node(self):
body = b"""<?xml version="1.0" encoding="UTF-8"?>
<root>
<matchme...></matchme...>
<matchmenot></matchmenot>
</root>
"""
response = XmlResponse(url="http://example.com", body=body)
nodenames = [
e.xpath("name()").getall() for e in self.xmliter(response, "matchme...")
]
assert nodenames == [["matchme..."]]
def test_xmliter_unicode(self):
# example taken from https://github.com/scrapy/scrapy/issues/1665
body = """<?xml version="1.0" encoding="UTF-8"?>
<þingflokkar>
<þingflokkur id="26">
<heiti />
<skammstafanir>
<stuttskammstöfun>-</stuttskammstöfun>
<löngskammstöfun />
</skammstafanir>
<tímabil>
<fyrstaþing>80</fyrstaþing>
</tímabil>
</þingflokkur>
<þingflokkur id="21">
<heiti>Alþýðubandalag</heiti>
<skammstafanir>
<stuttskammstöfun>Ab</stuttskammstöfun>
<löngskammstöfun>Alþb.</löngskammstöfun>
</skammstafanir>
<tímabil>
<fyrstaþing>76</fyrstaþing>
<síðastaþing>123</síðastaþing>
</tímabil>
</þingflokkur>
<þingflokkur id="27">
<heiti>Alþýðuflokkur</heiti>
<skammstafanir>
<stuttskammstöfun>A</stuttskammstöfun>
<löngskammstöfun>Alþfl.</löngskammstöfun>
</skammstafanir>
<tímabil>
<fyrstaþing>27</fyrstaþing>
<síðastaþing>120</síðastaþing>
</tímabil>
</þingflokkur>
</þingflokkar>"""
for r in (
# with bytes
XmlResponse(url="http://example.com", body=body.encode("utf-8")),
# Unicode body needs encoding information
XmlResponse(url="http://example.com", body=body, encoding="utf-8"),
):
attrs = [
(
x.attrib["id"],
x.xpath("./skammstafanir/stuttskammstöfun/text()").getall(),
x.xpath("./tímabil/fyrstaþing/text()").getall(),
)
for x in self.xmliter(r, "þingflokkur")
]
assert attrs == [
("26", ["-"], ["80"]),
("21", ["Ab"], ["76"]),
("27", ["A"], ["27"]),
]
def test_xmliter_text(self):
body = (
'<?xml version="1.0" encoding="UTF-8"?>'
"<products><product>one</product><product>two</product></products>"
)
assert [x.xpath("text()").getall() for x in self.xmliter(body, "product")] == [
["one"],
["two"],
]
def test_xmliter_namespaces(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:g="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<g:image_link>http://www.mydummycompany.com/images/item1.jpg</g:image_link>
<g:id>ITEM_1</g:id>
<g:price>400</g:price>
</item>
</channel>
</rss>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
my_iter = self.xmliter(response, "item")
node = next(my_iter)
node.register_namespace("g", "http://base.google.com/ns/1.0")
assert node.xpath("title/text()").getall() == ["Item 1"]
assert node.xpath("description/text()").getall() == ["This is item 1"]
assert node.xpath("link/text()").getall() == [
"http://www.mydummycompany.com/items/1"
]
assert node.xpath("g:image_link/text()").getall() == [
"http://www.mydummycompany.com/images/item1.jpg"
]
assert node.xpath("g:id/text()").getall() == ["ITEM_1"]
assert node.xpath("g:price/text()").getall() == ["400"]
assert node.xpath("image_link/text()").getall() == []
assert node.xpath("id/text()").getall() == []
assert node.xpath("price/text()").getall() == []
def test_xmliter_namespaced_nodename(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:g="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<g:image_link>http://www.mydummycompany.com/images/item1.jpg</g:image_link>
<g:id>ITEM_1</g:id>
<g:price>400</g:price>
</item>
</channel>
</rss>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
my_iter = self.xmliter(response, "g:image_link")
node = next(my_iter)
node.register_namespace("g", "http://base.google.com/ns/1.0")
assert node.xpath("text()").extract() == [
"http://www.mydummycompany.com/images/item1.jpg"
]
def test_xmliter_namespaced_nodename_missing(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:g="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<g:image_link>http://www.mydummycompany.com/images/item1.jpg</g:image_link>
<g:id>ITEM_1</g:id>
<g:price>400</g:price>
</item>
</channel>
</rss>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
my_iter = self.xmliter(response, "g:link_image")
with pytest.raises(StopIteration):
next(my_iter)
def test_xmliter_exception(self):
body = (
'<?xml version="1.0" encoding="UTF-8"?>'
"<products><product>one</product><product>two</product></products>"
)
my_iter = self.xmliter(body, "product")
next(my_iter)
next(my_iter)
with pytest.raises(StopIteration):
next(my_iter)
def test_xmliter_objtype_exception(self):
i = self.xmliter(42, "product")
with pytest.raises(TypeError):
next(i)
def test_xmliter_encoding(self):
body = (
b'<?xml version="1.0" encoding="ISO-8859-9"?>\n'
b"<xml>\n"
b" <item>Some Turkish Characters \xd6\xc7\xde\xdd\xd0\xdc \xfc\xf0\xfd\xfe\xe7\xf6</item>\n"
b"</xml>\n\n"
)
response = XmlResponse("http://www.example.com", body=body)
assert (
next(self.xmliter(response, "item")).get()
== "<item>Some Turkish Characters \xd6\xc7\u015e\u0130\u011e\xdc \xfc\u011f\u0131\u015f\xe7\xf6</item>"
)
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
| TestXmliterBase |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_iso_languages.py | {
"start": 1873,
"end": 4411
} | class ____(ColumnMapExpectation):
"""Expect value to be valid ISO 639-3 languages."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_iso_lan": [
"Hungarian",
"es",
"Bn",
"HU",
"Bengali",
],
"some_invalid": [
"Hungary",
"es",
"hun",
"Turkey",
"USA",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_iso_lan"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_invalid", "mostly": 0.7},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_iso_languages"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["pycountry"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeIsoLanguages().print_diagnostic_checklist()
| ExpectColumnValuesToBeIsoLanguages |
python | encode__httpx | httpx/_decoders.py | {
"start": 2649,
"end": 4728
} | class ____(ContentDecoder):
"""
Handle 'brotli' decoding.
Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/
or `pip install brotli`. See https://github.com/google/brotli
Supports both 'brotlipy' and 'Brotli' packages since they share an import
name. The top branches are for 'brotlipy' and bottom branches for 'Brotli'
"""
def __init__(self) -> None:
if brotli is None: # pragma: no cover
raise ImportError(
"Using 'BrotliDecoder', but neither of the 'brotlicffi' or 'brotli' "
"packages have been installed. "
"Make sure to install httpx using `pip install httpx[brotli]`."
) from None
self.decompressor = brotli.Decompressor()
self.seen_data = False
self._decompress: typing.Callable[[bytes], bytes]
if hasattr(self.decompressor, "decompress"):
# The 'brotlicffi' package.
self._decompress = self.decompressor.decompress # pragma: no cover
else:
# The 'brotli' package.
self._decompress = self.decompressor.process # pragma: no cover
def decode(self, data: bytes) -> bytes:
if not data:
return b""
self.seen_data = True
try:
return self._decompress(data)
except brotli.error as exc:
raise DecodingError(str(exc)) from exc
def flush(self) -> bytes:
if not self.seen_data:
return b""
try:
if hasattr(self.decompressor, "finish"):
# Only available in the 'brotlicffi' package.
# As the decompressor decompresses eagerly, this
# will never actually emit any data. However, it will potentially throw
# errors if a truncated or damaged data stream has been used.
self.decompressor.finish() # pragma: no cover
return b""
except brotli.error as exc: # pragma: no cover
raise DecodingError(str(exc)) from exc
| BrotliDecoder |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol35.py | {
"start": 376,
"end": 419
} | class ____(Protocol):
y: P1
@dataclass
| P2 |
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 79603,
"end": 80901
} | class ____(unittest.TestCase):
def setUp(self):
self.handler = AbstractDigestAuthHandler()
@skip_libssl_fips_mode
def test_md5_algorithm(self):
H, KD = self.handler.get_algorithm_impls('MD5')
self.assertEqual(H("foo"), "acbd18db4cc2f85cedef654fccc4a4d8")
self.assertEqual(KD("foo", "bar"), "4e99e8c12de7e01535248d2bac85e732")
@skip_libssl_fips_mode
def test_sha_algorithm(self):
H, KD = self.handler.get_algorithm_impls('SHA')
self.assertEqual(H("foo"), "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
self.assertEqual(KD("foo", "bar"), "54dcbe67d21d5eb39493d46d89ae1f412d3bd6de")
@skip_libssl_fips_mode
def test_sha256_algorithm(self):
H, KD = self.handler.get_algorithm_impls('SHA-256')
self.assertEqual(H("foo"), "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae")
self.assertEqual(KD("foo", "bar"), "a765a8beaa9d561d4c5cbed29d8f4e30870297fdfa9cb7d6e9848a95fec9f937")
def test_invalid_algorithm(self):
with self.assertRaises(ValueError) as exc:
self.handler.get_algorithm_impls('invalid')
self.assertEqual(
str(exc.exception),
"Unsupported digest authentication algorithm 'invalid'"
)
| TestDigestAuthAlgorithms |
python | getsentry__sentry | src/sentry/web/frontend/base.py | {
"start": 13739,
"end": 21593
} | class ____(View, OrganizationMixin):
auth_required = True
# TODO(dcramer): change sudo so it can be required only on POST
sudo_required = False
csrf_protect = True
def __init__(
self,
auth_required: bool | None = None,
sudo_required: bool | None = None,
csrf_protect: bool | None = None,
*args: Any,
**kwargs: Any,
) -> None:
if auth_required is not None:
self.auth_required = auth_required
if sudo_required is not None:
self.sudo_required = sudo_required
if csrf_protect is not None:
self.csrf_protect = csrf_protect
super().__init__(*args, **kwargs)
@method_decorator(csrf_exempt)
def dispatch(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponseBase:
"""
A note on the CSRF protection process.
Because the CSRF decorators don't work well with view subclasses, we
allow them to control whether a CSRF check is done by setting
self.csrf_protect. This has a couple of implications:
1. We need to mark this method as @csrf_exempt so that when the CSRF
middleware checks it as part of the regular middleware sequence, it
always passes.
2. If self.csrf_protect is set, we will re-run the CSRF check ourselves
using CsrfViewMiddleware().process_view()
3. But first we must remove the csrf_exempt attribute that was set by
the decorator so that the middleware doesn't shortcut and pass the
check unconditionally again.
"""
organization_slug = kwargs.get("organization_slug", None)
if request and is_using_customer_domain(request) and not subdomain_is_region(request):
organization_slug = request.subdomain
self.active_organization = determine_active_organization(request, organization_slug)
if self.csrf_protect:
try:
del self.dispatch.__func__.csrf_exempt # type: ignore[attr-defined] # python/mypy#14123
except AttributeError:
pass
response = self.test_csrf(request)
if response:
return response
if (
is_using_customer_domain(request)
and "organization_slug" in inspect.signature(self.convert_args).parameters
and "organization_slug" not in kwargs
):
# In customer domain contexts, we will need to pre-populate the organization_slug keyword argument.
kwargs["organization_slug"] = organization_slug
if self.is_auth_required(request, *args, **kwargs):
return self.handle_auth_required(request, *args, **kwargs)
if self.is_sudo_required(request):
return self.handle_sudo_required(request, *args, **kwargs)
args, kwargs = self.convert_args(request, *args, **kwargs)
try:
request.access = self.get_access(request, *args, **kwargs)
except DataSecrecyError:
return render_to_response(
"sentry/data-secrecy.html",
context={"organization_slug": organization_slug},
status=403,
request=request,
)
if not self.has_permission(request, *args, **kwargs):
return self.handle_permission_required(request, *args, **kwargs)
if "organization" in kwargs:
org = kwargs["organization"]
if self.is_member_disabled_from_limit(request, org):
return self.handle_disabled_member(org)
if self.is_not_2fa_compliant(request, org):
return self.handle_not_2fa_compliant(request, *args, **kwargs)
self.request = request
self.default_context = self.get_context_data(request, *args, **kwargs)
return self.handle(request, *args, **kwargs)
def test_csrf(self, request: HttpRequest) -> HttpResponseBase | None:
middleware = CsrfViewMiddleware(placeholder_get_response)
return middleware.process_view(request, self.dispatch, (request,), {})
def get_access(self, request: HttpRequest, *args: Any, **kwargs: Any) -> access.Access:
return access.DEFAULT
def convert_args(
self, request: HttpRequest, *args: Any, **kwargs: Any
) -> tuple[tuple[Any, ...], dict[str, Any]]:
return (args, kwargs)
def handle(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponseBase:
return super().dispatch(request, *args, **kwargs)
def is_auth_required(self, request: HttpRequest, *args: Any, **kwargs: Any) -> bool:
return self.auth_required and not (request.user.is_authenticated and request.user.is_active)
def handle_auth_required(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
auth.initiate_login(request, next_url=request.get_full_path())
if "organization_slug" in kwargs:
redirect_to = reverse("sentry-auth-organization", args=[kwargs["organization_slug"]])
else:
redirect_to = auth.get_login_url()
query_params = {
"referrer": request.GET.get("referrer"),
REDIRECT_FIELD_NAME: request.GET.get(REDIRECT_FIELD_NAME),
}
redirect_uri = construct_link_with_query(path=redirect_to, query_params=query_params)
return self.redirect(redirect_uri, headers={"X-Robots-Tag": "noindex, nofollow"})
def is_sudo_required(self, request: HttpRequest) -> bool:
return self.sudo_required and not request.is_sudo()
def handle_sudo_required(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
return redirect_to_sudo(request.get_full_path())
def has_permission(self, request: HttpRequest, *args: Any, **kwargs: Any) -> bool:
return True
def handle_permission_required(
self, request: HttpRequest, *args: Any, **kwargs: Any
) -> HttpResponse:
path = reverse("sentry-login")
query_params = {
"referrer": request.GET.get("referrer"),
REDIRECT_FIELD_NAME: request.GET.get(REDIRECT_FIELD_NAME),
}
redirect_uri = construct_link_with_query(path=path, query_params=query_params)
return self.redirect(redirect_uri)
def handle_not_2fa_compliant(
self, request: HttpRequest, *args: Any, **kwargs: Any
) -> HttpResponse:
redirect_uri = self.get_not_2fa_compliant_url(request, *args, **kwargs)
return self.redirect(redirect_uri)
def get_not_2fa_compliant_url(self, request: HttpRequest, *args: Any, **kwargs: Any) -> str:
return reverse("sentry-account-settings-security")
def get_context_data(self, request: HttpRequest, **kwargs: Any) -> dict[str, Any]:
return csrf(request)
def respond(
self, template: str, context: dict[str, Any] | None = None, status: int = 200
) -> HttpResponse:
default_context = self.default_context
if context:
default_context.update(context)
return render_to_response(template, default_context, self.request, status=status)
def redirect(self, url: str, headers: Mapping[str, str] | None = None) -> HttpResponseRedirect:
res = HttpResponseRedirect(url)
if headers:
for k, v in headers.items():
res[k] = v
return res
def create_audit_entry(
self, request: HttpRequest, transaction_id: int | None = None, **kwargs: Any
) -> object:
return create_audit_entry(request, transaction_id, audit_logger, **kwargs)
def handle_disabled_member(self, organization: Organization) -> HttpResponse:
redirect_uri = reverse("sentry-organization-disabled-member", args=[organization.slug])
return self.redirect(redirect_uri)
| BaseView |
python | pytorch__pytorch | test/test_fake_tensor.py | {
"start": 60788,
"end": 70013
} | class ____(TestCase):
def test_fake_tensor_prop_on_nn_module(self):
class ToyNnModuleWithParameters(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer1 = torch.nn.Linear(4, 3)
self.layer2 = torch.nn.Linear(3, 2)
def forward(self, value):
value = self.layer1(value)
value = torch.relu(value)
value = self.layer2(value)
return value
model = ToyNnModuleWithParameters()
value = torch.randn(5, 4)
# Convert nn.Module to GraphModule so that FakeTensorProp runs.
graph_model = torch.fx.symbolic_trace(model, (value,))
# The following block runs FakeTensorProp on graph_module w/to the same FakeTensorMode
#
# TODO(wschin): there should be an API to run FakeTensorProp for GraphModule
# with parameters and buffers.
with FakeTensorMode() as fake_tensor_mode:
def to_fake_tensor(x):
if isinstance(x, torch.Tensor) and not isinstance(x, FakeTensor):
return fake_tensor_mode.from_tensor(x)
return x
fake_parameters_and_buffers = {
k: to_fake_tensor(v)
for k, v in itertools.chain(
graph_model.named_parameters(), graph_model.named_buffers()
)
}
with torch.nn.utils.stateless._reparametrize_module(
graph_model, fake_parameters_and_buffers
):
# This case uses the **same** fake tensor mode to
# 1. create fake parameters and fake buffers, and
# 2. run FakeTensorProp
# The result should be correct.
result = FakeTensorProp(graph_model, fake_tensor_mode).propagate(value)
self.assertTrue(isinstance(result, FakeTensor))
self.assertEqual(result.shape, (5, 2))
# This case uses the **different** fake tensor modes to
# 1. create fake parameters and fake buffers, and
# 2. run FakeTensorProp
# The following code should fail.
failed = False
try:
FakeTensorProp(graph_model).propagate(value)
except AssertionError:
# AssertionError: tensor's device must be `meta`, got cpu instead
failed = True
self.assertTrue(failed)
def test_fake_tensor_prop_on_nn_module_with_optional_args(self):
class OptionalArgumentInBetween(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer1 = torch.nn.Linear(4, 3)
self.layer2 = torch.nn.Linear(3, 2)
def forward(self, value, another_value=None, another_optional_value=None):
# Mimic huggingface's `forward` methods which have several optional arguments.
# For example, GPT accepts forward(self, input_ids, None, attention_mask, ...).
# To apply FakeTensorProp, its from_real_tensor(...) needs to accept None.
if another_value is None:
another_value = torch.rand_like(value)
if another_optional_value is None:
another_optional_value = torch.rand_like(value)
value = value + another_value + another_optional_value
return value * value
fake_mode = FakeTensorMode(
allow_non_fake_inputs=True, allow_fallback_kernels=False
)
with fake_mode:
model = OptionalArgumentInBetween()
value = torch.randn(5, 4)
another_optional_value = torch.randn(5, 4)
graph_model = torch.fx.symbolic_trace(
model, (value, None, another_optional_value)
)
FakeTensorProp(graph_model, fake_mode).propagate(
value, None, another_optional_value
)
def test_unbacked_shape_realloc(self):
def f(x):
return x.nonzero()
shape_env = ShapeEnv()
fake_mode = FakeTensorMode(shape_env=shape_env)
with fake_mode:
value = torch.randn(5)
gm = make_fx(f)(value)
nonzero_nodes = [
n for n in gm.graph.nodes if n.target is torch.ops.aten.nonzero.default
]
self.assertEqual(len(nonzero_nodes), 1)
self.assertIsInstance(nonzero_nodes[0].meta["val"].shape[0], torch.SymInt)
u0 = nonzero_nodes[0].meta["val"].shape[0]
FakeTensorProp(gm, fake_mode).propagate(value)
u1 = nonzero_nodes[0].meta["val"].shape[0]
# Test that this test is actually doing something in that the
# FakeTensorProp actually triggered a reallocation. If this assert is
# failing, it could be because we started memoizing the nnz count for
# nonzero, which is nice in some sense (no reallocation) but not
# helpful for this test, which is checking what we do when we have
# to reallocate. If so, you need to make this example more
# complicated (e.g., maybe have a nontrivial computation on the input
# before feeding it into nonzero, or have some sort of randomness)
self.assertIsNot(u0, u1)
self.assertTrue(statically_known_true(u0 == u1))
def test_nonzero_stride(self):
shape_env = ShapeEnv()
fake_mode = FakeTensorMode(shape_env=shape_env)
with fake_mode:
value = torch.ones(5)
fake_r = value.nonzero()
r = torch.ones(5).nonzero()
self.assertEqual(fake_r.T.is_contiguous(), r.T.is_contiguous())
def test_nan_to_num(self):
shape_env = ShapeEnv()
fake_mode = FakeTensorMode(shape_env=shape_env)
with fake_mode:
x = torch.randn(5, 10).t()
y = torch.nan_to_num(x, nan=0.0, posinf=0.0, neginf=0.0)
self.assertEqual(x.size(), y.size())
self.assertEqual(x.stride(), y.stride())
@unittest.skipIf(not RUN_CUDA, "requires cuda")
def test_torch_load_with_fake_mode(self):
model = torch.nn.Linear(5, 10)
sd = model.state_dict()
sd["tt"] = TwoTensor(torch.randn(2), torch.randn(2))
def _read_tensor_and_check(key, sd_loaded, all_bytes, device):
dtype = torch.float32
t = sd_loaded[key]
self.assertEqual(t.device.type, device)
if isinstance(t, TwoTensor):
untyped_storage_a, untyped_storage_b = (
t.a.untyped_storage(),
t.b.untyped_storage(),
)
offset_a, offset_b = (
untyped_storage_a._checkpoint_offset,
untyped_storage_b._checkpoint_offset,
)
nbytes_a, nbytes_b = (
untyped_storage_a.nbytes() // 4,
untyped_storage_b.nbytes() // 4,
)
result_a = torch.frombuffer(
all_bytes, dtype=dtype, count=nbytes_a, offset=offset_a
).resize_(t.a.size())
result_b = torch.frombuffer(
all_bytes, dtype=dtype, count=nbytes_b, offset=offset_b
).resize_(t.b.size())
self.assertEqual(TwoTensor(result_a, result_b), sd[key])
else:
untyped_storage = t.untyped_storage()
offset = untyped_storage._checkpoint_offset
nbytes = untyped_storage.nbytes() // 4
result = torch.frombuffer(
all_bytes, dtype=dtype, count=nbytes, offset=offset
).resize_(t.size())
self.assertEqual(result, sd[key])
with TemporaryFileName() as f, torch.serialization.safe_globals([TwoTensor]):
# Create state_dict to be loaded later
torch.save(sd, f)
with open(f, "rb") as g:
all_bytes = g.read()
fake_mode = FakeTensorMode()
with fake_mode:
sd_loaded = torch.load(f)
for k in sd:
_read_tensor_and_check(k, sd_loaded, all_bytes, "cpu")
with fake_mode:
sd_loaded = torch.load(f, map_location="cuda")
for k in sd:
_read_tensor_and_check(k, sd_loaded, all_bytes, "cuda")
for k in sd:
sd[k] = sd[k].to("cuda")
with TemporaryFileName() as f, torch.serialization.safe_globals([TwoTensor]):
torch.save(sd, f)
with open(f, "rb") as g:
all_bytes = g.read()
fake_mode = FakeTensorMode()
with fake_mode:
sd_loaded = torch.load(f)
for k in sd:
_read_tensor_and_check(k, sd_loaded, all_bytes, "cuda")
with fake_mode:
sd_loaded = torch.load(f, map_location="cpu")
for k in sd:
_read_tensor_and_check(k, sd_loaded, all_bytes, "cpu")
make_propagate_real_tensors_cls(FakeTensorPropTest)
| FakeTensorPropTest |
python | scipy__scipy | scipy/stats/_distribution_infrastructure.py | {
"start": 208543,
"end": 220553
} | class ____(_ProbabilityDistribution):
r"""Representation of a mixture distribution.
A mixture distribution is the distribution of a random variable
defined in the following way: first, a random variable is selected
from `components` according to the probabilities given by `weights`, then
the selected random variable is realized.
Parameters
----------
components : sequence of `ContinuousDistribution`
The underlying instances of `ContinuousDistribution`.
All must have scalar shape parameters (if any); e.g., the `pdf` evaluated
at a scalar argument must return a scalar.
weights : sequence of floats, optional
The corresponding probabilities of selecting each random variable.
Must be non-negative and sum to one. The default behavior is to weight
all components equally.
Attributes
----------
components : sequence of `ContinuousDistribution`
The underlying instances of `ContinuousDistribution`.
weights : ndarray
The corresponding probabilities of selecting each random variable.
Methods
-------
support
sample
moment
mean
median
mode
variance
standard_deviation
skewness
kurtosis
pdf
logpdf
cdf
icdf
ccdf
iccdf
logcdf
ilogcdf
logccdf
ilogccdf
entropy
Notes
-----
The following abbreviations are used throughout the documentation.
- PDF: probability density function
- CDF: cumulative distribution function
- CCDF: complementary CDF
- entropy: differential entropy
- log-*F*: logarithm of *F* (e.g. log-CDF)
- inverse *F*: inverse function of *F* (e.g. inverse CDF)
References
----------
.. [1] Mixture distribution, *Wikipedia*,
https://en.wikipedia.org/wiki/Mixture_distribution
Examples
--------
A mixture of normal distributions:
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> X1 = stats.Normal(mu=-2, sigma=1)
>>> X2 = stats.Normal(mu=2, sigma=1)
>>> mixture = stats.Mixture([X1, X2], weights=[0.4, 0.6])
>>> print(f'mean: {mixture.mean():.2f}, '
... f'median: {mixture.median():.2f}, '
... f'mode: {mixture.mode():.2f}')
mean: 0.40, median: 1.04, mode: 2.00
>>> x = np.linspace(-10, 10, 300)
>>> plt.plot(x, mixture.pdf(x))
>>> plt.title('PDF of normal distribution mixture')
>>> plt.show()
"""
# Todo:
# Add support for array shapes, weights
def _input_validation(self, components, weights):
if len(components) == 0:
message = ("`components` must contain at least one random variable.")
raise ValueError(message)
for var in components:
# will generalize to other kinds of distributions when there
# *are* other kinds of distributions
if not isinstance(var, ContinuousDistribution):
message = ("Each element of `components` must be an instance of "
"`ContinuousDistribution`.")
raise ValueError(message)
if not var._shape == ():
message = "All elements of `components` must have scalar shapes."
raise ValueError(message)
if weights is None:
return components, weights
weights = np.asarray(weights)
if weights.shape != (len(components),):
message = "`components` and `weights` must have the same length."
raise ValueError(message)
if not np.issubdtype(weights.dtype, np.inexact):
message = "`weights` must have floating point dtype."
raise ValueError(message)
if not np.isclose(np.sum(weights), 1.0):
message = "`weights` must sum to 1.0."
raise ValueError(message)
if not np.all(weights >= 0):
message = "All `weights` must be non-negative."
raise ValueError(message)
return components, weights
def __init__(self, components, *, weights=None):
components, weights = self._input_validation(components, weights)
n = len(components)
dtype = np.result_type(*(var._dtype for var in components))
self._shape = np.broadcast_shapes(*(var._shape for var in components))
self._dtype, self._components = dtype, components
self._weights = np.full(n, 1/n, dtype=dtype) if weights is None else weights
self.validation_policy = None
@property
def components(self):
return list(self._components)
@property
def weights(self):
return self._weights.copy()
def _full(self, val, *args):
args = [np.asarray(arg) for arg in args]
dtype = np.result_type(self._dtype, *(arg.dtype for arg in args))
shape = np.broadcast_shapes(self._shape, *(arg.shape for arg in args))
return np.full(shape, val, dtype=dtype)
def _sum(self, fun, *args):
out = self._full(0, *args)
for var, weight in zip(self._components, self._weights):
out += getattr(var, fun)(*args) * weight
return out[()]
def _logsum(self, fun, *args):
out = self._full(-np.inf, *args)
for var, log_weight in zip(self._components, np.log(self._weights)):
np.logaddexp(out, getattr(var, fun)(*args) + log_weight, out=out)
return out[()]
def support(self):
a = self._full(np.inf)
b = self._full(-np.inf)
for var in self._components:
a = np.minimum(a, var.support()[0])
b = np.maximum(b, var.support()[1])
return a, b
def _raise_if_method(self, method):
if method is not None:
raise NotImplementedError("`method` not implemented for this distribution.")
def logentropy(self, *, method=None):
self._raise_if_method(method)
def log_integrand(x):
# `x` passed by `_tanhsinh` will be of complex dtype because
# `log_integrand` returns complex values, but the imaginary
# component is always zero. Extract the real part because
# `logpdf` uses `logaddexp`, which fails for complex input.
return self.logpdf(x.real) + np.log(self.logpdf(x.real) + 0j)
res = _tanhsinh(log_integrand, *self.support(), log=True).integral
return _log_real_standardize(res + np.pi*1j)
def entropy(self, *, method=None):
self._raise_if_method(method)
return _tanhsinh(lambda x: -self.pdf(x) * self.logpdf(x),
*self.support()).integral
def mode(self, *, method=None):
self._raise_if_method(method)
a, b = self.support()
def f(x): return -self.pdf(x)
res = _bracket_minimum(f, 1., xmin=a, xmax=b)
res = _chandrupatla_minimize(f, res.xl, res.xm, res.xr)
return res.x
def median(self, *, method=None):
self._raise_if_method(method)
return self.icdf(0.5)
def mean(self, *, method=None):
self._raise_if_method(method)
return self._sum('mean')
def variance(self, *, method=None):
self._raise_if_method(method)
return self._moment_central(2)
def standard_deviation(self, *, method=None):
self._raise_if_method(method)
return self.variance()**0.5
def skewness(self, *, method=None):
self._raise_if_method(method)
return self._moment_standardized(3)
def kurtosis(self, *, method=None):
self._raise_if_method(method)
return self._moment_standardized(4)
def moment(self, order=1, kind='raw', *, method=None):
self._raise_if_method(method)
kinds = {'raw': self._moment_raw,
'central': self._moment_central,
'standardized': self._moment_standardized}
order = ContinuousDistribution._validate_order_kind(self, order, kind, kinds)
moment_kind = kinds[kind]
return moment_kind(order)
def _moment_raw(self, order):
out = self._full(0)
for var, weight in zip(self._components, self._weights):
out += var.moment(order, kind='raw') * weight
return out[()]
def _moment_central(self, order):
order = int(order)
out = self._full(0)
for var, weight in zip(self._components, self._weights):
moment_as = [var.moment(order, kind='central')
for order in range(order + 1)]
a, b = var.mean(), self.mean()
moment = var._moment_transform_center(order, moment_as, a, b)
out += moment * weight
return out[()]
def _moment_standardized(self, order):
return self._moment_central(order) / self.standard_deviation()**order
def pdf(self, x, /, *, method=None):
self._raise_if_method(method)
return self._sum('pdf', x)
def logpdf(self, x, /, *, method=None):
self._raise_if_method(method)
return self._logsum('logpdf', x)
def pmf(self, x, /, *, method=None):
self._raise_if_method(method)
return self._sum('pmf', x)
def logpmf(self, x, /, *, method=None):
self._raise_if_method(method)
return self._logsum('logpmf', x)
def cdf(self, x, y=None, /, *, method=None):
self._raise_if_method(method)
args = (x,) if y is None else (x, y)
return self._sum('cdf', *args)
def logcdf(self, x, y=None, /, *, method=None):
self._raise_if_method(method)
args = (x,) if y is None else (x, y)
return self._logsum('logcdf', *args)
def ccdf(self, x, y=None, /, *, method=None):
self._raise_if_method(method)
args = (x,) if y is None else (x, y)
return self._sum('ccdf', *args)
def logccdf(self, x, y=None, /, *, method=None):
self._raise_if_method(method)
args = (x,) if y is None else (x, y)
return self._logsum('logccdf', *args)
def _invert(self, fun, p):
xmin, xmax = self.support()
fun = getattr(self, fun)
f = lambda x, p: fun(x) - p # noqa: E731 is silly
xl0, xr0 = _guess_bracket(xmin, xmax)
res = _bracket_root(f, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, args=(p,))
return _chandrupatla(f, a=res.xl, b=res.xr, args=(p,)).x
def icdf(self, p, /, *, method=None):
self._raise_if_method(method)
return self._invert('cdf', p)
def iccdf(self, p, /, *, method=None):
self._raise_if_method(method)
return self._invert('ccdf', p)
def ilogcdf(self, p, /, *, method=None):
self._raise_if_method(method)
return self._invert('logcdf', p)
def ilogccdf(self, p, /, *, method=None):
self._raise_if_method(method)
return self._invert('logccdf', p)
def sample(self, shape=(), *, rng=None, method=None):
self._raise_if_method(method)
rng = np.random.default_rng(rng)
size = np.prod(np.atleast_1d(shape))
ns = rng.multinomial(size, self._weights)
x = [var.sample(shape=n, rng=rng) for n, var in zip(ns, self._components)]
x = np.reshape(rng.permuted(np.concatenate(x)), shape)
return x[()]
def __repr__(self):
result = "Mixture(\n"
result += " [\n"
with np.printoptions(threshold=10):
for component in self.components:
result += f" {repr(component)},\n"
result += " ],\n"
result += f" weights={repr(self.weights)},\n"
result += ")"
return result
def __str__(self):
result = "Mixture(\n"
result += " [\n"
with np.printoptions(threshold=10):
for component in self.components:
result += f" {str(component)},\n"
result += " ],\n"
result += f" weights={str(self.weights)},\n"
result += ")"
return result
| Mixture |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 173227,
"end": 176887
} | class ____(ColumnCollectionConstraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
"""
_allow_multiple_tables = True
__visit_name__ = "table_or_column_check_constraint"
@_document_text_coercion(
"sqltext",
":class:`.CheckConstraint`",
":paramref:`.CheckConstraint.sqltext`",
)
def __init__(
self,
sqltext: _TextCoercedExpressionArgument[Any],
name: _ConstraintNameArgument = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
table: Optional[Table] = None,
info: Optional[_InfoType] = None,
_create_rule: Optional[Any] = None,
_autoattach: bool = True,
_type_bound: bool = False,
**dialect_kw: Any,
) -> None:
r"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct. If given as a string,
the object is converted to a :func:`_expression.text` object.
If the textual
string includes a colon character, escape this using a backslash::
CheckConstraint(r"foo ~ E'a(?\:b|c)d")
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
"""
self.sqltext = coercions.expect(roles.DDLExpressionRole, sqltext)
columns: List[Column[Any]] = []
visitors.traverse(self.sqltext, {}, {"column": columns.append})
super().__init__(
name=name,
deferrable=deferrable,
initially=initially,
_create_rule=_create_rule,
info=info,
_type_bound=_type_bound,
_autoattach=_autoattach,
*columns,
**dialect_kw,
)
if table is not None:
self._set_parent_with_dispatch(table)
@property
def is_column_level(self) -> bool:
return not isinstance(self.parent, Table)
@util.deprecated(
"1.4",
"The :meth:`_schema.CheckConstraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> CheckConstraint:
return self._copy(target_table=target_table, **kw)
def _copy(
self, *, target_table: Optional[Table] = None, **kw: Any
) -> CheckConstraint:
if target_table is not None:
# note that target_table is None for the copy process of
# a column-bound CheckConstraint, so this path is not reached
# in that case.
sqltext = _copy_expression(self.sqltext, self.table, target_table)
else:
sqltext = self.sqltext
c = CheckConstraint(
sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule,
table=target_table,
comment=self.comment,
_autoattach=False,
_type_bound=self._type_bound,
)
return self._schema_item_copy(c)
| CheckConstraint |
python | wandb__wandb | wandb/sdk/artifacts/storage_layout.py | {
"start": 83,
"end": 313
} | class ____(str, Enum):
V1 = "V1"
V2 = "V2"
@classmethod
def from_env(cls) -> StorageLayout:
from wandb.env import get_use_v1_artifacts
return cls.V1 if get_use_v1_artifacts() else cls.V2
| StorageLayout |
python | celery__celery | t/unit/utils/test_collections.py | {
"start": 1575,
"end": 4208
} | class ____:
def setup_method(self):
self.view = ConfigurationView(
{'changed_key': 1, 'both': 2},
[
{'default_key': 1, 'both': 1},
],
)
def test_setdefault(self):
self.view.setdefault('both', 36)
assert self.view['both'] == 2
self.view.setdefault('new', 36)
assert self.view['new'] == 36
def test_get(self):
assert self.view.get('both') == 2
sp = object()
assert self.view.get('nonexisting', sp) is sp
def test_update(self):
changes = dict(self.view.changes)
self.view.update(a=1, b=2, c=3)
assert self.view.changes == dict(changes, a=1, b=2, c=3)
def test_contains(self):
assert 'changed_key' in self.view
assert 'default_key' in self.view
assert 'new' not in self.view
def test_repr(self):
assert 'changed_key' in repr(self.view)
assert 'default_key' in repr(self.view)
def test_iter(self):
expected = {
'changed_key': 1,
'default_key': 1,
'both': 2,
}
assert dict(self.view.items()) == expected
assert sorted(list(iter(self.view))) == sorted(list(expected.keys()))
assert sorted(list(self.view.keys())) == sorted(list(expected.keys()))
assert (sorted(list(self.view.values())) ==
sorted(list(expected.values())))
assert 'changed_key' in list(self.view.keys())
assert 2 in list(self.view.values())
assert ('both', 2) in list(self.view.items())
def test_add_defaults_dict(self):
defaults = {'foo': 10}
self.view.add_defaults(defaults)
assert self.view.foo == 10
def test_add_defaults_object(self):
defaults = Bunch(foo=10)
self.view.add_defaults(defaults)
assert self.view.foo == 10
def test_clear(self):
self.view.clear()
assert self.view.both == 1
assert 'changed_key' not in self.view
def test_bool(self):
assert bool(self.view)
self.view.maps[:] = []
assert not bool(self.view)
def test_len(self):
assert len(self.view) == 3
self.view.KEY = 33
assert len(self.view) == 4
self.view.clear()
assert len(self.view) == 2
def test_isa_mapping(self):
from collections.abc import Mapping
assert issubclass(ConfigurationView, Mapping)
def test_isa_mutable_mapping(self):
from collections.abc import MutableMapping
assert issubclass(ConfigurationView, MutableMapping)
| test_ConfigurationView |
python | pallets__quart | src/quart/typing.py | {
"start": 5995,
"end": 9608
} | class ____(Protocol):
app: Quart
cookie_jar: CookieJar | None
http_connection_class: type[TestHTTPConnectionProtocol]
push_promises: list[tuple[str, Headers]]
websocket_connection_class: type[TestWebsocketConnectionProtocol]
def __init__(self, app: Quart, use_cookies: bool = True) -> None: ...
async def open(
self,
path: str,
*,
method: str = "GET",
headers: dict | Headers | None = None,
data: AnyStr | None = None,
form: dict | None = None,
files: dict[str, FileStorage] | None = None,
query_string: dict | None = None,
json: Any,
scheme: str = "http",
follow_redirects: bool = False,
root_path: str = "",
http_version: str = "1.1",
scope_base: dict | None = None,
auth: Authorization | tuple[str, str] | None = None,
subdomain: str | None = None,
) -> Response: ...
def request(
self,
path: str,
*,
method: str = "GET",
headers: dict | Headers | None = None,
query_string: dict | None = None,
scheme: str = "http",
root_path: str = "",
http_version: str = "1.1",
scope_base: dict | None = None,
auth: Authorization | tuple[str, str] | None = None,
subdomain: str | None = None,
) -> TestHTTPConnectionProtocol: ...
def websocket(
self,
path: str,
*,
headers: dict | Headers | None = None,
query_string: dict | None = None,
scheme: str = "ws",
subprotocols: list[str] | None = None,
root_path: str = "",
http_version: str = "1.1",
scope_base: dict | None = None,
auth: Authorization | tuple[str, str] | None = None,
subdomain: str | None = None,
) -> TestWebsocketConnectionProtocol: ...
async def delete(self, *args: Any, **kwargs: Any) -> Response: ...
async def get(self, *args: Any, **kwargs: Any) -> Response: ...
async def head(self, *args: Any, **kwargs: Any) -> Response: ...
async def options(self, *args: Any, **kwargs: Any) -> Response: ...
async def patch(self, *args: Any, **kwargs: Any) -> Response: ...
async def post(self, *args: Any, **kwargs: Any) -> Response: ...
async def put(self, *args: Any, **kwargs: Any) -> Response: ...
async def trace(self, *args: Any, **kwargs: Any) -> Response: ...
def set_cookie(
self,
server_name: str,
key: str,
value: str = "",
max_age: int | timedelta | None = None,
expires: int | float | datetime | None = None,
path: str = "/",
domain: str | None = None,
secure: bool = False,
httponly: bool = False,
samesite: str = None,
charset: str = "utf-8",
) -> None: ...
def delete_cookie(
self, server_name: str, key: str, path: str = "/", domain: str | None = None
) -> None: ...
def session_transaction(
self,
path: str = "/",
*,
method: str = "GET",
headers: dict | Headers | None = None,
query_string: dict | None = None,
scheme: str = "http",
data: AnyStr | None = None,
form: dict | None = None,
json: Any = None,
root_path: str = "",
http_version: str = "1.1",
) -> AbstractAsyncContextManager[SessionMixin]: ...
async def __aenter__(self) -> TestClientProtocol: ...
async def __aexit__(
self, exc_type: type, exc_value: BaseException, tb: TracebackType
) -> None: ...
| TestClientProtocol |
python | realpython__materials | python-tuple/employee_named_tuple.py | {
"start": 43,
"end": 375
} | class ____(NamedTuple):
name: str
age: int
position: str = "Python Developer"
with open("employees.csv", mode="r") as csv_file:
reader = csv.reader(csv_file)
next(reader) # Skip headers
employees = []
for name, age, position in reader:
employees.append(Employee(name, int(age), position))
| Employee |
python | numba__llvmlite | llvmlite/ir/_utils.py | {
"start": 1433,
"end": 2001
} | class ____(object):
def set_metadata(self, name, node):
"""
Attach unnamed metadata *node* to the metadata slot *name* of this
value.
"""
self.metadata[name] = node
def _stringify_metadata(self, leading_comma=False):
if self.metadata:
buf = []
if leading_comma:
buf.append("")
buf += ["!{0} {1}".format(k, v.get_reference())
for k, v in self.metadata.items()]
return ', '.join(buf)
else:
return ''
| _HasMetadata |
python | realpython__materials | inheritance-and-composition/composition/contacts.py | {
"start": 0,
"end": 442
} | class ____:
def __init__(self, street, city, state, zipcode, street2=""):
self.street = street
self.street2 = street2
self.city = city
self.state = state
self.zipcode = zipcode
def __str__(self):
lines = [self.street]
if self.street2:
lines.append(self.street2)
lines.append(f"{self.city}, {self.state} {self.zipcode}")
return "\n".join(lines)
| Address |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 3459,
"end": 3575
} | class ____(StructuralRole):
__slots__ = ()
_role_name = "statement sub-expression element"
| StatementOptionRole |
python | sympy__sympy | sympy/matrices/expressions/matmul.py | {
"start": 935,
"end": 15509
} | class ____(MatrixExpr, Mul):
"""
A product of matrix expressions
Examples
========
>>> from sympy import MatMul, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 4)
>>> B = MatrixSymbol('B', 4, 3)
>>> C = MatrixSymbol('C', 3, 6)
>>> MatMul(A, B, C)
A*B*C
"""
is_MatMul = True
identity = GenericIdentity()
def __new__(cls, *args, evaluate=False, check=None, _sympify=True):
if not args:
return cls.identity
# This must be removed aggressively in the constructor to avoid
# TypeErrors from GenericIdentity().shape
args = list(filter(lambda i: cls.identity != i, args))
if _sympify:
args = list(map(sympify, args))
obj = Basic.__new__(cls, *args)
factor, matrices = obj.as_coeff_matrices()
if check is not None:
sympy_deprecation_warning(
"Passing check to MatMul is deprecated and the check argument will be removed in a future version.",
deprecated_since_version="1.11",
active_deprecations_target='remove-check-argument-from-matrix-operations')
if check is not False:
validate(*matrices)
if not matrices:
# Should it be
#
# return Basic.__neq__(cls, factor, GenericIdentity()) ?
return factor
if evaluate:
return cls._evaluate(obj)
return obj
@classmethod
def _evaluate(cls, expr):
return canonicalize(expr)
@property
def shape(self):
matrices = [arg for arg in self.args if arg.is_Matrix]
return (matrices[0].rows, matrices[-1].cols)
def _entry(self, i, j, expand=True, **kwargs):
# Avoid cyclic imports
from sympy.concrete.summations import Sum
from sympy.matrices.immutable import ImmutableMatrix
coeff, matrices = self.as_coeff_matrices()
if len(matrices) == 1: # situation like 2*X, matmul is just X
return coeff * matrices[0][i, j]
indices = [None]*(len(matrices) + 1)
ind_ranges = [None]*(len(matrices) - 1)
indices[0] = i
indices[-1] = j
def f():
counter = 1
while True:
yield Dummy("i_%i" % counter)
counter += 1
dummy_generator = kwargs.get("dummy_generator", f())
for i in range(1, len(matrices)):
indices[i] = next(dummy_generator)
for i, arg in enumerate(matrices[:-1]):
ind_ranges[i] = arg.shape[1] - 1
matrices = [arg._entry(indices[i], indices[i+1], dummy_generator=dummy_generator) for i, arg in enumerate(matrices)]
expr_in_sum = Mul.fromiter(matrices)
if any(v.has(ImmutableMatrix) for v in matrices):
expand = True
result = coeff*Sum(
expr_in_sum,
*zip(indices[1:-1], [0]*len(ind_ranges), ind_ranges)
)
# Don't waste time in result.doit() if the sum bounds are symbolic
if not any(isinstance(v, (Integer, int)) for v in ind_ranges):
expand = False
return result.doit() if expand else result
def as_coeff_matrices(self):
scalars = [x for x in self.args if not x.is_Matrix]
matrices = [x for x in self.args if x.is_Matrix]
coeff = Mul(*scalars)
if coeff.is_commutative is False:
raise NotImplementedError("noncommutative scalars in MatMul are not supported.")
return coeff, matrices
def as_coeff_mmul(self):
coeff, matrices = self.as_coeff_matrices()
return coeff, MatMul(*matrices)
def expand(self, **kwargs):
expanded = super(MatMul, self).expand(**kwargs)
return self._evaluate(expanded)
def _eval_transpose(self):
"""Transposition of matrix multiplication.
Notes
=====
The following rules are applied.
Transposition for matrix multiplied with another matrix:
`\\left(A B\\right)^{T} = B^{T} A^{T}`
Transposition for matrix multiplied with scalar:
`\\left(c A\\right)^{T} = c A^{T}`
References
==========
.. [1] https://en.wikipedia.org/wiki/Transpose
"""
coeff, matrices = self.as_coeff_matrices()
return MatMul(
coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()
def _eval_adjoint(self):
return MatMul(*[adjoint(arg) for arg in self.args[::-1]]).doit()
def _eval_trace(self):
factor, mmul = self.as_coeff_mmul()
if factor != 1:
from .trace import trace
return factor * trace(mmul.doit())
def _eval_determinant(self):
from sympy.matrices.expressions.determinant import Determinant
factor, matrices = self.as_coeff_matrices()
square_matrices = only_squares(*matrices)
return factor**self.rows * Mul(*list(map(Determinant, square_matrices)))
def _eval_inverse(self):
if all(arg.is_square for arg in self.args if isinstance(arg, MatrixExpr)):
return MatMul(*(
arg.inverse() if isinstance(arg, MatrixExpr) else arg**-1
for arg in self.args[::-1]
)
).doit()
return Inverse(self)
def doit(self, **hints):
deep = hints.get('deep', True)
if deep:
args = tuple(arg.doit(**hints) for arg in self.args)
else:
args = self.args
# treat scalar*MatrixSymbol or scalar*MatPow separately
expr = canonicalize(MatMul(*args))
return expr
# Needed for partial compatibility with Mul
def args_cnc(self, cset=False, warn=True, **kwargs):
coeff_c = [x for x in self.args if x.is_commutative]
coeff_nc = [x for x in self.args if not x.is_commutative]
if cset:
clen = len(coeff_c)
coeff_c = set(coeff_c)
if clen and warn and len(coeff_c) != clen:
raise ValueError('repeated commutative arguments: %s' %
[ci for ci in coeff_c if list(self.args).count(ci) > 1])
return [coeff_c, coeff_nc]
def _eval_derivative_matrix_lines(self, x):
from .transpose import Transpose
with_x_ind = [i for i, arg in enumerate(self.args) if arg.has(x)]
lines = []
for ind in with_x_ind:
left_args = self.args[:ind]
right_args = self.args[ind+1:]
if right_args:
right_mat = MatMul.fromiter(right_args)
else:
right_mat = Identity(self.shape[1])
if left_args:
left_rev = MatMul.fromiter([Transpose(i).doit() if i.is_Matrix else i for i in reversed(left_args)])
else:
left_rev = Identity(self.shape[0])
d = self.args[ind]._eval_derivative_matrix_lines(x)
for i in d:
i.append_first(left_rev)
i.append_second(right_mat)
lines.append(i)
return lines
mul.register_handlerclass((Mul, MatMul), MatMul)
# Rules
def newmul(*args):
if args[0] == 1:
args = args[1:]
return new(MatMul, *args)
def any_zeros(mul):
if any(arg.is_zero or (arg.is_Matrix and arg.is_ZeroMatrix)
for arg in mul.args):
matrices = [arg for arg in mul.args if arg.is_Matrix]
return ZeroMatrix(matrices[0].rows, matrices[-1].cols)
return mul
def merge_explicit(matmul):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, Matrix, MatMul, pprint
>>> from sympy.matrices.expressions.matmul import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = Matrix([[1, 1], [1, 1]])
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatMul(A, B, C)
>>> pprint(X)
[1 1] [1 2]
A*[ ]*[ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[4 6]
A*[ ]
[4 6]
>>> X = MatMul(B, A, C)
>>> pprint(X)
[1 1] [1 2]
[ ]*A*[ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[1 1] [1 2]
[ ]*A*[ ]
[1 1] [3 4]
"""
if not any(isinstance(arg, MatrixBase) for arg in matmul.args):
return matmul
newargs = []
last = matmul.args[0]
for arg in matmul.args[1:]:
if isinstance(arg, (MatrixBase, Number)) and isinstance(last, (MatrixBase, Number)):
last = last * arg
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
def remove_ids(mul):
""" Remove Identities from a MatMul
This is a modified version of sympy.strategies.rm_id.
This is necessary because MatMul may contain both MatrixExprs and Exprs
as args.
See Also
========
sympy.strategies.rm_id
"""
# Separate Exprs from MatrixExprs in args
factor, mmul = mul.as_coeff_mmul()
# Apply standard rm_id for MatMuls
result = rm_id(lambda x: x.is_Identity is True)(mmul)
if result != mmul:
return newmul(factor, *result.args) # Recombine and return
else:
return mul
def factor_in_front(mul):
factor, matrices = mul.as_coeff_matrices()
if factor != 1:
return newmul(factor, *matrices)
return mul
def combine_powers(mul):
r"""Combine consecutive powers with the same base into one, e.g.
$$A \times A^2 \Rightarrow A^3$$
This also cancels out the possible matrix inverses using the
knowledgebase of :class:`~.Inverse`, e.g.,
$$ Y \times X \times X^{-1} \Rightarrow Y $$
"""
factor, args = mul.as_coeff_matrices()
new_args = [args[0]]
for i in range(1, len(args)):
A = new_args[-1]
B = args[i]
if isinstance(B, Inverse) and isinstance(B.arg, MatMul):
Bargs = B.arg.args
l = len(Bargs)
if list(Bargs) == new_args[-l:]:
new_args = new_args[:-l] + [Identity(B.shape[0])]
continue
if isinstance(A, Inverse) and isinstance(A.arg, MatMul):
Aargs = A.arg.args
l = len(Aargs)
if list(Aargs) == args[i:i+l]:
identity = Identity(A.shape[0])
new_args[-1] = identity
for j in range(i, i+l):
args[j] = identity
continue
if A.is_square == False or B.is_square == False:
new_args.append(B)
continue
if isinstance(A, MatPow):
A_base, A_exp = A.args
else:
A_base, A_exp = A, S.One
if isinstance(B, MatPow):
B_base, B_exp = B.args
else:
B_base, B_exp = B, S.One
if A_base == B_base:
new_exp = A_exp + B_exp
new_args[-1] = MatPow(A_base, new_exp).doit(deep=False)
continue
elif not isinstance(B_base, MatrixBase):
try:
B_base_inv = B_base.inverse()
except NonInvertibleMatrixError:
B_base_inv = None
if B_base_inv is not None and A_base == B_base_inv:
new_exp = A_exp - B_exp
new_args[-1] = MatPow(A_base, new_exp).doit(deep=False)
continue
new_args.append(B)
return newmul(factor, *new_args)
def combine_permutations(mul):
"""Refine products of permutation matrices as the products of cycles.
"""
args = mul.args
l = len(args)
if l < 2:
return mul
result = [args[0]]
for i in range(1, l):
A = result[-1]
B = args[i]
if isinstance(A, PermutationMatrix) and \
isinstance(B, PermutationMatrix):
cycle_1 = A.args[0]
cycle_2 = B.args[0]
result[-1] = PermutationMatrix(cycle_1 * cycle_2)
else:
result.append(B)
return MatMul(*result)
def combine_one_matrices(mul):
"""
Combine products of OneMatrix
e.g. OneMatrix(2, 3) * OneMatrix(3, 4) -> 3 * OneMatrix(2, 4)
"""
factor, args = mul.as_coeff_matrices()
new_args = [args[0]]
for B in args[1:]:
A = new_args[-1]
if not isinstance(A, OneMatrix) or not isinstance(B, OneMatrix):
new_args.append(B)
continue
new_args.pop()
new_args.append(OneMatrix(A.shape[0], B.shape[1]))
factor *= A.shape[1]
return newmul(factor, *new_args)
def distribute_monom(mul):
"""
Simplify MatMul expressions but distributing
rational term to MatMul.
e.g. 2*(A+B) -> 2*A + 2*B
"""
args = mul.args
if len(args) == 2:
from .matadd import MatAdd
if args[0].is_MatAdd and args[1].is_Rational:
return MatAdd(*[MatMul(mat, args[1]).doit() for mat in args[0].args])
if args[1].is_MatAdd and args[0].is_Rational:
return MatAdd(*[MatMul(args[0], mat).doit() for mat in args[1].args])
return mul
rules = (
distribute_monom, any_zeros, remove_ids, combine_one_matrices, combine_powers, unpack, rm_id(lambda x: x == 1),
merge_explicit, factor_in_front, flatten, combine_permutations)
canonicalize = exhaust(typed({MatMul: do_one(*rules)}))
def only_squares(*matrices):
"""factor matrices only if they are square"""
if matrices[0].rows != matrices[-1].cols:
raise RuntimeError("Invalid matrices being multiplied")
out = []
start = 0
for i, M in enumerate(matrices):
if M.cols == matrices[start].rows:
out.append(MatMul(*matrices[start:i+1]).doit())
start = i+1
return out
def refine_MatMul(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine
>>> X = MatrixSymbol('X', 2, 2)
>>> expr = X * X.T
>>> print(expr)
X*X.T
>>> with assuming(Q.orthogonal(X)):
... print(refine(expr))
I
"""
newargs = []
exprargs = []
for args in expr.args:
if args.is_Matrix:
exprargs.append(args)
else:
newargs.append(args)
last = exprargs[0]
for arg in exprargs[1:]:
if arg == last.T and ask(Q.orthogonal(arg), assumptions):
last = Identity(arg.shape[0])
elif arg == last.conjugate() and ask(Q.unitary(arg), assumptions):
last = Identity(arg.shape[0])
else:
newargs.append(last)
last = arg
newargs.append(last)
return MatMul(*newargs)
handlers_dict['MatMul'] = refine_MatMul
| MatMul |
python | huggingface__transformers | src/transformers/models/beit/modeling_beit.py | {
"start": 39931,
"end": 41125
} | class ____(nn.Module):
"""
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, tuple[int, int]],
padding: Union[int, tuple[int, int], str] = 0,
bias: bool = False,
dilation: Union[int, tuple[int, int]] = 1,
) -> None:
super().__init__()
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding,
bias=bias,
dilation=dilation,
)
self.bn = nn.BatchNorm2d(out_channels)
self.activation = nn.ReLU()
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = self.conv(input)
output = self.bn(output)
output = self.activation(output)
return output
| BeitConvModule |
python | scikit-learn__scikit-learn | sklearn/cluster/_mean_shift.py | {
"start": 10268,
"end": 20332
} | class ____(ClusterMixin, BaseEstimator):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
For an example of how to use MeanShift clustering, refer to:
:ref:`sphx_glr_auto_examples_cluster_plot_mean_shift.py`.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, default=None
Bandwidth used in the flat kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array-like of shape (n_samples, n_features), default=None
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : bool, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
The default value is False.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : bool, default=True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int, default=None
The number of jobs to use for the computation. The following tasks benefit
from the parallelization:
- The search of nearest neighbors for bandwidth estimation and label
assignments. See the details in the docstring of the
``NearestNeighbors`` class.
- Hill-climbing optimization for all seeds.
See :term:`Glossary <n_jobs>` for more details.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
max_iter : int, default=300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
.. versionadded:: 0.22
Attributes
----------
cluster_centers_ : ndarray of shape (n_clusters, n_features)
Coordinates of cluster centers.
labels_ : ndarray of shape (n_samples,)
Labels of each point.
n_iter_ : int
Maximum number of iterations performed on each seed.
.. versionadded:: 0.22
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
KMeans : K-Means clustering.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
Examples
--------
>>> from sklearn.cluster import MeanShift
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = MeanShift(bandwidth=2).fit(X)
>>> clustering.labels_
array([1, 1, 1, 0, 0, 0])
>>> clustering.predict([[0, 0], [5, 5]])
array([1, 0])
>>> clustering
MeanShift(bandwidth=2)
For a comparison of Mean Shift clustering with other clustering algorithms, see
:ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
"""
_parameter_constraints: dict = {
"bandwidth": [Interval(Real, 0, None, closed="neither"), None],
"seeds": ["array-like", None],
"bin_seeding": ["boolean"],
"min_bin_freq": [Interval(Integral, 1, None, closed="left")],
"cluster_all": ["boolean"],
"n_jobs": [Integral, None],
"max_iter": [Interval(Integral, 0, None, closed="left")],
}
def __init__(
self,
*,
bandwidth=None,
seeds=None,
bin_seeding=False,
min_bin_freq=1,
cluster_all=True,
n_jobs=None,
max_iter=300,
):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
self.max_iter = max_iter
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Perform clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to cluster.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted instance.
"""
X = validate_data(self, X)
bandwidth = self.bandwidth
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)
seeds = self.seeds
if seeds is None:
if self.bin_seeding:
seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
# We use n_jobs=1 because this will be used in nested calls under
# parallel calls to _mean_shift_single_seed so there is no need for
# for further parallelism.
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=self.n_jobs)(
delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
for seed in seeds
)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i][1]: # i.e. len(points_within) > 0
center_intensity_dict[all_res[i][0]] = all_res[i][1]
self.n_iter_ = max([x[2] for x in all_res])
if not center_intensity_dict:
# nothing near seeds
raise ValueError(
"No point was within bandwidth=%f of any seed. Try a different seeding"
" strategy or increase the bandwidth."
% bandwidth
)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(
center_intensity_dict.items(),
key=lambda tup: (tup[1], tup[0]),
reverse=True,
)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=bool)
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
sorted_centers
)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
0
]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=int)
distances, idxs = nbrs.kneighbors(X)
if self.cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
self.cluster_centers_, self.labels_ = cluster_centers, labels
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
with config_context(assume_finite=True):
return pairwise_distances_argmin(X, self.cluster_centers_)
| MeanShift |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/clipboard/base.py | {
"start": 1415,
"end": 1768
} | class ____(Clipboard):
"""
Clipboard implementation that doesn't remember anything.
"""
def set_data(self, data: ClipboardData) -> None:
pass
def set_text(self, text: str) -> None:
pass
def rotate(self) -> None:
pass
def get_data(self) -> ClipboardData:
return ClipboardData()
| DummyClipboard |
python | sympy__sympy | sympy/core/numbers.py | {
"start": 120297,
"end": 122388
} | class ____(NumberSymbol, metaclass=Singleton):
r"""The tribonacci constant.
Explanation
===========
The tribonacci numbers are like the Fibonacci numbers, but instead
of starting with two predetermined terms, the sequence starts with
three predetermined terms and each term afterwards is the sum of the
preceding three terms.
The tribonacci constant is the ratio toward which adjacent tribonacci
numbers tend. It is a root of the polynomial `x^3 - x^2 - x - 1 = 0`,
and also satisfies the equation `x + x^{-3} = 2`.
TribonacciConstant is a singleton, and can be accessed
by ``S.TribonacciConstant``.
Examples
========
>>> from sympy import S
>>> S.TribonacciConstant > 1
True
>>> S.TribonacciConstant.expand(func=True)
1/3 + (19 - 3*sqrt(33))**(1/3)/3 + (3*sqrt(33) + 19)**(1/3)/3
>>> S.TribonacciConstant.is_irrational
True
>>> S.TribonacciConstant.n(20)
1.8392867552141611326
References
==========
.. [1] https://en.wikipedia.org/wiki/Generalizations_of_Fibonacci_numbers#Tribonacci_numbers
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = ()
def _latex(self, printer):
return r"\text{TribonacciConstant}"
def __int__(self):
return 1
def _as_mpf_val(self, prec):
return self._eval_evalf(prec)._mpf_
def _eval_evalf(self, prec):
rv = self._eval_expand_func(function=True)._eval_evalf(prec + 4)
return Float(rv, precision=prec)
def _eval_expand_func(self, **hints):
from sympy.functions.elementary.miscellaneous import cbrt, sqrt
return (1 + cbrt(19 - 3*sqrt(33)) + cbrt(19 + 3*sqrt(33))) / 3
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.One, Rational(2))
elif issubclass(number_cls, Rational):
pass
_eval_rewrite_as_sqrt = _eval_expand_func
| TribonacciConstant |
python | pallets__jinja | src/jinja2/compiler.py | {
"start": 9182,
"end": 73918
} | class ____(NodeVisitor):
def __init__(
self,
environment: "Environment",
name: str | None,
filename: str | None,
stream: t.TextIO | None = None,
defer_init: bool = False,
optimized: bool = True,
) -> None:
if stream is None:
stream = StringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
self.optimizer: Optimizer | None = None
if optimized:
self.optimizer = Optimizer(environment)
# aliases for imports
self.import_aliases: dict[str, str] = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks: dict[str, nodes.Block] = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests: dict[str, str] = {}
self.filters: dict[str, str] = {}
# the debug information
self.debug_info: list[tuple[int, int]] = []
self._write_debug_info: int | None = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# Tracks toplevel assignments
self._assign_stack: list[set[str]] = []
# Tracks parameter definition blocks
self._param_def_block: list[set[str]] = []
# Tracks the current context.
self._context_reference_stack = ["context"]
@property
def optimized(self) -> bool:
return self.optimizer is not None
# -- Various compilation helpers
def fail(self, msg: str, lineno: int) -> "te.NoReturn":
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self) -> str:
"""Get a new unique identifier."""
self._last_identifier += 1
return f"t_{self._last_identifier}"
def buffer(self, frame: Frame) -> None:
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline(f"{frame.buffer} = []")
def return_buffer_contents(
self, frame: Frame, force_unescaped: bool = False
) -> None:
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
self.writeline("if context.eval_ctx.autoescape:")
self.indent()
self.writeline(f"return Markup(concat({frame.buffer}))")
self.outdent()
self.writeline("else:")
self.indent()
self.writeline(f"return concat({frame.buffer})")
self.outdent()
return
elif frame.eval_ctx.autoescape:
self.writeline(f"return Markup(concat({frame.buffer}))")
return
self.writeline(f"return concat({frame.buffer})")
def indent(self) -> None:
"""Indent by one."""
self._indentation += 1
def outdent(self, step: int = 1) -> None:
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame: Frame, node: nodes.Node | None = None) -> None:
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline("yield ", node)
else:
self.writeline(f"{frame.buffer}.append(", node)
def end_write(self, frame: Frame) -> None:
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(")")
def simple_write(
self, s: str, frame: Frame, node: nodes.Node | None = None
) -> None:
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes: t.Iterable[nodes.Node], frame: Frame) -> None:
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically.
"""
try:
self.writeline("pass")
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x: str) -> None:
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write("\n" * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info, self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(" " * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x: str, node: nodes.Node | None = None, extra: int = 0) -> None:
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node: nodes.Node | None = None, extra: int = 0) -> None:
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
    def signature(
        self,
        node: nodes.Call | nodes.Filter | nodes.Test,
        frame: Frame,
        extra_kwargs: t.Mapping[str, t.Any] | None = None,
    ) -> None:
        """Writes a function call to the stream for the current node.
        A leading comma is added automatically. The extra keyword
        arguments may not include python keywords otherwise a syntax
        error could occur. The extra keyword arguments should be given
        as python dict.
        """
        # if any of the given keyword arguments is a python keyword
        # we have to make sure that no invalid call is created.
        kwarg_workaround = any(
            is_python_keyword(t.cast(str, k))
            for k in chain((x.key for x in node.kwargs), extra_kwargs or ())
        )
        # positional arguments
        for arg in node.args:
            self.write(", ")
            self.visit(arg, frame)
        # no keyword collides with a Python keyword: emit kwargs inline
        if not kwarg_workaround:
            for kwarg in node.kwargs:
                self.write(", ")
                self.visit(kwarg, frame)
            if extra_kwargs is not None:
                for key, value in extra_kwargs.items():
                    self.write(f", {key}={value}")
        if node.dyn_args:
            self.write(", *")
            self.visit(node.dyn_args, frame)
        # route all keyword arguments through a **-dict so names that are
        # Python keywords cannot produce a syntax error
        if kwarg_workaround:
            if node.dyn_kwargs is not None:
                self.write(", **dict({")
            else:
                self.write(", **{")
            for kwarg in node.kwargs:
                self.write(f"{kwarg.key!r}: ")
                self.visit(kwarg.value, frame)
                self.write(", ")
            if extra_kwargs is not None:
                for key, value in extra_kwargs.items():
                    self.write(f"{key!r}: {value}, ")
            if node.dyn_kwargs is not None:
                self.write("}, **")
                self.visit(node.dyn_kwargs, frame)
                self.write(")")
            else:
                self.write("}")
        elif node.dyn_kwargs is not None:
            self.write(", **")
            self.visit(node.dyn_kwargs, frame)
    def pull_dependencies(self, nodes: t.Iterable[nodes.Node]) -> None:
        """Find all filter and test names used in the template and
        assign them to variables in the compiled namespace. Checking
        that the names are registered with the environment is done when
        compiling the Filter and Test nodes. If the node is in an If or
        CondExpr node, the check is done at runtime instead.
        .. versionchanged:: 3.0
            Filters and tests in If and CondExpr nodes are checked at
            runtime instead of compile time.
        """
        # collect every filter/test name referenced in the given subtree
        visitor = DependencyFinderVisitor()
        for node in nodes:
            visitor.visit(node)
        for id_map, names, dependency in (
            (self.filters, visitor.filters, "filters"),
            (
                self.tests,
                visitor.tests,
                "tests",
            ),
        ):
            for name in sorted(names):
                if name not in id_map:
                    id_map[name] = self.temporary_identifier()
                # add check during runtime that dependencies used inside of executed
                # blocks are defined, as this step may be skipped during compile time
                self.writeline("try:")
                self.indent()
                self.writeline(f"{id_map[name]} = environment.{dependency}[{name!r}]")
                self.outdent()
                self.writeline("except KeyError:")
                self.indent()
                self.writeline("@internalcode")
                self.writeline(f"def {id_map[name]}(*unused):")
                self.indent()
                self.writeline(
                    f'raise TemplateRuntimeError("No {dependency[:-1]}'
                    f' named {name!r} found.")'
                )
                self.outdent()
                self.outdent()
    def enter_frame(self, frame: Frame) -> None:
        """Emit the load instructions that bring the frame's symbols into
        scope: resolving names from the context, aliasing outer refs, and
        marking undefined targets as ``missing``.
        """
        undefs = []
        for target, (action, param) in frame.symbols.loads.items():
            if action == VAR_LOAD_PARAMETER:
                # parameters are already bound by the generated signature
                pass
            elif action == VAR_LOAD_RESOLVE:
                self.writeline(f"{target} = {self.get_resolve_func()}({param!r})")
            elif action == VAR_LOAD_ALIAS:
                self.writeline(f"{target} = {param}")
            elif action == VAR_LOAD_UNDEFINED:
                undefs.append(target)
            else:
                raise NotImplementedError("unknown load instruction")
        if undefs:
            # one combined assignment for all undefined targets
            self.writeline(f"{' = '.join(undefs)} = missing")
def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None:
if not with_python_scope:
undefs = []
for target in frame.symbols.loads:
undefs.append(target)
if undefs:
self.writeline(f"{' = '.join(undefs)} = missing")
def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str:
return async_value if self.environment.is_async else sync_value
def func(self, name: str) -> str:
return f"{self.choose_async()}def {name}"
    def macro_body(
        self, node: nodes.Macro | nodes.CallBlock, frame: Frame
    ) -> tuple[Frame, MacroRef]:
        """Dump the function def of a macro or call block."""
        frame = frame.inner()
        frame.symbols.analyze_node(node)
        macro_ref = MacroRef(node)
        explicit_caller = None
        skip_special_params = set()
        args = []
        # collect declared arguments, remembering whether the special
        # names caller/kwargs/varargs are shadowed by explicit parameters
        for idx, arg in enumerate(node.args):
            if arg.name == "caller":
                explicit_caller = idx
            if arg.name in ("kwargs", "varargs"):
                skip_special_params.add(arg.name)
            args.append(frame.symbols.ref(arg.name))
        undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
        if "caller" in undeclared:
            # In older Jinja versions there was a bug that allowed caller
            # to retain the special behavior even if it was mentioned in
            # the argument list. However thankfully this was only really
            # working if it was the last argument. So we are explicitly
            # checking this now and error out if it is anywhere else in
            # the argument list.
            if explicit_caller is not None:
                try:
                    node.defaults[explicit_caller - len(node.args)]
                except IndexError:
                    self.fail(
                        "When defining macros or call blocks the "
                        'special "caller" argument must be omitted '
                        "or be given a default.",
                        node.lineno,
                    )
            else:
                args.append(frame.symbols.declare_parameter("caller"))
                macro_ref.accesses_caller = True
        if "kwargs" in undeclared and "kwargs" not in skip_special_params:
            args.append(frame.symbols.declare_parameter("kwargs"))
            macro_ref.accesses_kwargs = True
        if "varargs" in undeclared and "varargs" not in skip_special_params:
            args.append(frame.symbols.declare_parameter("varargs"))
            macro_ref.accesses_varargs = True
        # macros are delayed, they never require output checks
        frame.require_output_check = False
        frame.symbols.analyze_node(node)
        self.writeline(f"{self.func('macro')}({', '.join(args)}):", node)
        self.indent()
        self.buffer(frame)
        self.enter_frame(frame)
        self.push_parameter_definitions(frame)
        # per-parameter default handling: a missing argument either gets
        # its declared default or becomes an undefined object
        for idx, arg in enumerate(node.args):
            ref = frame.symbols.ref(arg.name)
            self.writeline(f"if {ref} is missing:")
            self.indent()
            try:
                default = node.defaults[idx - len(node.args)]
            except IndexError:
                self.writeline(
                    f'{ref} = undefined("parameter {arg.name!r} was not provided",'
                    f" name={arg.name!r})"
                )
            else:
                self.writeline(f"{ref} = ")
                self.visit(default, frame)
            self.mark_parameter_stored(ref)
            self.outdent()
        self.pop_parameter_definitions()
        self.blockvisit(node.body, frame)
        self.return_buffer_contents(frame, force_unescaped=True)
        self.leave_frame(frame, with_python_scope=True)
        self.outdent()
        return frame, macro_ref
def macro_def(self, macro_ref: MacroRef, frame: Frame) -> None:
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
name = getattr(macro_ref.node, "name", None)
if len(macro_ref.node.args) == 1:
arg_tuple += ","
self.write(
f"Macro(environment, macro, {name!r}, ({arg_tuple}),"
f" {macro_ref.accesses_kwargs!r}, {macro_ref.accesses_varargs!r},"
f" {macro_ref.accesses_caller!r}, context.eval_ctx.autoescape)"
)
def position(self, node: nodes.Node) -> str:
"""Return a human readable position for the node."""
rv = f"line {node.lineno}"
if self.name is not None:
rv = f"{rv} in {self.name!r}"
return rv
def dump_local_context(self, frame: Frame) -> str:
items_kv = ", ".join(
f"{name!r}: {target}"
for name, target in frame.symbols.dump_stores().items()
)
return f"{{{items_kv}}}"
def write_commons(self) -> None:
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
self.writeline("resolve = context.resolve_or_missing")
self.writeline("undefined = environment.undefined")
self.writeline("concat = environment.concat")
# always use the standard Undefined class for the implicit else of
# conditional expressions
self.writeline("cond_expr_undefined = Undefined")
self.writeline("if 0: yield None")
def push_parameter_definitions(self, frame: Frame) -> None:
"""Pushes all parameter targets from the given frame into a local
stack that permits tracking of yet to be assigned parameters. In
particular this enables the optimization from `visit_Name` to skip
undefined expressions for parameters in macros as macros can reference
otherwise unbound parameters.
"""
self._param_def_block.append(frame.symbols.dump_param_targets())
def pop_parameter_definitions(self) -> None:
"""Pops the current parameter definitions set."""
self._param_def_block.pop()
def mark_parameter_stored(self, target: str) -> None:
"""Marks a parameter in the current parameter definitions as stored.
This will skip the enforced undefined checks.
"""
if self._param_def_block:
self._param_def_block[-1].discard(target)
def push_context_reference(self, target: str) -> None:
self._context_reference_stack.append(target)
def pop_context_reference(self) -> None:
self._context_reference_stack.pop()
def get_context_ref(self) -> str:
return self._context_reference_stack[-1]
def get_resolve_func(self) -> str:
target = self._context_reference_stack[-1]
if target == "context":
return "resolve"
return f"{target}.resolve"
def derive_context(self, frame: Frame) -> str:
return f"{self.get_context_ref()}.derived({self.dump_local_context(frame)})"
def parameter_is_undeclared(self, target: str) -> bool:
"""Checks if a given target is an undeclared parameter."""
if not self._param_def_block:
return False
return target in self._param_def_block[-1]
def push_assign_tracking(self) -> None:
"""Pushes a new layer for assignment tracking."""
self._assign_stack.append(set())
    def pop_assign_tracking(self, frame: Frame) -> None:
        """Pops the topmost level for assignment tracking and updates the
        context variables if necessary.
        """
        vars = self._assign_stack.pop()
        # only top-level, loop, or block frames publish their assignments
        if (
            not frame.block_frame
            and not frame.loop_frame
            and not frame.toplevel
            or not vars
        ):
            return
        public_names = [x for x in vars if x[:1] != "_"]
        if len(vars) == 1:
            # single assignment: write one subscript store directly
            name = next(iter(vars))
            ref = frame.symbols.ref(name)
            if frame.loop_frame:
                self.writeline(f"_loop_vars[{name!r}] = {ref}")
                return
            if frame.block_frame:
                self.writeline(f"_block_vars[{name!r}] = {ref}")
                return
            self.writeline(f"context.vars[{name!r}] = {ref}")
        else:
            # multiple assignments: batch them into one update({...}) call
            if frame.loop_frame:
                self.writeline("_loop_vars.update({")
            elif frame.block_frame:
                self.writeline("_block_vars.update({")
            else:
                self.writeline("context.vars.update({")
            for idx, name in enumerate(sorted(vars)):
                if idx:
                    self.write(", ")
                ref = frame.symbols.ref(name)
                self.write(f"{name!r}: {ref}")
            self.write("})")
        # names without a leading underscore are also exported
        if not frame.block_frame and not frame.loop_frame and public_names:
            if len(public_names) == 1:
                self.writeline(f"context.exported_vars.add({public_names[0]!r})")
            else:
                names_str = ", ".join(map(repr, sorted(public_names)))
                self.writeline(f"context.exported_vars.update(({names_str}))")
# -- Statement Visitors
    def visit_Template(self, node: nodes.Template, frame: Frame | None = None) -> None:
        """Generate the whole template module: runtime imports, the
        ``root`` render function, one ``block_<name>`` function per
        ``{% block %}``, the ``blocks`` mapping and the ``debug_info``
        string.
        """
        assert frame is None, "no root frame allowed"
        eval_ctx = EvalContext(self.environment, self.name)
        from .runtime import async_exported
        from .runtime import exported
        if self.environment.is_async:
            exported_names = sorted(exported + async_exported)
        else:
            exported_names = sorted(exported)
        self.writeline("from jinja2.runtime import " + ", ".join(exported_names))
        # if we want a deferred initialization we cannot move the
        # environment into a local name
        envenv = "" if self.defer_init else ", environment=environment"
        # do we have an extends tag at all? If not, we can save some
        # overhead by just not processing any inheritance code.
        have_extends = node.find(nodes.Extends) is not None
        # find all blocks
        for block in node.find_all(nodes.Block):
            if block.name in self.blocks:
                self.fail(f"block {block.name!r} defined twice", block.lineno)
            self.blocks[block.name] = block
        # find all imports and import them
        for import_ in node.find_all(nodes.ImportedName):
            if import_.importname not in self.import_aliases:
                imp = import_.importname
                self.import_aliases[imp] = alias = self.temporary_identifier()
                if "." in imp:
                    module, obj = imp.rsplit(".", 1)
                    self.writeline(f"from {module} import {obj} as {alias}")
                else:
                    self.writeline(f"import {imp} as {alias}")
        # add the load name
        self.writeline(f"name = {self.name!r}")
        # generate the root render function.
        self.writeline(
            f"{self.func('root')}(context, missing=missing{envenv}):", extra=1
        )
        self.indent()
        self.write_commons()
        # process the root
        frame = Frame(eval_ctx)
        if "self" in find_undeclared(node.body, ("self",)):
            ref = frame.symbols.declare_parameter("self")
            self.writeline(f"{ref} = TemplateReference(context)")
        frame.symbols.analyze_node(node)
        frame.toplevel = frame.rootlevel = True
        frame.require_output_check = have_extends and not self.has_known_extends
        if have_extends:
            self.writeline("parent_template = None")
        self.enter_frame(frame)
        self.pull_dependencies(node.body)
        self.blockvisit(node.body, frame)
        self.leave_frame(frame, with_python_scope=True)
        self.outdent()
        # make sure that the parent root is called.
        if have_extends:
            if not self.has_known_extends:
                self.indent()
            self.writeline("if parent_template is not None:")
            self.indent()
            if not self.environment.is_async:
                self.writeline("yield from parent_template.root_render_func(context)")
            else:
                # async parents are drained event by event and the async
                # generator is closed explicitly on exit
                self.writeline("agen = parent_template.root_render_func(context)")
                self.writeline("try:")
                self.indent()
                self.writeline("async for event in agen:")
                self.indent()
                self.writeline("yield event")
                self.outdent()
                self.outdent()
                self.writeline("finally: await agen.aclose()")
            self.outdent(1 + (not self.has_known_extends))
        # at this point we now have the blocks collected and can visit them too.
        for name, block in self.blocks.items():
            self.writeline(
                f"{self.func('block_' + name)}(context, missing=missing{envenv}):",
                block,
                1,
            )
            self.indent()
            self.write_commons()
            # It's important that we do not make this frame a child of the
            # toplevel template. This would cause a variety of
            # interesting issues with identifier tracking.
            block_frame = Frame(eval_ctx)
            block_frame.block_frame = True
            undeclared = find_undeclared(block.body, ("self", "super"))
            if "self" in undeclared:
                ref = block_frame.symbols.declare_parameter("self")
                self.writeline(f"{ref} = TemplateReference(context)")
            if "super" in undeclared:
                ref = block_frame.symbols.declare_parameter("super")
                self.writeline(f"{ref} = context.super({name!r}, block_{name})")
            block_frame.symbols.analyze_node(block)
            block_frame.block = name
            self.writeline("_block_vars = {}")
            self.enter_frame(block_frame)
            self.pull_dependencies(block.body)
            self.blockvisit(block.body, block_frame)
            self.leave_frame(block_frame, with_python_scope=True)
            self.outdent()
        blocks_kv_str = ", ".join(f"{x!r}: block_{x}" for x in self.blocks)
        self.writeline(f"blocks = {{{blocks_kv_str}}}", extra=1)
        debug_kv_str = "&".join(f"{k}={v}" for k, v in self.debug_info)
        self.writeline(f"debug_info = {debug_kv_str!r}")
    def visit_Block(self, node: nodes.Block, frame: Frame) -> None:
        """Call a block and register it for the template."""
        level = 0
        if frame.toplevel:
            # if we know that we are a child template, there is no need to
            # check if we are one
            if self.has_known_extends:
                return
            if self.extends_so_far > 0:
                self.writeline("if parent_template is None:")
                self.indent()
                level += 1
        if node.scoped:
            # scoped blocks see a derived context carrying the current locals
            context = self.derive_context(frame)
        else:
            context = self.get_context_ref()
        if node.required:
            self.writeline(f"if len(context.blocks[{node.name!r}]) <= 1:", node)
            self.indent()
            self.writeline(
                f'raise TemplateRuntimeError("Required block {node.name!r} not found")',
                node,
            )
            self.outdent()
        if not self.environment.is_async and frame.buffer is None:
            self.writeline(
                f"yield from context.blocks[{node.name!r}][0]({context})", node
            )
        else:
            # buffered or async output: drain the block generator and make
            # sure it is closed afterwards
            self.writeline(f"gen = context.blocks[{node.name!r}][0]({context})")
            self.writeline("try:")
            self.indent()
            self.writeline(
                f"{self.choose_async()}for event in gen:",
                node,
            )
            self.indent()
            self.simple_write("event", frame)
            self.outdent()
            self.outdent()
            self.writeline(
                f"finally: {self.choose_async('await gen.aclose()', 'gen.close()')}"
            )
        self.outdent(level)
    def visit_Extends(self, node: nodes.Extends, frame: Frame) -> None:
        """Calls the extender."""
        if not frame.toplevel:
            self.fail("cannot use extend from a non top-level scope", node.lineno)
        # if the number of extends statements in general is zero so
        # far, we don't have to add a check if something extended
        # the template before this one.
        if self.extends_so_far > 0:
            # if we have a known extends we just add a template runtime
            # error into the generated code. We could catch that at compile
            # time too, but i welcome it not to confuse users by throwing the
            # same error at different times just "because we can".
            if not self.has_known_extends:
                self.writeline("if parent_template is not None:")
                self.indent()
            self.writeline('raise TemplateRuntimeError("extended multiple times")')
            # if we have a known extends already we don't need that code here
            # as we know that the template execution will end here.
            if self.has_known_extends:
                raise CompilerExit()
            else:
                self.outdent()
        self.writeline("parent_template = environment.get_template(", node)
        self.visit(node.template, frame)
        self.write(f", {self.name!r})")
        # make every parent block reachable through the child's context
        self.writeline("for name, parent_block in parent_template.blocks.items():")
        self.indent()
        self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
        self.outdent()
        # if this extends statement was in the root level we can take
        # advantage of that information and simplify the generated code
        # in the top level from this point onwards
        if frame.rootlevel:
            self.has_known_extends = True
        # and now we have one more
        self.extends_so_far += 1
    def visit_Include(self, node: nodes.Include, frame: Frame) -> None:
        """Handles includes."""
        if node.ignore_missing:
            self.writeline("try:")
            self.indent()
        # pick the most precise lookup helper when the template name is a
        # compile-time constant
        func_name = "get_or_select_template"
        if isinstance(node.template, nodes.Const):
            if isinstance(node.template.value, str):
                func_name = "get_template"
            elif isinstance(node.template.value, (tuple, list)):
                func_name = "select_template"
        elif isinstance(node.template, (nodes.Tuple, nodes.List)):
            func_name = "select_template"
        self.writeline(f"template = environment.{func_name}(", node)
        self.visit(node.template, frame)
        self.write(f", {self.name!r})")
        if node.ignore_missing:
            self.outdent()
            self.writeline("except TemplateNotFound:")
            self.indent()
            self.writeline("pass")
            self.outdent()
            self.writeline("else:")
            self.indent()
        def loop_body() -> None:
            # shared inner body: forward one event to the output
            self.indent()
            self.simple_write("event", frame)
            self.outdent()
        if node.with_context:
            self.writeline(
                f"gen = template.root_render_func("
                "template.new_context(context.get_all(), True,"
                f" {self.dump_local_context(frame)}))"
            )
            self.writeline("try:")
            self.indent()
            self.writeline(f"{self.choose_async()}for event in gen:")
            loop_body()
            self.outdent()
            self.writeline(
                f"finally: {self.choose_async('await gen.aclose()', 'gen.close()')}"
            )
        elif self.environment.is_async:
            self.writeline(
                "for event in (await template._get_default_module_async())"
                "._body_stream:"
            )
            loop_body()
        else:
            self.writeline("yield from template._get_default_module()._body_stream")
        if node.ignore_missing:
            self.outdent()
    def _import_common(
        self, node: nodes.Import | nodes.FromImport, frame: Frame
    ) -> None:
        """Emit the template-module lookup shared by both import
        statement kinds (``{% import %}`` and ``{% from %}``).
        """
        self.write(f"{self.choose_async('await ')}environment.get_template(")
        self.visit(node.template, frame)
        self.write(f", {self.name!r}).")
        if node.with_context:
            # pass the full current context into the imported module
            f_name = f"make_module{self.choose_async('_async')}"
            self.write(
                f"{f_name}(context.get_all(), True, {self.dump_local_context(frame)})"
            )
        else:
            self.write(f"_get_default_module{self.choose_async('_async')}(context)")
def visit_Import(self, node: nodes.Import, frame: Frame) -> None:
"""Visit regular imports."""
self.writeline(f"{frame.symbols.ref(node.target)} = ", node)
if frame.toplevel:
self.write(f"context.vars[{node.target!r}] = ")
self._import_common(node, frame)
if frame.toplevel and not node.target.startswith("_"):
self.writeline(f"context.exported_vars.discard({node.target!r})")
    def visit_FromImport(self, node: nodes.FromImport, frame: Frame) -> None:
        """Visit named imports."""
        self.newline(node)
        self.write("included_template = ")
        self._import_common(node, frame)
        # bookkeeping for names that must be published/unexported at top level
        var_names = []
        discarded_names = []
        for name in node.names:
            if isinstance(name, tuple):
                name, alias = name
            else:
                alias = name
            self.writeline(
                f"{frame.symbols.ref(alias)} ="
                f" getattr(included_template, {name!r}, missing)"
            )
            self.writeline(f"if {frame.symbols.ref(alias)} is missing:")
            self.indent()
            # The position will contain the template name, and will be formatted
            # into a string that will be compiled into an f-string. Curly braces
            # in the name must be replaced with escapes so that they will not be
            # executed as part of the f-string.
            position = self.position(node).replace("{", "{{").replace("}", "}}")
            message = (
                "the template {included_template.__name__!r}"
                f" (imported on {position})"
                f" does not export the requested name {name!r}"
            )
            self.writeline(
                f"{frame.symbols.ref(alias)} = undefined(f{message!r}, name={name!r})"
            )
            self.outdent()
            if frame.toplevel:
                var_names.append(alias)
                if not alias.startswith("_"):
                    discarded_names.append(alias)
        if var_names:
            if len(var_names) == 1:
                name = var_names[0]
                self.writeline(f"context.vars[{name!r}] = {frame.symbols.ref(name)}")
            else:
                names_kv = ", ".join(
                    f"{name!r}: {frame.symbols.ref(name)}" for name in var_names
                )
                self.writeline(f"context.vars.update({{{names_kv}}})")
        if discarded_names:
            if len(discarded_names) == 1:
                self.writeline(f"context.exported_vars.discard({discarded_names[0]!r})")
            else:
                names_str = ", ".join(map(repr, discarded_names))
                self.writeline(
                    f"context.exported_vars.difference_update(({names_str}))"
                )
    def visit_For(self, node: nodes.For, frame: Frame) -> None:
        """Generate a ``{% for %}`` loop, covering filtered iteration, the
        special ``loop`` variable, ``{% else %}`` handling and recursive
        loops.
        """
        loop_frame = frame.inner()
        loop_frame.loop_frame = True
        test_frame = frame.inner()
        else_frame = frame.inner()
        # try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode if the special loop
        # variable is accessed in the body if the body is a scoped block.
        extended_loop = (
            node.recursive
            or "loop"
            in find_undeclared(node.iter_child_nodes(only=("body",)), ("loop",))
            or any(block.scoped for block in node.find_all(nodes.Block))
        )
        loop_ref = None
        if extended_loop:
            loop_ref = loop_frame.symbols.declare_parameter("loop")
        loop_frame.symbols.analyze_node(node, for_branch="body")
        if node.else_:
            else_frame.symbols.analyze_node(node, for_branch="else")
        if node.test:
            # the loop filter becomes a separate generator function that
            # yields only the items passing the test
            loop_filter_func = self.temporary_identifier()
            test_frame.symbols.analyze_node(node, for_branch="test")
            self.writeline(f"{self.func(loop_filter_func)}(fiter):", node.test)
            self.indent()
            self.enter_frame(test_frame)
            self.writeline(self.choose_async("async for ", "for "))
            self.visit(node.target, loop_frame)
            self.write(" in ")
            self.write(self.choose_async("auto_aiter(fiter)", "fiter"))
            self.write(":")
            self.indent()
            self.writeline("if ", node.test)
            self.visit(node.test, test_frame)
            self.write(":")
            self.indent()
            self.writeline("yield ")
            self.visit(node.target, loop_frame)
            self.outdent(3)
            self.leave_frame(test_frame, with_python_scope=True)
        # if we don't have an recursive loop we have to find the shadowed
        # variables at that point. Because loops can be nested but the loop
        # variable is a special one we have to enforce aliasing for it.
        if node.recursive:
            self.writeline(
                f"{self.func('loop')}(reciter, loop_render_func, depth=0):", node
            )
            self.indent()
            self.buffer(loop_frame)
            # Use the same buffer for the else frame
            else_frame.buffer = loop_frame.buffer
        # make sure the loop variable is a special one and raise a template
        # assertion error if a loop tries to write to loop
        if extended_loop:
            self.writeline(f"{loop_ref} = missing")
        for name in node.find_all(nodes.Name):
            if name.ctx == "store" and name.name == "loop":
                self.fail(
                    "Can't assign to special loop variable in for-loop target",
                    name.lineno,
                )
        if node.else_:
            # flag variable: cleared when the body ran at least once
            iteration_indicator = self.temporary_identifier()
            self.writeline(f"{iteration_indicator} = 1")
        self.writeline(self.choose_async("async for ", "for "), node)
        self.visit(node.target, loop_frame)
        if extended_loop:
            self.write(f", {loop_ref} in {self.choose_async('Async')}LoopContext(")
        else:
            self.write(" in ")
        if node.test:
            self.write(f"{loop_filter_func}(")
        if node.recursive:
            self.write("reciter")
        else:
            if self.environment.is_async and not extended_loop:
                self.write("auto_aiter(")
            self.visit(node.iter, frame)
            if self.environment.is_async and not extended_loop:
                self.write(")")
        if node.test:
            self.write(")")
        if node.recursive:
            self.write(", undefined, loop_render_func, depth):")
        else:
            self.write(", undefined):" if extended_loop else ":")
        self.indent()
        self.enter_frame(loop_frame)
        self.writeline("_loop_vars = {}")
        self.blockvisit(node.body, loop_frame)
        if node.else_:
            self.writeline(f"{iteration_indicator} = 0")
        self.outdent()
        self.leave_frame(
            loop_frame, with_python_scope=node.recursive and not node.else_
        )
        if node.else_:
            self.writeline(f"if {iteration_indicator}:")
            self.indent()
            self.enter_frame(else_frame)
            self.blockvisit(node.else_, else_frame)
            self.leave_frame(else_frame)
            self.outdent()
        # if the node was recursive we have to return the buffer contents
        # and start the iteration code
        if node.recursive:
            self.return_buffer_contents(loop_frame)
            self.outdent()
            self.start_write(frame, node)
            self.write(f"{self.choose_async('await ')}loop(")
            if self.environment.is_async:
                self.write("auto_aiter(")
            self.visit(node.iter, frame)
            if self.environment.is_async:
                self.write(")")
            self.write(", loop)")
            self.end_write(frame)
        # at the end of the iteration, clear any assignments made in the
        # loop from the top level
        if self._assign_stack:
            self._assign_stack[-1].difference_update(loop_frame.symbols.stores)
def visit_If(self, node: nodes.If, frame: Frame) -> None:
if_frame = frame.soft()
self.writeline("if ", node)
self.visit(node.test, if_frame)
self.write(":")
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
for elif_ in node.elif_:
self.writeline("elif ", elif_)
self.visit(elif_.test, if_frame)
self.write(":")
self.indent()
self.blockvisit(elif_.body, if_frame)
self.outdent()
if node.else_:
self.writeline("else:")
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
    def visit_Macro(self, node: nodes.Macro, frame: Frame) -> None:
        """Define a ``{% macro %}``: generate its function and bind the
        ``Macro`` wrapper to its name, exporting public names at top level.
        """
        macro_frame, macro_ref = self.macro_body(node, frame)
        self.newline()
        if frame.toplevel:
            # underscore-prefixed macros stay private to the template
            if not node.name.startswith("_"):
                self.write(f"context.exported_vars.add({node.name!r})")
            self.writeline(f"context.vars[{node.name!r}] = ")
        self.write(f"{frame.symbols.ref(node.name)} = ")
        self.macro_def(macro_ref, macro_frame)
    def visit_CallBlock(self, node: nodes.CallBlock, frame: Frame) -> None:
        """Generate ``{% call %}``: the block body becomes the ``caller``
        macro that is forwarded into the called macro.
        """
        call_frame, macro_ref = self.macro_body(node, frame)
        self.writeline("caller = ")
        self.macro_def(macro_ref, call_frame)
        self.start_write(frame, node)
        self.visit_Call(node.call, frame, forward_caller=True)
        self.end_write(frame)
    def visit_FilterBlock(self, node: nodes.FilterBlock, frame: Frame) -> None:
        """Generate ``{% filter %}``: buffer the body, then emit it through
        the filter expression.
        """
        filter_frame = frame.inner()
        filter_frame.symbols.analyze_node(node)
        self.enter_frame(filter_frame)
        self.buffer(filter_frame)
        self.blockvisit(node.body, filter_frame)
        self.start_write(frame, node)
        self.visit_Filter(node.filter, filter_frame)
        self.end_write(frame)
        self.leave_frame(filter_frame)
    def visit_With(self, node: nodes.With, frame: Frame) -> None:
        """Generate ``{% with %}``: assign the scoped targets, then render
        the body in an inner frame.
        """
        with_frame = frame.inner()
        with_frame.symbols.analyze_node(node)
        self.enter_frame(with_frame)
        for target, expr in zip(node.targets, node.values, strict=False):
            self.newline()
            self.visit(target, with_frame)
            self.write(" = ")
            # the values are evaluated in the *outer* frame
            self.visit(expr, frame)
        self.blockvisit(node.body, with_frame)
        self.leave_frame(with_frame)
def visit_ExprStmt(self, node: nodes.ExprStmt, frame: Frame) -> None:
self.newline(node)
self.visit(node.node, frame)
    class _FinalizeInfo(t.NamedTuple):
        """How ``Output`` nodes apply the environment's finalize."""

        # compile-time finalize callable, or None when finalizing needs
        # runtime state and constants cannot be folded
        const: t.Callable[..., str] | None
        # source fragment emitted around runtime-evaluated output nodes
        src: str | None
@staticmethod
def _default_finalize(value: t.Any) -> t.Any:
"""The default finalize function if the environment isn't
configured with one. Or, if the environment has one, this is
called on that function's output for constants.
"""
return str(value)
_finalize: _FinalizeInfo | None = None
    def _make_finalize(self) -> _FinalizeInfo:
        """Build the finalize function to be used on constants and at
        runtime. Cached so it's only created once for all output nodes.
        Returns a ``namedtuple`` with the following attributes:
        ``const``
            A function to finalize constant data at compile time.
        ``src``
            Source code to output around nodes to be evaluated at
            runtime.
        """
        if self._finalize is not None:
            return self._finalize
        finalize: t.Callable[..., t.Any] | None
        finalize = default = self._default_finalize
        src = None
        if self.environment.finalize:
            src = "environment.finalize("
            env_finalize = self.environment.finalize
            # which extra first argument does the finalize callable want?
            pass_arg = {
                _PassArg.context: "context",
                _PassArg.eval_context: "context.eval_ctx",
                _PassArg.environment: "environment",
            }.get(
                _PassArg.from_obj(env_finalize) # type: ignore
            )
            finalize = None
            if pass_arg is None:
                # plain finalize: constants can still be folded at compile time
                def finalize(value: t.Any) -> t.Any: # noqa: F811
                    return default(env_finalize(value))
            else:
                src = f"{src}{pass_arg}, "
                if pass_arg == "environment":
                    def finalize(value: t.Any) -> t.Any: # noqa: F811
                        return default(env_finalize(self.environment, value))
        self._finalize = self._FinalizeInfo(finalize, src)
        return self._finalize
    def _output_const_repr(self, group: t.Iterable[t.Any]) -> str:
        """Given a group of constant values converted from ``Output``
        child nodes, produce a string to write to the template module
        source.
        """
        # join the constant chunks and embed them as one literal
        return repr(concat(group))
    def _output_child_to_const(
        self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
    ) -> str:
        """Try to optimize a child of an ``Output`` node by trying to
        convert it to constant, finalized data at compile time.
        If :exc:`Impossible` is raised, the node is not constant and
        will be evaluated at runtime. Any other exception will also be
        evaluated at runtime for easier debugging.
        """
        const = node.as_const(frame.eval_ctx)
        if frame.eval_ctx.autoescape:
            const = escape(const)
        # Template data doesn't go through finalize.
        if isinstance(node, nodes.TemplateData):
            return str(const)
        return finalize.const(const) # type: ignore
def _output_child_pre(
self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
) -> None:
"""Output extra source code before visiting a child of an
``Output`` node.
"""
if frame.eval_ctx.volatile:
self.write("(escape if context.eval_ctx.autoescape else str)(")
elif frame.eval_ctx.autoescape:
self.write("escape(")
else:
self.write("str(")
if finalize.src is not None:
self.write(finalize.src)
def _output_child_post(
self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo
) -> None:
"""Output extra source code after visiting a child of an
``Output`` node.
"""
self.write(")")
if finalize.src is not None:
self.write(")")
    def visit_Output(self, node: nodes.Output, frame: Frame) -> None:
        """Generate output statements: fold what can be folded to constants
        at compile time and emit the rest as runtime expressions.
        """
        # If an extends is active, don't render outside a block.
        if frame.require_output_check:
            # A top-level extends is known to exist at compile time.
            if self.has_known_extends:
                return
            self.writeline("if parent_template is None:")
            self.indent()
        finalize = self._make_finalize()
        body: list[list[t.Any] | nodes.Expr] = []
        # Evaluate constants at compile time if possible. Each item in
        # body will be either a list of static data or a node to be
        # evaluated at runtime.
        for child in node.nodes:
            try:
                if not (
                    # If the finalize function requires runtime context,
                    # constants can't be evaluated at compile time.
                    finalize.const
                    # Unless it's basic template data that won't be
                    # finalized anyway.
                    or isinstance(child, nodes.TemplateData)
                ):
                    raise nodes.Impossible()
                const = self._output_child_to_const(child, frame, finalize)
            except (nodes.Impossible, Exception):
                # The node was not constant and needs to be evaluated at
                # runtime. Or another error was raised, which is easier
                # to debug at runtime.
                body.append(child)
                continue
            if body and isinstance(body[-1], list):
                body[-1].append(const)
            else:
                body.append([const])
        if frame.buffer is not None:
            if len(body) == 1:
                self.writeline(f"{frame.buffer}.append(")
            else:
                self.writeline(f"{frame.buffer}.extend((")
            self.indent()
        for item in body:
            if isinstance(item, list):
                # A group of constant data to join and output.
                val = self._output_const_repr(item)
                if frame.buffer is None:
                    self.writeline("yield " + val)
                else:
                    self.writeline(val + ",")
            else:
                if frame.buffer is None:
                    self.writeline("yield ", item)
                else:
                    self.newline(item)
                # A node to be evaluated at runtime.
                self._output_child_pre(item, frame, finalize)
                self.visit(item, frame)
                self._output_child_post(item, frame, finalize)
                if frame.buffer is not None:
                    self.write(",")
        if frame.buffer is not None:
            self.outdent()
            self.writeline(")" if len(body) == 1 else "))")
        if frame.require_output_check:
            self.outdent()
    def visit_Assign(self, node: nodes.Assign, frame: Frame) -> None:
        """Compile a ``{% set %}`` assignment statement."""
        self.push_assign_tracking()
        # ``a.b`` is allowed for assignment, and is parsed as an NSRef. However,
        # it is only valid if it references a Namespace object. Emit a check for
        # that for each ref here, before assignment code is emitted. This can't
        # be done in visit_NSRef as the ref could be in the middle of a tuple.
        seen_refs: set[str] = set()
        for nsref in node.find_all(nodes.NSRef):
            if nsref.name in seen_refs:
                # Only emit the check for each reference once, in case the same
                # ref is used multiple times in a tuple, `ns.a, ns.b = c, d`.
                continue
            seen_refs.add(nsref.name)
            ref = frame.symbols.ref(nsref.name)
            self.writeline(f"if not isinstance({ref}, Namespace):")
            self.indent()
            self.writeline(
                "raise TemplateRuntimeError"
                '("cannot assign attribute on non-namespace object")'
            )
            self.outdent()
        self.newline(node)
        self.visit(node.target, frame)
        self.write(" = ")
        self.visit(node.node, frame)
        self.pop_assign_tracking(frame)
    def visit_AssignBlock(self, node: nodes.AssignBlock, frame: Frame) -> None:
        """Compile a ``{% set %}...{% endset %}`` block assignment.

        The body is rendered into a buffer and the joined result (wrapped
        as Markup under autoescaping, optionally filtered) is assigned to
        the target.
        """
        self.push_assign_tracking()
        block_frame = frame.inner()
        # This is a special case.  Since a set block always captures we
        # will disable output checks.  This way one can use set blocks
        # toplevel even in extended templates.
        block_frame.require_output_check = False
        block_frame.symbols.analyze_node(node)
        self.enter_frame(block_frame)
        self.buffer(block_frame)
        self.blockvisit(node.body, block_frame)
        self.newline(node)
        self.visit(node.target, frame)
        self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
        if node.filter is not None:
            self.visit_Filter(node.filter, block_frame)
        else:
            self.write(f"concat({block_frame.buffer})")
        self.write(")")
        self.pop_assign_tracking(frame)
        self.leave_frame(block_frame)
# -- Expression Visitors
    def visit_Name(self, node: nodes.Name, frame: Frame) -> None:
        """Compile a variable reference, tracking assignments and
        emitting an undefined guard for loads that may be missing.
        """
        if node.ctx == "store" and (
            frame.toplevel or frame.loop_frame or frame.block_frame
        ):
            if self._assign_stack:
                self._assign_stack[-1].add(node.name)
        ref = frame.symbols.ref(node.name)
        # If we are looking up a variable we might have to deal with the
        # case where it's undefined. We can skip that case if the load
        # instruction indicates a parameter which are always defined.
        if node.ctx == "load":
            load = frame.symbols.find_load(ref)
            if not (
                load is not None
                and load[0] == VAR_LOAD_PARAMETER
                and not self.parameter_is_undeclared(ref)
            ):
                self.write(
                    f"(undefined(name={node.name!r}) if {ref} is missing else {ref})"
                )
                return
        self.write(ref)
def visit_NSRef(self, node: nodes.NSRef, frame: Frame) -> None:
# NSRef is a dotted assignment target a.b=c, but uses a[b]=c internally.
# visit_Assign emits code to validate that each ref is to a Namespace
# object only. That can't be emitted here as the ref could be in the
# middle of a tuple assignment.
ref = frame.symbols.ref(node.name)
self.writeline(f"{ref}[{node.attr!r}]")
def visit_Const(self, node: nodes.Const, frame: Frame) -> None:
val = node.as_const(frame.eval_ctx)
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node: nodes.TemplateData, frame: Frame) -> None:
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write(
f"(Markup if context.eval_ctx.autoescape else identity)({node.data!r})"
)
def visit_Tuple(self, node: nodes.Tuple, frame: Frame) -> None:
self.write("(")
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item, frame)
self.write(",)" if idx == 0 else ")")
def visit_List(self, node: nodes.List, frame: Frame) -> None:
self.write("[")
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item, frame)
self.write("]")
def visit_Dict(self, node: nodes.Dict, frame: Frame) -> None:
self.write("{")
for idx, item in enumerate(node.items):
if idx:
self.write(", ")
self.visit(item.key, frame)
self.write(": ")
self.visit(item.value, frame)
self.write("}")
visit_Add = _make_binop("+")
visit_Sub = _make_binop("-")
visit_Mul = _make_binop("*")
visit_Div = _make_binop("/")
visit_FloorDiv = _make_binop("//")
visit_Pow = _make_binop("**")
visit_Mod = _make_binop("%")
visit_And = _make_binop("and")
visit_Or = _make_binop("or")
visit_Pos = _make_unop("+")
visit_Neg = _make_unop("-")
visit_Not = _make_unop("not ")
@optimizeconst
def visit_Concat(self, node: nodes.Concat, frame: Frame) -> None:
if frame.eval_ctx.volatile:
func_name = "(markup_join if context.eval_ctx.volatile else str_join)"
elif frame.eval_ctx.autoescape:
func_name = "markup_join"
else:
func_name = "str_join"
self.write(f"{func_name}((")
for arg in node.nodes:
self.visit(arg, frame)
self.write(", ")
self.write("))")
@optimizeconst
def visit_Compare(self, node: nodes.Compare, frame: Frame) -> None:
self.write("(")
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
self.write(")")
    def visit_Operand(self, node: nodes.Operand, frame: Frame) -> None:
        """Write one operator/operand pair of a comparison chain."""
        self.write(f" {operators[node.op]} ")
        self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node: nodes.Getattr, frame: Frame) -> None:
if self.environment.is_async:
self.write("(await auto_await(")
self.write("environment.getattr(")
self.visit(node.node, frame)
self.write(f", {node.attr!r})")
if self.environment.is_async:
self.write("))")
    @optimizeconst
    def visit_Getitem(self, node: nodes.Getitem, frame: Frame) -> None:
        """Compile subscript access via ``environment.getitem``, awaiting
        the result in async mode."""
        # slices bypass the environment getitem method.
        if isinstance(node.arg, nodes.Slice):
            self.visit(node.node, frame)
            self.write("[")
            self.visit(node.arg, frame)
            self.write("]")
        else:
            if self.environment.is_async:
                self.write("(await auto_await(")
            self.write("environment.getitem(")
            self.visit(node.node, frame)
            self.write(", ")
            self.visit(node.arg, frame)
            self.write(")")
            if self.environment.is_async:
                self.write("))")
    def visit_Slice(self, node: nodes.Slice, frame: Frame) -> None:
        """Write a slice expression ``start:stop[:step]``; omitted parts
        are left empty."""
        if node.start is not None:
            self.visit(node.start, frame)
        self.write(":")
        if node.stop is not None:
            self.visit(node.stop, frame)
        if node.step is not None:
            self.write(":")
            self.visit(node.step, frame)
    @contextmanager
    def _filter_test_common(
        self, node: nodes.Filter | nodes.Test, frame: Frame, is_filter: bool
    ) -> t.Iterator[None]:
        """Shared code generation for filter and test calls.

        Writes the call opening (including any context/eval-context/
        environment pass argument), yields so the caller can emit the
        value being filtered or tested, then writes the remaining
        signature arguments and closes the call.
        """
        if self.environment.is_async:
            self.write("(await auto_await(")
        if is_filter:
            self.write(f"{self.filters[node.name]}(")
            func = self.environment.filters.get(node.name)
        else:
            self.write(f"{self.tests[node.name]}(")
            func = self.environment.tests.get(node.name)
        # When inside an If or CondExpr frame, allow the filter to be
        # undefined at compile time and only raise an error if it's
        # actually called at runtime. See pull_dependencies.
        if func is None and not frame.soft_frame:
            type_name = "filter" if is_filter else "test"
            self.fail(f"No {type_name} named {node.name!r}.", node.lineno)
        pass_arg = {
            _PassArg.context: "context",
            _PassArg.eval_context: "context.eval_ctx",
            _PassArg.environment: "environment",
        }.get(
            _PassArg.from_obj(func)  # type: ignore
        )
        if pass_arg is not None:
            self.write(f"{pass_arg}, ")
        # Back to the visitor function to handle visiting the target of
        # the filter or test.
        yield
        self.signature(node, frame)
        self.write(")")
        if self.environment.is_async:
            self.write("))")
    @optimizeconst
    def visit_Filter(self, node: nodes.Filter, frame: Frame) -> None:
        """Compile a filter call; inside a filter block (``node.node is
        None``) the filtered value is the joined frame buffer."""
        with self._filter_test_common(node, frame, True):
            # if the filter node is None we are inside a filter block
            # and want to write to the current buffer
            if node.node is not None:
                self.visit(node.node, frame)
            elif frame.eval_ctx.volatile:
                self.write(
                    f"(Markup(concat({frame.buffer}))"
                    f" if context.eval_ctx.autoescape else concat({frame.buffer}))"
                )
            elif frame.eval_ctx.autoescape:
                self.write(f"Markup(concat({frame.buffer}))")
            else:
                self.write(f"concat({frame.buffer})")
    @optimizeconst
    def visit_Test(self, node: nodes.Test, frame: Frame) -> None:
        """Compile a test call (``x is divisibleby 3`` etc.)."""
        with self._filter_test_common(node, frame, False):
            self.visit(node.node, frame)
    @optimizeconst
    def visit_CondExpr(self, node: nodes.CondExpr, frame: Frame) -> None:
        """Compile an inline ``a if cond else b`` expression; a missing
        else branch becomes a runtime undefined error."""
        # Soften the frame so dependencies (filters/tests) in untaken
        # branches may be undefined at compile time.
        frame = frame.soft()

        def write_expr2() -> None:
            if node.expr2 is not None:
                self.visit(node.expr2, frame)
                return

            self.write(
                f'cond_expr_undefined("the inline if-expression on'
                f" {self.position(node)} evaluated to false and no else"
                f' section was defined.")'
            )

        self.write("(")
        self.visit(node.expr1, frame)
        self.write(" if ")
        self.visit(node.test, frame)
        self.write(" else ")
        write_expr2()
        self.write(")")
    @optimizeconst
    def visit_Call(
        self, node: nodes.Call, frame: Frame, forward_caller: bool = False
    ) -> None:
        """Compile a call expression through ``context.call`` (or
        ``environment.call`` when sandboxed), forwarding ``caller`` and
        loop/block variables as keyword arguments when needed."""
        if self.environment.is_async:
            self.write("(await auto_await(")
        if self.environment.sandboxed:
            self.write("environment.call(context, ")
        else:
            self.write("context.call(")
        self.visit(node.node, frame)
        extra_kwargs = {"caller": "caller"} if forward_caller else None
        loop_kwargs = {"_loop_vars": "_loop_vars"} if frame.loop_frame else {}
        block_kwargs = {"_block_vars": "_block_vars"} if frame.block_frame else {}
        if extra_kwargs:
            extra_kwargs.update(loop_kwargs, **block_kwargs)
        elif loop_kwargs or block_kwargs:
            extra_kwargs = dict(loop_kwargs, **block_kwargs)
        self.signature(node, frame, extra_kwargs)
        self.write(")")
        if self.environment.is_async:
            self.write("))")
def visit_Keyword(self, node: nodes.Keyword, frame: Frame) -> None:
self.write(node.key + "=")
self.visit(node.value, frame)
# -- Unused nodes for extensions
    def visit_MarkSafe(self, node: nodes.MarkSafe, frame: Frame) -> None:
        """Wrap the expression in ``Markup(...)`` so it is treated as safe."""
        self.write("Markup(")
        self.visit(node.expr, frame)
        self.write(")")
    def visit_MarkSafeIfAutoescape(
        self, node: nodes.MarkSafeIfAutoescape, frame: Frame
    ) -> None:
        """Wrap the expression in ``Markup`` only when autoescaping is
        active at runtime, otherwise pass it through ``identity``."""
        self.write("(Markup if context.eval_ctx.autoescape else identity)(")
        self.visit(node.expr, frame)
        self.write(")")
def visit_EnvironmentAttribute(
self, node: nodes.EnvironmentAttribute, frame: Frame
) -> None:
self.write("environment." + node.name)
    def visit_ExtensionAttribute(
        self, node: nodes.ExtensionAttribute, frame: Frame
    ) -> None:
        """Write a lookup of an attribute on a loaded extension."""
        self.write(f"environment.extensions[{node.identifier!r}].{node.name}")
    def visit_ImportedName(self, node: nodes.ImportedName, frame: Frame) -> None:
        """Write the alias under which the name was imported into the
        generated module."""
        self.write(self.import_aliases[node.importname])
    def visit_InternalName(self, node: nodes.InternalName, frame: Frame) -> None:
        """Write an internal (compiler-generated) identifier verbatim."""
        self.write(node.name)
    def visit_ContextReference(
        self, node: nodes.ContextReference, frame: Frame
    ) -> None:
        """Write a reference to the current template context."""
        self.write("context")
    def visit_DerivedContextReference(
        self, node: nodes.DerivedContextReference, frame: Frame
    ) -> None:
        """Write a reference to a context derived for the current frame."""
        self.write(self.derive_context(frame))
    def visit_Continue(self, node: nodes.Continue, frame: Frame) -> None:
        """Emit a ``continue`` statement (loop controls extension)."""
        self.writeline("continue", node)
    def visit_Break(self, node: nodes.Break, frame: Frame) -> None:
        """Emit a ``break`` statement (loop controls extension)."""
        self.writeline("break", node)
def visit_Scope(self, node: nodes.Scope, frame: Frame) -> None:
scope_frame = frame.inner()
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
    def visit_OverlayScope(self, node: nodes.OverlayScope, frame: Frame) -> None:
        """Compile an ``OverlayScope``: derive a context whose vars come
        from the given expression, then visit the body in an isolated
        inner frame using that context."""
        ctx = self.temporary_identifier()
        self.writeline(f"{ctx} = {self.derive_context(frame)}")
        self.writeline(f"{ctx}.vars = ")
        self.visit(node.context, frame)
        self.push_context_reference(ctx)
        scope_frame = frame.inner(isolated=True)
        scope_frame.symbols.analyze_node(node)
        self.enter_frame(scope_frame)
        self.blockvisit(node.body, scope_frame)
        self.leave_frame(scope_frame)
        self.pop_context_reference()
    def visit_EvalContextModifier(
        self, node: nodes.EvalContextModifier, frame: Frame
    ) -> None:
        """Emit runtime eval-context assignments and mirror them on the
        compile-time eval context; options that cannot be evaluated at
        compile time mark the context volatile."""
        for keyword in node.options:
            self.writeline(f"context.eval_ctx.{keyword.key} = ")
            self.visit(keyword.value, frame)
            try:
                val = keyword.value.as_const(frame.eval_ctx)
            except nodes.Impossible:
                frame.eval_ctx.volatile = True
            else:
                setattr(frame.eval_ctx, keyword.key, val)
    def visit_ScopedEvalContextModifier(
        self, node: nodes.ScopedEvalContextModifier, frame: Frame
    ) -> None:
        """Like ``visit_EvalContextModifier`` but restores the previous
        eval context (both at runtime and at compile time) after the body."""
        old_ctx_name = self.temporary_identifier()
        saved_ctx = frame.eval_ctx.save()
        self.writeline(f"{old_ctx_name} = context.eval_ctx.save()")
        self.visit_EvalContextModifier(node, frame)
        for child in node.body:
            self.visit(child, frame)
        frame.eval_ctx.revert(saved_ctx)
        self.writeline(f"context.eval_ctx.revert({old_ctx_name})")
| CodeGenerator |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/toggle_style_order.py | {
"start": 74,
"end": 414
} | class ____(App):
CSS = """
Checkbox > .toggle--label, Label {
color: white;
text-opacity: 50%;
}
"""
def compose(self):
yield Checkbox("[red bold]This is just[/] some text.")
yield Label("[bold red]This is just[/] some text.")
if __name__ == "__main__":
CheckboxApp().run()
| CheckboxApp |
python | viewflow__viewflow | viewflow/views/list.py | {
"start": 6697,
"end": 10795
} | class ____(object):
ordering = None
ordering_kwarg = "_orderby"
def get_ordering(self):
"""Return the field or fields to use for ordering the queryset."""
ordering = []
# url query parameter
if self.ordering_kwarg in self.request.GET:
params = self.request.GET[self.ordering_kwarg].split(",")
for param in params:
_, prefix, param_name = param.rpartition("-")
column_def = self.list_columns.get(param_name)
if column_def:
column_ordering = column_def.orderby()
if column_ordering:
if hasattr(column_ordering, "as_sql"):
ordering.append(
column_ordering.desc()
if prefix == "-"
else column_ordering.asc()
)
elif column_ordering.startswith("-") and prefix == "-":
ordering.append(column_ordering[1:])
else:
ordering.append(prefix + column_ordering)
else:
# default view ordering
if isinstance(self.ordering, (list, tuple)):
ordering.extend(self.ordering)
elif isinstance(self.ordering, str):
ordering.append(self.ordering)
# default queryset order
if self.queryset is not None:
ordering.extend(self.queryset.query.order_by)
return ordering
@cached_property
def columns_order(self):
"""Return list of columns used to order the queryset."""
ordering = {}
# ordered by the url query
if self.ordering_kwarg in self.request.GET:
params = self.request.GET[self.ordering_kwarg].split(",")
for param in params:
_, param_prefix, param_name = param.rpartition("-")
column_def = self.list_columns.get(param_name)
if column_def:
column_ordering = column_def.orderby()
if column_ordering is not None and isinstance(column_ordering, str):
# TODO support custom OrderBy expressions
(
_,
column_order_prefix,
column_orderby,
) = column_ordering.rpartition("-")
ordering[column_def] = (
"asc" if column_order_prefix == param_prefix else "desc"
)
else:
# ordered by explicit self.ordering definition or by queryset.order_by
raw_ordering = []
if isinstance(self.ordering, (list, tuple)):
raw_ordering.extend(self.ordering)
elif isinstance(self.ordering, str):
raw_ordering.append(self.ordering)
if self.queryset is not None:
raw_ordering.extend(self.queryset.query.order_by)
for param in raw_ordering:
_, param_prefix, param_name = param.rpartition("-")
for column_def in self.list_columns.values():
if column_def in ordering: # column order already found
continue
column_ordering = column_def.orderby()
if column_ordering is not None and isinstance(column_ordering, str):
# TODO support custom OrderBy expressions
(
_,
column_order_prefix,
column_orderby,
) = column_ordering.rpartition("-")
if param_name == column_orderby:
ordering[column_def] = (
"asc" if column_order_prefix == param_prefix else "desc"
)
return ordering
| OrderableListViewMixin |
python | pytransitions__transitions | transitions/core.py | {
"start": 15388,
"end": 20543
} | class ____(object):
"""A collection of transitions assigned to the same trigger
"""
def __init__(self, name, machine):
"""
Args:
name (str): The name of the event, which is also the name of the
triggering callable (e.g., 'advance' implies an advance()
method).
machine (Machine): The current Machine instance.
"""
self.name = name
self.machine = machine
self.transitions = defaultdict(list)
def add_transition(self, transition):
"""Add a transition to the list of potential transitions.
Args:
transition (Transition): The Transition instance to add to the
list.
"""
self.transitions[transition.source].append(transition)
def trigger(self, model, *args, **kwargs):
"""Executes all transitions that match the current state,
halting as soon as one successfully completes. More precisely, it prepares a partial
of the internal ``_trigger`` function, passes this to ``Machine._process``.
It is up to the machine's configuration of the Event whether processing happens queued (sequentially) or
whether further Events are processed as they occur.
Args:
model (object): The currently processed model
args and kwargs: Optional positional or named arguments that will
be passed onto the EventData object, enabling arbitrary state
information to be passed on to downstream triggered functions.
Returns: boolean indicating whether a transition was
successfully executed (True if successful, False if not).
"""
func = partial(self._trigger, EventData(None, self, self.machine, model, args=args, kwargs=kwargs))
# pylint: disable=protected-access
# noinspection PyProtectedMember
# Machine._process should not be called somewhere else. That's why it should not be exposed
# to Machine users.
return self.machine._process(func)
def _trigger(self, event_data):
"""Internal trigger function called by the ``Machine`` instance. This should not
be called directly but via the public method ``Machine.process``.
Args:
event_data (EventData): The currently processed event. State, result and (potentially) error might be
overridden.
Returns: boolean indicating whether a transition was
successfully executed (True if successful, False if not).
"""
event_data.state = self.machine.get_model_state(event_data.model)
try:
if self._is_valid_source(event_data.state):
self._process(event_data)
except BaseException as err: # pylint: disable=broad-except; Exception will be handled elsewhere
event_data.error = err
if self.machine.on_exception:
self.machine.callbacks(self.machine.on_exception, event_data)
else:
raise
finally:
try:
self.machine.callbacks(self.machine.finalize_event, event_data)
_LOGGER.debug("%sExecuted machine finalize callbacks", self.machine.name)
except BaseException as err: # pylint: disable=broad-except; Exception will be handled elsewhere
_LOGGER.error("%sWhile executing finalize callbacks a %s occurred: %s.",
self.machine.name,
type(err).__name__,
str(err))
return event_data.result
def _process(self, event_data):
self.machine.callbacks(self.machine.prepare_event, event_data)
_LOGGER.debug("%sExecuted machine preparation callbacks before conditions.", self.machine.name)
for trans in self.transitions[event_data.state.name]:
event_data.transition = trans
if trans.execute(event_data):
event_data.result = True
break
def _is_valid_source(self, state):
if state.name not in self.transitions:
msg = "%sCan't trigger event %s from state %s!" % (self.machine.name, self.name,
state.name)
ignore = state.ignore_invalid_triggers if state.ignore_invalid_triggers is not None \
else self.machine.ignore_invalid_triggers
if ignore:
_LOGGER.warning(msg)
return False
raise MachineError(msg)
return True
def __repr__(self):
return "<%s('%s')@%s>" % (type(self).__name__, self.name, id(self))
def add_callback(self, trigger, func):
"""Add a new before or after callback to all available transitions.
Args:
trigger (str): The type of triggering event. Must be one of
'before', 'after' or 'prepare'.
func (str): The name of the callback function.
"""
for trans in itertools.chain(*self.transitions.values()):
trans.add_callback(trigger, func)
| Event |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/argparsing/parsers.py | {
"start": 19252,
"end": 20603
} | class ____(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers consisting of a left and right argument parser, with input separated by a delimiter."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = self.create_namespace()
state.set_namespace(namespace)
with state.delimit(self.delimiter, self.required) as boundary: # type: ParserBoundary
choice = self.get_left_parser(state).parse(state)
if boundary.match:
self.get_right_parser(choice).parse(state)
return namespace
@property
def required(self) -> bool:
"""True if the delimiter (and thus right parser) is required, otherwise False."""
return False
@property
def delimiter(self) -> str:
"""The delimiter to use between the left and right parser."""
return PAIR_DELIMITER
@abc.abstractmethod
def create_namespace(self) -> t.Any:
"""Create and return a namespace."""
@abc.abstractmethod
def get_left_parser(self, state: ParserState) -> Parser:
"""Return the parser for the left side."""
@abc.abstractmethod
def get_right_parser(self, choice: t.Any) -> Parser:
"""Return the parser for the right side."""
| PairParser |
python | pandas-dev__pandas | pandas/core/arrays/arrow/accessors.py | {
"start": 530,
"end": 1438
} | class ____(metaclass=ABCMeta):
@abstractmethod
def __init__(self, data, validation_msg: str) -> None:
self._data = data
self._validation_msg = validation_msg
self._validate(data)
@abstractmethod
def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
pass
def _validate(self, data) -> None:
dtype = data.dtype
if not HAS_PYARROW or not isinstance(dtype, ArrowDtype):
# Raise AttributeError so that inspect can handle non-struct Series.
raise AttributeError(self._validation_msg.format(dtype=dtype))
if not self._is_valid_pyarrow_dtype(dtype.pyarrow_dtype):
# Raise AttributeError so that inspect can handle invalid Series.
raise AttributeError(self._validation_msg.format(dtype=dtype))
@property
def _pa_array(self):
return self._data.array._pa_array
| ArrowAccessor |
python | getsentry__sentry | tests/sentry/api/serializers/test_event.py | {
"start": 12506,
"end": 14775
} | class ____(TestCase):
def test_user(self) -> None:
"""
Use the SimpleEventSerializer to serialize an event
"""
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"user": {"email": "test@test.com"},
},
project_id=self.project.id,
)
assert event.group is not None
result = serialize(event, None, SimpleEventSerializer())
assert result["eventID"] == event.event_id
assert result["projectID"] == str(event.project_id)
assert result["groupID"] == str(event.group.id)
assert result["message"] == event.message
assert result["title"] == event.title
assert result["location"] == event.location
assert result["culprit"] == event.culprit
assert result["dateCreated"] == event.datetime
assert result["user"]["id"] == event.get_minimal_user().id
assert result["user"]["email"] == event.get_minimal_user().email
assert result["user"]["username"] == event.get_minimal_user().username
assert result["user"]["ip_address"] == event.get_minimal_user().ip_address
assert result["tags"] == [
{"key": "level", "value": "error"},
{"key": "user", "value": "email:test@test.com", "query": 'user.email:"test@test.com"'},
]
def test_no_group(self) -> None:
"""
Use the SimpleEventSerializer to serialize an event without group
"""
event = self.store_event(
data={
"event_id": "a" * 32,
"start_timestamp": before_now(minutes=1, seconds=5).isoformat(),
"timestamp": before_now(minutes=1).isoformat(),
"user": {"email": "test@test.com"},
"type": "transaction",
"transaction": "api.issue.delete",
"spans": [],
"contexts": {"trace": {"op": "foobar", "trace_id": "a" * 32, "span_id": "a" * 16}},
},
project_id=self.project.id,
)
result = serialize(event, None, SimpleEventSerializer())
assert result["groupID"] is None
| SimpleEventSerializerTest |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 67820,
"end": 68915
} | class ____(TypedDict, total=False):
"""
:class:`altair.BindRadioSelect` ``TypedDict`` wrapper.
Parameters
----------
input
options
An array of options to select from.
debounce
If defined, delays event handling until the specified milliseconds have elapsed
since the last event was fired.
element
An optional CSS selector string indicating the parent element to which the input
element should be added. By default, all input elements are added within the parent
container of the Vega view.
labels
An array of label strings to represent the ``options`` values. If unspecified, the
``options`` value will be coerced to a string and used as the label.
name
By default, the signal name is used to label input elements. This ``name`` property
can be used instead to specify a custom label for the bound signal.
"""
input: Literal["radio", "select"]
options: Sequence[Any]
debounce: float
element: str
labels: Sequence[str]
name: str
| BindRadioSelectKwds |
python | openai__openai-python | src/openai/types/responses/response_input_item_param.py | {
"start": 11674,
"end": 12135
} | class ____(TypedDict, total=False):
id: Required[str]
"""The unique ID of the list."""
server_label: Required[str]
"""The label of the MCP server."""
tools: Required[Iterable[McpListToolsTool]]
"""The tools available on the server."""
type: Required[Literal["mcp_list_tools"]]
"""The type of the item. Always `mcp_list_tools`."""
error: Optional[str]
"""Error message if the server could not list tools."""
| McpListTools |
python | wandb__wandb | tests/unit_tests/test_artifacts/test_wandb_artifacts.py | {
"start": 17742,
"end": 20395
} | class ____:
"""Wrap a file as a Opener."""
def __init__(self, file: IO):
self.file = file
def __call__(self, mode: str = "r") -> ContextManager[IO]:
@contextmanager
def _fake_context():
yield self.file
return _fake_context()
def test_artifact_multipart_download_network_error():
# Disable retries and backoff to avoid timeout in test
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=0)
session.mount("http://", adapter)
session.mount("https://", adapter)
class CountOnlyFile(IO):
def __init__(self):
self.write_count = 0
self.seek_count = 0
def seek(self, offset: int, whence: int = 0) -> int:
self.seek_count += 1
return offset
def write(self, s: AnyStr) -> int:
self.write_count += 1
return len(s)
file = CountOnlyFile()
opener = MockOpener(file)
with pytest.raises(requests.exceptions.ConnectionError):
with ThreadPoolExecutor(max_workers=2) as executor:
multipart_download(
executor, session, "https://invalid.com", 4 * 1024 * 1024 * 1024, opener
)
assert file.seek_count == 0
assert file.write_count == 0
def test_artifact_multipart_download_disk_error():
class ThrowFile(IO):
def seek(self, offset: int, whence: int = 0) -> int:
raise ValueError("I/O operation on closed file")
class MockResponse:
def raise_for_status(self):
pass
def iter_content(self, chunk_size: int = 1024):
return [b"test"]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
class MockSession:
def __init__(self):
self.get_count = 0
def get(self, url: str, stream: bool = False, headers: dict = None):
self.get_count += 1
return MockResponse()
session = MockSession()
file = ThrowFile()
opener = MockOpener(file)
with pytest.raises(ValueError):
with ThreadPoolExecutor(max_workers=2) as executor:
multipart_download(
executor,
session,
"https://mocked.com",
500 * 1024 * 1024, # 500MB should have 5 parts
opener,
)
# After first get call has errors, reamining get call should return without making the call.
# It can be 5 depends on underlying environment,e.g. it fails on winodws from time to time.
assert session.get_count <= 5
| MockOpener |
python | pola-rs__polars | py-polars/src/polars/_typing.py | {
"start": 9857,
"end": 9986
} | class ____(Protocol):
def cursor(self, *args: Any, **kwargs: Any) -> Any:
"""Return a cursor object."""
| BasicConnection |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/dml.py | {
"start": 10841,
"end": 10960
} | class ____(OnConflictClause):
__visit_name__ = "on_conflict_do_nothing"
inherit_cache = True
| OnConflictDoNothing |
python | huggingface__transformers | src/transformers/quantizers/quantizer_vptq.py | {
"start": 986,
"end": 3600
} | class ____(HfQuantizer):
"""
Quantizer of the VPTQ method. Enables the loading of prequantized models.
"""
requires_calibration = True
required_packages = ["vptq"]
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Using `vptq` quantization requires Accelerate: `pip install accelerate`")
if not is_vptq_available():
raise ImportError("Using `vptq` quantization requires VPTQ>=0.0.4: `pip install -U vptq`")
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
if torch.cuda.is_available():
dtype = torch.float16
logger.info(
"CUDA available. Assuming VPTQ inference on GPU and loading the model in `torch.float16`. To overwrite it, set `dtype` manually."
)
else:
import vptq
device_availability = getattr(vptq, "device_availability", lambda device: False)
if device_availability("cpu") is True:
raise RuntimeError("No GPU found. Please wait for the next release of VPTQ to use CPU inference")
dtype = torch.float32
logger.info("No GPU found. Assuming VPTQ inference on CPU and loading the model in `torch.float32`.")
return dtype
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: list[str] | None = None,
**kwargs,
):
"""
we don't have param like modules_to_not_convert to indicate which layers should not be quantized
because `quantization_config` include the layers that should be quantized
"""
from ..integrations import replace_with_vptq_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
replace_with_vptq_linear(
model,
quantization_config=self.quantization_config,
modules_to_not_convert=self.modules_to_not_convert,
)
model.config.quantization_config = self.quantization_config
@property
def is_trainable(self) -> bool:
return False
def is_serializable(self, safe_serialization=None):
return True
| VptqHfQuantizer |
python | bokeh__bokeh | src/bokeh/plotting/_figure.py | {
"start": 2776,
"end": 29456
} | class ____(Plot, GlyphAPI):
''' Create a new figure for plotting.
A subclass of |Plot| that simplifies plot creation with default axes, grids,
tools, etc.
Figure objects have many glyph methods that can be used to draw
vectorized graphical glyphs:
.. hlist::
:columns: 3
* :func:`~bokeh.plotting.figure.annular_wedge`
* :func:`~bokeh.plotting.figure.annulus`
* :func:`~bokeh.plotting.figure.arc`
* :func:`~bokeh.plotting.figure.asterisk`
* :func:`~bokeh.plotting.figure.bezier`
* :func:`~bokeh.plotting.figure.circle`
* :func:`~bokeh.plotting.figure.circle_cross`
* :func:`~bokeh.plotting.figure.circle_dot`
* :func:`~bokeh.plotting.figure.circle_x`
* :func:`~bokeh.plotting.figure.circle_y`
* :func:`~bokeh.plotting.figure.cross`
* :func:`~bokeh.plotting.figure.dash`
* :func:`~bokeh.plotting.figure.diamond`
* :func:`~bokeh.plotting.figure.diamond_cross`
* :func:`~bokeh.plotting.figure.diamond_dot`
* :func:`~bokeh.plotting.figure.dot`
* :func:`~bokeh.plotting.figure.ellipse`
* :func:`~bokeh.plotting.figure.harea`
* :func:`~bokeh.plotting.figure.harea_step`
* :func:`~bokeh.plotting.figure.hbar`
* :func:`~bokeh.plotting.figure.hex`
* :func:`~bokeh.plotting.figure.hex_tile`
* :func:`~bokeh.plotting.figure.hstrip`
* :func:`~bokeh.plotting.figure.hspan`
* :func:`~bokeh.plotting.figure.image`
* :func:`~bokeh.plotting.figure.image_rgba`
* :func:`~bokeh.plotting.figure.image_url`
* :func:`~bokeh.plotting.figure.inverted_triangle`
* :func:`~bokeh.plotting.figure.line`
* :func:`~bokeh.plotting.figure.multi_line`
* :func:`~bokeh.plotting.figure.multi_polygons`
* :func:`~bokeh.plotting.figure.ngon`
* :func:`~bokeh.plotting.figure.patch`
* :func:`~bokeh.plotting.figure.patches`
* :func:`~bokeh.plotting.figure.plus`
* :func:`~bokeh.plotting.figure.quad`
* :func:`~bokeh.plotting.figure.quadratic`
* :func:`~bokeh.plotting.figure.ray`
* :func:`~bokeh.plotting.figure.rect`
* :func:`~bokeh.plotting.figure.segment`
* :func:`~bokeh.plotting.figure.square`
* :func:`~bokeh.plotting.figure.square_cross`
* :func:`~bokeh.plotting.figure.square_dot`
* :func:`~bokeh.plotting.figure.square_pin`
* :func:`~bokeh.plotting.figure.square_x`
* :func:`~bokeh.plotting.figure.star`
* :func:`~bokeh.plotting.figure.star_dot`
* :func:`~bokeh.plotting.figure.step`
* :func:`~bokeh.plotting.figure.text`
* :func:`~bokeh.plotting.figure.triangle`
* :func:`~bokeh.plotting.figure.triangle_dot`
* :func:`~bokeh.plotting.figure.triangle_pin`
* :func:`~bokeh.plotting.figure.varea`
* :func:`~bokeh.plotting.figure.varea_step`
* :func:`~bokeh.plotting.figure.vbar`
* :func:`~bokeh.plotting.figure.vstrip`
* :func:`~bokeh.plotting.figure.vspan`
* :func:`~bokeh.plotting.figure.wedge`
* :func:`~bokeh.plotting.figure.x`
* :func:`~bokeh.plotting.figure.y`
There is a scatter function that can be parameterized by marker type:
* :func:`~bokeh.plotting.figure.scatter`
There are also specialized methods for stacking bars:
* bars: :func:`~bokeh.plotting.figure.hbar_stack`, :func:`~bokeh.plotting.figure.vbar_stack`
* lines: :func:`~bokeh.plotting.figure.hline_stack`, :func:`~bokeh.plotting.figure.vline_stack`
* areas: :func:`~bokeh.plotting.figure.harea_stack`, :func:`~bokeh.plotting.figure.varea_stack`
As well as one specialized method for making simple hexbin plots:
* :func:`~bokeh.plotting.figure.hexbin`
In addition to all the ``figure`` property attributes, the following
options are also accepted:
.. bokeh-options:: FigureOptions
:module: bokeh.plotting._figure
'''
__view_model__ = "Figure"
def __init__(self, *arg, **kw) -> None:
opts = FigureOptions(kw)
names = self.properties()
for name in kw.keys():
if name not in names:
self._raise_attribute_error_with_matches(name, names | opts.properties())
super().__init__(*arg, **kw)
self.x_range = get_range(opts.x_range)
self.y_range = get_range(opts.y_range)
self.x_scale = get_scale(self.x_range, opts.x_axis_type)
self.y_scale = get_scale(self.y_range, opts.y_axis_type)
process_axis_and_grid(self, opts.x_axis_type, opts.x_axis_location, opts.x_minor_ticks, opts.x_axis_label, self.x_range, 0)
process_axis_and_grid(self, opts.y_axis_type, opts.y_axis_location, opts.y_minor_ticks, opts.y_axis_label, self.y_range, 1)
tool_objs, tool_map = process_tools_arg(self, opts.tools, opts.tooltips)
self.add_tools(*tool_objs)
process_active_tools(
self.toolbar,
tool_map,
opts.active_drag,
opts.active_inspect,
opts.active_scroll,
opts.active_tap,
opts.active_multi,
)
@property
def plot(self):
return self
@property
def coordinates(self):
return None
def subplot(self,
*,
x_source: Range | None = None, y_source: Range | None = None,
x_scale: Scale | None = None, y_scale: Scale | None = None,
x_target: Range, y_target: Range,
) -> GlyphAPI:
""" Create a new sub-coordinate system and expose a plotting API. """
coordinates = CoordinateMapping(x_source=x_source, y_source=y_source, x_target=x_target, y_target=y_target)
return GlyphAPI(self, coordinates)
def hexbin(self, x, y, size, orientation="pointytop", palette="Viridis256", line_color=None, fill_color=None, aspect_scale=1, **kwargs):
''' Perform a simple equal-weight hexagonal binning.
A :class:`~bokeh.models.glyphs.HexTile` glyph will be added to display
the binning. The :class:`~bokeh.models.sources.ColumnDataSource` for
the glyph will have columns ``q``, ``r``, and ``count``, where ``q``
and ``r`` are `axial coordinates`_ for a tile, and ``count`` is the
associated bin count.
It is often useful to set ``match_aspect=True`` on the associated plot,
so that hexagonal tiles are all regular (i.e. not "stretched") in
screen space.
For more sophisticated use-cases, e.g. weighted binning or individually
scaling hex tiles, use :func:`hex_tile` directly, or consider a higher
level library such as HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates to bin into hexagonal tiles.
y (array[float]) :
A NumPy array of y-coordinates to bin into hexagonal tiles.
size (float) :
The size of the hexagonal tiling to use. The size is defined as
distance from the center of a hexagon to a corner.
In case the aspect scaling is not 1-1, then specifically `size`
is the distance from the center to the "top" corner with the
`"pointytop"` orientation, and the distance from the center to
a "side" corner with the "flattop" orientation.
orientation ("pointytop" or "flattop", optional) :
Whether the hexagonal tiles should be oriented with a pointed
corner on top, or a flat side on top. (default: "pointytop")
palette (str or seq[color], optional) :
A palette (or palette name) to use to colormap the bins according
to count. (default: 'Viridis256')
If ``fill_color`` is supplied, it overrides this value.
line_color (color, optional) :
The outline color for hex tiles, or None (default: None)
fill_color (color, optional) :
An optional fill color for hex tiles, or None. If None, then
the ``palette`` will be used to color map the tiles by
count. (default: None)
aspect_scale (float) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Any additional keyword arguments are passed to :func:`hex_tile`.
Returns:
(Glyphrender, DataFrame)
A tuple with the ``HexTile`` renderer generated to display the
binning, and a Pandas ``DataFrame`` with columns ``q``, ``r``,
and ``count``, where ``q`` and ``r`` are `axial coordinates`_
for a tile, and ``count`` is the associated bin count.
Example:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.models import HoverTool
from bokeh.plotting import figure, show
x = 2 + 2*np.random.standard_normal(500)
y = 2 + 2*np.random.standard_normal(500)
p = figure(match_aspect=True, tools="wheel_zoom,reset")
p.background_fill_color = '#440154'
p.grid.visible = False
p.hexbin(x, y, size=0.5, hover_color="pink", hover_alpha=0.8)
hover = HoverTool(tooltips=[("count", "@c"), ("(q,r)", "(@q, @r)")])
p.add_tools(hover)
show(p)
.. _axial coordinates: https://www.redblobgames.com/grids/hexagons/#coordinates-axial
'''
from ..util.hex import hexbin
bins = hexbin(x, y, size, orientation, aspect_scale=aspect_scale)
if fill_color is None:
fill_color = linear_cmap('c', palette, 0, max(bins.counts))
source = ColumnDataSource(data=dict(q=bins.q, r=bins.r, c=bins.counts))
r = self.hex_tile(q="q", r="r", size=size, orientation=orientation, aspect_scale=aspect_scale,
source=source, line_color=line_color, fill_color=fill_color, **kwargs)
return (r, bins)
def harea_stack(self, stackers, **kw):
''' Generate multiple ``HArea`` renderers for levels stacked left
to right.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``x1`` and ``x2`` harea coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``harea``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``harea_stack``
will create two ``HArea`` renderers that stack:
.. code-block:: python
p.harea_stack(['2016', '2017'], y='y', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.harea(x1=stack(), x2=stack('2016'), y='y', color='blue', source=source, name='2016')
p.harea(x1=stack('2016'), x2=stack('2016', '2017'), y='y', color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "x1", "x2", **kw):
result.append(self.harea(**kw))
return result
def hbar_stack(self, stackers, **kw):
''' Generate multiple ``HBar`` renderers for levels stacked left to right.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``left`` and ``right`` bar coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``hbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``hbar_stack``
will create two ``HBar`` renderers that stack:
.. code-block:: python
p.hbar_stack(['2016', '2017'], y=10, width=0.9, color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.hbar(bottom=stack(), top=stack('2016'), y=10, width=0.9, color='blue', source=source, name='2016')
p.hbar(bottom=stack('2016'), top=stack('2016', '2017'), y=10, width=0.9, color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "left", "right", **kw):
result.append(self.hbar(**kw))
return result
def _line_stack(self, x, y, **kw):
''' Generate multiple ``Line`` renderers for lines stacked vertically
or horizontally.
Args:
x (seq[str]) :
y (seq[str]) :
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``hbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``line_stack`` with
stackers for the y-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.line_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(y=stack('2016'), x='x', color='blue', source=source, name='2016')
p.line(y=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
if all(isinstance(val, (list, tuple)) for val in (x,y)):
raise ValueError("Only one of x or y may be a list of stackers")
result = []
if isinstance(y, (list, tuple)):
kw['x'] = x
for kw in single_stack(y, "y", **kw):
result.append(self.line(**kw))
return result
if isinstance(x, (list, tuple)):
kw['y'] = y
for kw in single_stack(x, "x", **kw):
result.append(self.line(**kw))
return result
return [self.line(x, y, **kw)]
def hline_stack(self, stackers, **kw):
''' Generate multiple ``Line`` renderers for lines stacked horizontally.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``x`` line coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``line``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``hline_stack`` with
stackers for the x-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.hline_stack(['2016', '2017'], y='y', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(x=stack('2016'), y='y', color='blue', source=source, name='2016')
p.line(x=stack('2016', '2017'), y='y', color='red', source=source, name='2017')
'''
return self._line_stack(x=stackers, **kw)
def varea_stack(self, stackers, **kw):
''' Generate multiple ``VArea`` renderers for levels stacked bottom
to top.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``y1`` and ``y1`` varea coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``varea``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``varea_stack``
will create two ``VArea`` renderers that stack:
.. code-block:: python
p.varea_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.varea(y1=stack(), y2=stack('2016'), x='x', color='blue', source=source, name='2016')
p.varea(y1=stack('2016'), y2=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "y1", "y2", **kw):
result.append(self.varea(**kw))
return result
def vbar_stack(self, stackers, **kw):
''' Generate multiple ``VBar`` renderers for levels stacked bottom
to top.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``left`` and ``right`` bar coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``vbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``vbar_stack``
will create two ``VBar`` renderers that stack:
.. code-block:: python
p.vbar_stack(['2016', '2017'], x=10, width=0.9, color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.vbar(bottom=stack(), top=stack('2016'), x=10, width=0.9, color='blue', source=source, name='2016')
p.vbar(bottom=stack('2016'), top=stack('2016', '2017'), x=10, width=0.9, color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "bottom", "top", **kw):
result.append(self.vbar(**kw))
return result
def vline_stack(self, stackers, **kw):
''' Generate multiple ``Line`` renderers for lines stacked vertically.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``y`` line coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``line``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``vline_stack`` with
stackers for the y-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.vline_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(y=stack('2016'), x='x', color='blue', source=source, name='2016')
p.line(y=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
return self._line_stack(y=stackers, **kw)
def graph(self, node_source: ColumnDataSource, edge_source: ColumnDataSource, layout_provider: LayoutProvider, **kwargs):
''' Creates a network graph using the given node, edge and layout provider.
Args:
node_source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source
for the graph nodes. An attempt will be made to convert the object to
:class:`~bokeh.models.sources.ColumnDataSource` if needed. If none is supplied, one is created
for the user automatically.
edge_source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source
for the graph edges. An attempt will be made to convert the object to
:class:`~bokeh.models.sources.ColumnDataSource` if needed. If none is supplied, one is created
for the user automatically.
layout_provider (:class:`~bokeh.models.graphs.LayoutProvider`) : a ``LayoutProvider`` instance to
provide the graph coordinates in Cartesian space.
**kwargs: |line properties| and |fill properties|
'''
kw = get_graph_kwargs(node_source, edge_source, **kwargs)
graph_renderer = GraphRenderer(layout_provider=layout_provider, **kw)
self.renderers.append(graph_renderer)
return graph_renderer
def contour(
self,
x: ArrayLike | None = None,
y: ArrayLike | None = None,
z: ArrayLike | np.ma.MaskedArray | None = None,
levels: ArrayLike | None = None,
**visuals,
) -> ContourRenderer:
''' Creates a contour plot of filled polygons and/or contour lines.
Filled contour polygons are calculated if ``fill_color`` is set,
contour lines if ``line_color`` is set.
Args:
x (array-like[float] of shape (ny, nx) or (nx,), optional) :
The x-coordinates of the ``z`` values. May be 2D with the same
shape as ``z.shape``, or 1D with length ``nx = z.shape[1]``.
If not specified are assumed to be ``np.arange(nx)``. Must be
ordered monotonically.
y (array-like[float] of shape (ny, nx) or (ny,), optional) :
The y-coordinates of the ``z`` values. May be 2D with the same
shape as ``z.shape``, or 1D with length ``ny = z.shape[0]``.
If not specified are assumed to be ``np.arange(ny)``. Must be
ordered monotonically.
z (array-like[float] of shape (ny, nx)) :
A 2D NumPy array of gridded values to calculate the contours
of. It may be a masked array, and any invalid values (``np.inf``
or ``np.nan``) will also be masked out.
levels (array-like[float]) :
The z-levels to calculate the contours at, must be increasing.
Contour lines are calculated at each level and filled contours
are calculated between each adjacent pair of levels so the
number of sets of contour lines is ``len(levels)`` and the
number of sets of filled contour polygons is ``len(levels)-1``.
**visuals: |fill properties|, |hatch properties| and |line properties|
Fill and hatch properties are used for filled contours, line
properties for line contours. If using vectorized properties
then the correct number must be used, ``len(levels)`` for line
properties and ``len(levels)-1`` for fill and hatch properties.
``fill_color`` and ``line_color`` are more flexible in that
they will accept longer sequences and interpolate them to the
required number using :func:`~bokeh.palettes.linear_palette`,
and also accept palette collections (dictionaries mapping from
integer length to color sequence) such as
`bokeh.palettes.Cividis`.
'''
contour_renderer = from_contour(x, y, z, levels, **visuals)
self.renderers.append(contour_renderer)
return contour_renderer
def markers() -> None:
''' Prints a list of valid marker types for scatter()
Returns:
None
'''
print("Available markers: \n\n - " + "\n - ".join(list(MarkerType)))
print()
print("Shortcuts: \n\n" + "\n".join(f" {short!r}: {name}" for (short, name) in _MARKER_SHORTCUTS.items()))
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
# This class itself is intentionally undocumented (it is used to generate
# documentation elsewhere)
| figure |
python | anthropics__anthropic-sdk-python | src/anthropic/_resource.py | {
"start": 208,
"end": 637
} | class ____:
_client: SyncAPIClient
def __init__(self, client: SyncAPIClient) -> None:
self._client = client
self._get = client.get
self._post = client.post
self._patch = client.patch
self._put = client.put
self._delete = client.delete
self._get_api_list = client.get_api_list
def _sleep(self, seconds: float) -> None:
time.sleep(seconds)
| SyncAPIResource |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_base_aws.py | {
"start": 5740,
"end": 6160
} | class ____(BaseSessionFactory):
def create_session(self):
return mock.MagicMock()
@pytest.fixture
def mock_conn(request):
conn = Connection(conn_type=MOCK_CONN_TYPE, conn_id=MOCK_AWS_CONN_ID)
if request.param == "unwrapped":
return conn
if request.param == "wrapped":
return AwsConnectionWrapper(conn=conn)
raise ValueError("invalid internal test config")
| CustomSessionFactory |
python | pytorch__pytorch | torch/cuda/_utils.py | {
"start": 9496,
"end": 10466
} | class ____:
def __init__(self, module: ctypes.c_void_p) -> None:
self._module = module
self._kernels: dict[str, _CudaKernel] = {}
def __getattr__(self, name: str) -> "_CudaKernel":
if name in self._kernels:
return self._kernels[name]
# Import the CUDA library inside the method
# pyrefly: ignore [missing-module-attribute]
from torch.cuda._utils import _get_gpu_runtime_library
libcuda = _get_gpu_runtime_library()
func = ctypes.c_void_p()
try:
_check_cuda(
libcuda.cuModuleGetFunction(
ctypes.byref(func), self._module, name.encode("utf-8")
)
)
kernel = _CudaKernel(func, self._module)
self._kernels[name] = kernel
return kernel
except RuntimeError as err:
raise AttributeError(f"No kernel named '{name}' in this module") from err
| _CudaModule |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 30814,
"end": 33777
} | class ____(ConvBertPreTrainedModel):
_tied_weights_keys = {"generator_lm_head.weight": "convbert.embeddings.word_embeddings.weight"}
def __init__(self, config):
super().__init__(config)
self.convbert = ConvBertModel(config)
self.generator_predictions = ConvBertGeneratorPredictions(config)
self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.generator_lm_head
def set_output_embeddings(self, word_embeddings):
self.generator_lm_head = word_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
generator_hidden_states = self.convbert(
input_ids,
attention_mask,
token_type_ids,
position_ids,
inputs_embeds,
output_attentions,
output_hidden_states,
return_dict,
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output)
prediction_scores = self.generator_lm_head(prediction_scores)
loss = None
# Masked language modeling softmax layer
if labels is not None:
loss_fct = nn.CrossEntropyLoss() # -100 index = padding token
loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + generator_hidden_states[1:]
return ((loss,) + output) if loss is not None else output
return MaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=generator_hidden_states.hidden_states,
attentions=generator_hidden_states.attentions,
)
| ConvBertForMaskedLM |
python | getsentry__sentry | tests/sentry/deletions/test_monitor_environment.py | {
"start": 454,
"end": 3416
} | class ____(APITestCase, TransactionTestCase, HybridCloudTestMixin):
def test_simple(self) -> None:
project = self.create_project(name="test")
env = Environment.objects.create(organization_id=project.organization_id, name="foo")
env_2 = Environment.objects.create(organization_id=project.organization_id, name="bar")
monitor = Monitor.objects.create(
organization_id=project.organization.id,
project_id=project.id,
config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB},
)
monitor_env = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=env.id,
)
monitor_env_2 = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=env_2.id,
)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
project_id=project.id,
date_added=monitor.date_added,
status=CheckInStatus.OK,
)
checkin_2 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_env_2,
project_id=project.id,
date_added=monitor.date_added,
status=CheckInStatus.OK,
)
self.ScheduledDeletion.schedule(instance=monitor_env, days=0)
with self.tasks():
run_scheduled_deletions()
assert not MonitorEnvironment.objects.filter(id=monitor_env.id).exists()
assert not MonitorCheckIn.objects.filter(id=checkin.id).exists()
# Shared objects should continue to exist.
assert Monitor.objects.filter(id=monitor.id).exists()
assert MonitorEnvironment.objects.filter(id=monitor_env_2.id).exists()
assert MonitorCheckIn.objects.filter(id=checkin_2.id).exists()
assert Environment.objects.filter(id=env.id).exists()
assert Project.objects.filter(id=project.id).exists()
def test_relocated(self) -> None:
project = self.create_project(name="test")
env = Environment.objects.create(organization_id=project.organization_id, name="foo")
monitor = Monitor.objects.create(
organization_id=project.organization.id,
project_id=project.id,
config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB},
)
monitor_env = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=env.id,
)
# Fake the app_label back to the sentry app to test the
# RELOCATED_MODELS mapping
with mock.patch.object(monitor_env._meta, "app_label", "sentry"), self.tasks():
self.ScheduledDeletion.schedule(instance=monitor_env, days=0)
run_scheduled_deletions()
assert not MonitorEnvironment.objects.filter(id=monitor_env.id).exists()
| DeleteMonitorEnvironmentTest |
python | walkccc__LeetCode | solutions/1489. Find Critical and Pseudo-Critical Edges in Minimum Spanning Tree/1489.py | {
"start": 514,
"end": 1948
} | class ____:
def findCriticalAndPseudoCriticalEdges(self, n: int, edges: list[list[int]]) -> list[list[int]]:
criticalEdges = []
pseudoCriticalEdges = []
# Record the index information, so edges[i] := (u, v, weight, index).
for i in range(len(edges)):
edges[i].append(i)
# Sort by the weight.
edges.sort(key=lambda x: x[2])
def getMSTWeight(
firstEdge: list[int],
deletedEdgeIndex: int) -> int | float:
mstWeight = 0
uf = UnionFind(n)
if firstEdge:
uf.unionByRank(firstEdge[0], firstEdge[1])
mstWeight += firstEdge[2]
for u, v, weight, index in edges:
if index == deletedEdgeIndex:
continue
if uf.find(u) == uf.find(v):
continue
uf.unionByRank(u, v)
mstWeight += weight
root = uf.find(0)
if any(uf.find(i) != root for i in range(n)):
return math.inf
return mstWeight
mstWeight = getMSTWeight([], -1)
for edge in edges:
index = edge[3]
# Deleting the `edge` increases the weight of the MST or makes the MST
# invalid.
if getMSTWeight([], index) > mstWeight:
criticalEdges.append(index)
# If an edge can be in any MST, we can always add `edge` to the edge set.
elif getMSTWeight(edge, -1) == mstWeight:
pseudoCriticalEdges.append(index)
return [criticalEdges, pseudoCriticalEdges]
| Solution |
python | coleifer__peewee | playhouse/sqlite_udf.py | {
"start": 9736,
"end": 10309
} | class ____(_heap_agg):
def finalize(self):
if self.ct == 0:
return
elif self.ct == 1:
return 0
prev = min_diff = None
while self.heap:
if min_diff is None:
if prev is None:
prev = heapq.heappop(self.heap)
continue
curr = heapq.heappop(self.heap)
diff = curr - prev
if min_diff is None or min_diff > diff:
min_diff = diff
prev = curr
return min_diff
@aggregate(MATH)
| minrange |
python | huggingface__transformers | tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py | {
"start": 35836,
"end": 44063
} | class ____(unittest.TestCase):
repo_id = "facebook/seamless-m4t-v2-large"
def assertListAlmostEqual(self, list1, list2, tol=1e-4):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
@cached_property
def processor(self):
return SeamlessM4TProcessor.from_pretrained(self.repo_id)
@cached_property
def input_text(self):
# corresponds to "C'est un test." with seamlessM4T_medium checkpoint
input_ids = torch.tensor([[256026, 109, 247729, 171, 128, 6816, 247676, 3]]) # fmt: skip
input_ids = input_ids.to(torch_device)
attention_mask = torch.ones_like(input_ids).to(torch_device)
inputs = {
"attention_mask": attention_mask,
"input_ids": input_ids,
}
return inputs
@cached_property
def input_audio(self):
set_seed(0)
seq_len = 20000
sampling_rate = 16000
input_features = torch.rand((2, seq_len))
return self.processor(audio=[input_features.tolist()], sampling_rate=sampling_rate, return_tensors="pt").to(
torch_device
)
def factory_test_task(self, class1, class2, inputs, class1_kwargs, class2_kwargs):
# half-precision loading to limit GPU usage
model1 = class1.from_pretrained(self.repo_id, dtype=torch.float16).to(torch_device)
model2 = class2.from_pretrained(self.repo_id, dtype=torch.float16).to(torch_device)
set_seed(0)
output_1 = model1.generate(**inputs, **class1_kwargs)
set_seed(0)
output_2 = model2.generate(**inputs, **class2_kwargs)
for key in output_1:
if isinstance(output_1[key], torch.Tensor):
if len(output_1[key].shape) == 0:
self.assertEqual(output_1[key].item(), output_2[key].item())
else:
self.assertListAlmostEqual(output_1[key].squeeze().tolist(), output_2[key].squeeze().tolist())
@slow
def test_to_eng_text(self):
model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device)
# test text - tgt lang: eng
expected_text_tokens = [3, 256022, 3080, 1, 247669, 10, 6816, 247676, 3] # fmt: skip
# fmt: off
expected_unit_tokens = [
4746,7163,8208,8208,1315,1266,4307,1119,989,9594,3007,3007,4341,5205,7631,7631,3202,4061,9092,3191,7509,1715,
5280,5280,3554,8812,8197,6366,5382,5382,7330,2758,9433,9433,6863,7510,5800,5800,5286,1948,1825,1825,3956,8724,
8724,5331,8914,9315,9315,5288,2588,8167,8787,8787,8063,6008,2621,2621,2621,5696
]
# fmt: on
expected_wav_slice = [9.485097e-04, 8.320558e-04, 7.178137e-04, 9.349979e-04, 1.121628e-03, 1.091766e-03, 1.279693e-03, 1.387754e-03, 1.296396e-03, 1.143557e-03] # fmt: skip
set_seed(0)
output = model.generate(**self.input_text, num_beams=1, tgt_lang="eng", return_intermediate_token_ids=True)
self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist())
self.assertListEqual(
expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist()
)
self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60])
# assert mean and std equality
self.assertListAlmostEqual(
[-2.349690e-04, 9.920777e-02], [output.waveform.mean().item(), output.waveform.std().item()]
)
@slow
@unittest.skip(reason="Equivalence is broken since a new update")
def test_to_swh_text(self):
model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device)
# test text - tgt lang: swh
expected_text_tokens = [3, 256084, 109, 247729, 171, 10, 6816, 247676, 3] # fmt: skip
# fmt: off
expected_unit_tokens = [
5725,7163,7472,7472,6915,3099,3099,9921,2765,6515,6515,1374,1374,1347,8252,9854,9854,5662,2420,6600,2216,4503,
7208,6107,6107,7298,9123,6472,9663,9663,6366,6366,6445,575,3575,2052,2052,5788,5800,5800,5286,5286,1825,1825,3956,
3956,8724,8724,5331,8914,8914,9315,9315,2821,8167,8167,8787,8787,8787,8700,8700,8700,2175,2175,3196,3196,2621,1725,
1725,7507,5696
]
# fmt: on
expected_wav_slice = [3.124037e-04, 2.450471e-04, 2.286572e-04, 2.317214e-04, 2.732605e-04, 2.478790e-04, 2.704144e-04, 2.665847e-04, 2.828784e-04, 2.684390e-04] # fmt: skip
set_seed(0)
output = model.generate(**self.input_text, num_beams=1, tgt_lang="swh", return_intermediate_token_ids=True)
self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist())
self.assertListEqual(
expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist()
)
self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60])
# assert mean and std equality
self.assertListAlmostEqual(
[-2.001826e-04, 8.580012e-02], [output.waveform.mean().item(), output.waveform.std().item()]
)
@require_speech
@slow
def test_to_rus_speech(self):
model = SeamlessM4Tv2Model.from_pretrained(self.repo_id).to(torch_device)
# test audio - tgt lang: rus
expected_text_tokens = [3, 256074, 107, 248213, 404, 247792, 247789, 3] # fmt: skip
# fmt: off
expected_unit_tokens = [
8976,7163,6915,2728,2728,5198,3318,3318,3686,1049,9643,1200,2052,2052,8196,8196,7624,7624,7555,7555,7555,7555,
9717,9717,4869,8167,8167,8167,8053,972,9362,8167,297,297,297,3993,3993,3993,3993,4660,4660,4660,4660,4660,4660,
7962,7962,225,225,8737,4199
]
# fmt: on
expected_wav_slice = [1.415287e-03, 1.360976e-03, 1.297727e-03, 1.305321e-03, 1.352087e-03, 1.283812e-03, 1.352623e-03, 1.387384e-03, 1.449627e-03, 1.411701e-03] # fmt: skip
set_seed(0)
output = model.generate(**self.input_audio, num_beams=1, tgt_lang="rus", return_intermediate_token_ids=True)
self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist())
self.assertListEqual(
expected_unit_tokens, (output.unit_sequences - model.config.vocoder_offset).squeeze().tolist()
)
self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60])
# assert mean and std equality - higher tolerance for speech
self.assertListAlmostEqual(
[-2.818016e-04, 7.169888e-02], [output.waveform.mean().item(), output.waveform.std().item()], tol=5e-4
)
@slow
def test_text_to_text_model(self):
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False}
kwargs2 = {
"tgt_lang": "eng",
"output_hidden_states": True,
"return_dict_in_generate": True,
"output_scores": True,
}
self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForTextToText, self.input_text, kwargs1, kwargs2)
@require_speech
@slow
def test_speech_to_text_model(self):
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False}
kwargs2 = {
"tgt_lang": "eng",
"output_hidden_states": True,
"return_dict_in_generate": True,
"output_scores": True,
}
self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToText, self.input_audio, kwargs1, kwargs2)
@require_speech
@slow
def test_speech_to_speech_model(self):
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True}
self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForSpeechToSpeech, self.input_audio, kwargs1, kwargs1)
@slow
def test_text_to_speech_model(self):
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True}
self.factory_test_task(SeamlessM4Tv2Model, SeamlessM4Tv2ForTextToSpeech, self.input_text, kwargs1, kwargs1)
| SeamlessM4Tv2ModelIntegrationTest |
python | google__pytype | pytype/load_pytd.py | {
"start": 3955,
"end": 8011
} | class ____:
"""A map of fully qualified module name -> Module."""
def __init__(self, options, modules):
self.options = options
self._modules: dict[str, Module] = modules or self._base_modules()
if self._modules["builtins"].needs_unpickling():
self._unpickle_module(self._modules["builtins"])
if self._modules["typing"].needs_unpickling():
self._unpickle_module(self._modules["typing"])
self._concatenated = None
def __getitem__(self, key):
return self._modules[key]
def __setitem__(self, key, val):
self._modules[key] = val
def __delitem__(self, key):
del self._modules[key]
def __contains__(self, key):
return key in self._modules
def items(self):
return self._modules.items()
def values(self):
return self._modules.values()
def get(self, key):
return self._modules.get(key)
def get_existing_ast(self, module_name: str) -> _AST | None:
existing = self._modules.get(module_name)
if existing:
if existing.needs_unpickling():
self._unpickle_module(existing)
return existing.ast
return None
def defined_asts(self) -> Iterable[_AST]:
"""All module ASTs that are not None."""
return (module.ast for module in self._modules.values() if module.ast)
def get_module_map(self) -> dict[str, _AST]:
"""Get a {name: ast} map of all modules with a filled-in ast."""
return {
name: module.ast for name, module in self._modules.items() if module.ast
}
def get_resolved_modules(self) -> dict[str, ResolvedModule]:
"""Get a {name: ResolvedModule} map of all resolved modules."""
resolved_modules = {}
for name, mod in self._modules.items():
if not mod.has_unresolved_pointers:
resolved_modules[name] = ResolvedModule(
mod.module_name, mod.filename, mod.ast, mod.metadata
)
return resolved_modules
def _base_modules(self):
bltins, typing = builtin_stubs.GetBuiltinsAndTyping(
parser.PyiOptions.from_toplevel_options(self.options)
)
return {
"builtins": Module.resolved_internal_stub("builtins", bltins),
"typing": Module.resolved_internal_stub("typing", typing),
}
def _unpickle_module(self, module):
"""Unpickle a pickled ast and its dependencies."""
if not module.pickle:
return
todo = [module]
seen = set()
newly_loaded_asts = []
while todo:
m = todo.pop()
if m in seen:
continue
else:
seen.add(m)
if not m.pickle:
continue
loaded_ast = pickle_utils.DecodeAst(m.pickle)
deps = [d for d, _ in loaded_ast.dependencies if d != loaded_ast.ast.name]
loaded_ast = serialize_ast.EnsureAstName(loaded_ast, m.module_name)
assert m.module_name in self._modules
for dependency in deps:
module_prefix = dependency
while module_prefix not in self._modules:
if "." in module_prefix:
module_prefix, _, _ = module_prefix.rpartition(".")
else:
raise KeyError(f"Module not found: {dependency}")
todo.append(self._modules[module_prefix])
newly_loaded_asts.append(loaded_ast)
m.ast = loaded_ast.ast
if _is_package(loaded_ast.src_path):
init_file = f"__init__{file_utils.PICKLE_EXT}"
if m.filename and path_utils.basename(m.filename) != init_file:
base, _ = path_utils.splitext(m.filename)
m.filename = path_utils.join(base, init_file)
else:
m.filename = imports_base.internal_stub_filename(
path_utils.join(m.module_name, init_file)
)
m.pickle = None
module_map = self.get_module_map()
for loaded_ast in newly_loaded_asts:
serialize_ast.FillLocalReferences(loaded_ast, module_map)
assert module.ast
def concat_all(self):
if not self._concatenated:
self._concatenated = pytd_utils.Concat(*self.defined_asts(), name="<all>")
return self._concatenated
def invalidate_concatenated(self):
self._concatenated = None
| _ModuleMap |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.