language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/544. Output Contest Matches/544.py | {
"start": 0,
"end": 438
} | class ____:
def findContestMatch(self, n: int) -> str:
def generateMatches(matches: list[str]) -> str:
if len(matches) == 1:
return matches[0]
nextMatches = []
for i in range(len(matches) // 2):
nextMatches.append(
'(' + matches[i] + ',' + matches[len(matches) - 1 - i] + ')')
return generateMatches(nextMatches)
return generateMatches([str(i + 1) for i in range(n)])
| Solution |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_compiler.py | {
"start": 2648,
"end": 4079
} | class ____(AssertsCompiledSQL):
@testing.fixture()
def mysql_mariadb_reserved_words(self):
table = Table(
"rw_table",
MetaData(),
Column("mysql_reserved", Integer),
Column("mdb_mysql_reserved", Integer),
Column("mdb_reserved", Integer),
)
expected_mysql = (
"SELECT rw_table.`mysql_reserved`, "
"rw_table.`mdb_mysql_reserved`, "
"rw_table.mdb_reserved FROM rw_table"
)
expected_mdb = (
"SELECT rw_table.mysql_reserved, "
"rw_table.`mdb_mysql_reserved`, "
"rw_table.`mdb_reserved` FROM rw_table"
)
from sqlalchemy.dialects.mysql import reserved_words
reserved_words.RESERVED_WORDS_MARIADB.add("mdb_reserved")
reserved_words.RESERVED_WORDS_MYSQL.add("mysql_reserved")
reserved_words.RESERVED_WORDS_MYSQL.add("mdb_mysql_reserved")
reserved_words.RESERVED_WORDS_MARIADB.add("mdb_mysql_reserved")
try:
yield table, expected_mysql, expected_mdb
finally:
reserved_words.RESERVED_WORDS_MARIADB.discard("mdb_reserved")
reserved_words.RESERVED_WORDS_MYSQL.discard("mysql_reserved")
reserved_words.RESERVED_WORDS_MYSQL.discard("mdb_mysql_reserved")
reserved_words.RESERVED_WORDS_MARIADB.discard("mdb_mysql_reserved")
| ReservedWordFixture |
python | falconry__falcon | falcon/routing/converters.py | {
"start": 4126,
"end": 5306
} | class ____(BaseConverter):
"""Converts a field value to an float.
Identifier: `float`
Keyword Args:
min (float): Reject the value if it is less than this number.
max (float): Reject the value if it is greater than this number.
finite (bool) : Determines whether or not to only match ordinary
finite numbers (default: ``True``). Set to ``False`` to match
``nan``, ``inf``, and ``-inf`` in addition to finite numbers.
.. versionadded:: 4.0
"""
__slots__ = '_finite', '_min', '_max'
def __init__(
self,
min: float | None = None,
max: float | None = None,
finite: bool = True,
) -> None:
self._min = min
self._max = max
self._finite = finite if finite is not None else True
def convert(self, value: str) -> float | None:
if value.strip() != value:
return None
try:
converted = float(value)
if self._finite and not isfinite(converted):
return None
except ValueError:
return None
return _validate_min_max_value(self, converted)
| FloatConverter |
python | scipy__scipy | scipy/integrate/_rules/_base.py | {
"start": 12275,
"end": 17927
} | class ____(NestedFixedRule):
"""
Find the n-dimensional cubature rule constructed from the Cartesian product of 1-D
`NestedFixedRule` quadrature rules.
Given a list of N 1-dimensional quadrature rules which support error estimation
using NestedFixedRule, this will find the N-dimensional cubature rule obtained by
taking the Cartesian product of their nodes, and estimating the error by taking the
difference with a lower-accuracy N-dimensional cubature rule obtained using the
``.lower_nodes_and_weights`` rule in each of the base 1-dimensional rules.
Parameters
----------
base_rules : list of NestedFixedRule
List of base 1-dimensional `NestedFixedRule` quadrature rules.
Attributes
----------
base_rules : list of NestedFixedRule
List of base 1-dimensional `NestedFixedRule` qudarature rules.
Examples
--------
Evaluate a 2D integral by taking the product of two 1D rules:
>>> import numpy as np
>>> from scipy.integrate import cubature
>>> from scipy.integrate._rules import (
... ProductNestedFixed, GaussKronrodQuadrature
... )
>>> def f(x):
... # f(x) = cos(x_1) + cos(x_2)
... return np.sum(np.cos(x), axis=-1)
>>> rule = ProductNestedFixed(
... [GaussKronrodQuadrature(15), GaussKronrodQuadrature(15)]
... ) # Use 15-point Gauss-Kronrod, which implements NestedFixedRule
>>> a, b = np.array([0, 0]), np.array([1, 1])
>>> rule.estimate(f, a, b) # True value 2*sin(1), approximately 1.6829
np.float64(1.682941969615793)
>>> rule.estimate_error(f, a, b)
np.float64(2.220446049250313e-16)
"""
def __init__(self, base_rules):
for rule in base_rules:
if not isinstance(rule, NestedFixedRule):
raise ValueError("base rules for product need to be instance of"
"NestedFixedRule")
self.base_rules = base_rules
self.xp = None
@cached_property
def nodes_and_weights(self):
nodes = _cartesian_product(
[rule.nodes_and_weights[0] for rule in self.base_rules]
)
if self.xp is None:
self.xp = array_namespace(nodes)
weights = self.xp.prod(
_cartesian_product(
[rule.nodes_and_weights[1] for rule in self.base_rules]
),
axis=-1,
)
return nodes, weights
@cached_property
def lower_nodes_and_weights(self):
nodes = _cartesian_product(
[cubature.lower_nodes_and_weights[0] for cubature in self.base_rules]
)
if self.xp is None:
self.xp = array_namespace(nodes)
weights = self.xp.prod(
_cartesian_product(
[cubature.lower_nodes_and_weights[1] for cubature in self.base_rules]
),
axis=-1,
)
return nodes, weights
def _cartesian_product(arrays):
xp = array_namespace(*arrays)
arrays_ix = xp.meshgrid(*arrays, indexing='ij')
result = xp.reshape(xp.stack(arrays_ix, axis=-1), (-1, len(arrays)))
return result
def _split_subregion(a, b, xp, split_at=None):
"""
Given the coordinates of a region like a=[0, 0] and b=[1, 1], yield the coordinates
of all subregions, which in this case would be::
([0, 0], [1/2, 1/2]),
([0, 1/2], [1/2, 1]),
([1/2, 0], [1, 1/2]),
([1/2, 1/2], [1, 1])
"""
xp = array_namespace(a, b)
if split_at is None:
split_at = (a + b) / 2
left = [xp.stack((a[i], split_at[i])) for i in range(a.shape[0])]
right = [xp.stack((split_at[i], b[i])) for i in range(b.shape[0])]
a_sub = _cartesian_product(left)
b_sub = _cartesian_product(right)
for i in range(a_sub.shape[0]):
yield a_sub[i, ...], b_sub[i, ...]
def _apply_fixed_rule(f, a, b, orig_nodes, orig_weights, args, xp):
# Downcast nodes and weights to common dtype of a and b
result_dtype = a.dtype
orig_nodes = xp.astype(orig_nodes, result_dtype)
orig_weights = xp.astype(orig_weights, result_dtype)
# Ensure orig_nodes are at least 2D, since 1D cubature methods can return arrays of
# shape (npoints,) rather than (npoints, 1)
if orig_nodes.ndim == 1:
orig_nodes = orig_nodes[:, None]
rule_ndim = orig_nodes.shape[-1]
a_ndim = xp_size(a)
b_ndim = xp_size(b)
if rule_ndim != a_ndim or rule_ndim != b_ndim:
raise ValueError(f"rule and function are of incompatible dimension, nodes have"
f"ndim {rule_ndim}, while limit of integration has ndim"
f"a_ndim={a_ndim}, b_ndim={b_ndim}")
lengths = b - a
# The underlying rule is for the hypercube [-1, 1]^n.
#
# To handle arbitrary regions of integration, it's necessary to apply a linear
# change of coordinates to map each interval [a[i], b[i]] to [-1, 1].
nodes = (orig_nodes + 1) * (lengths * 0.5) + a
# Also need to multiply the weights by a scale factor equal to the determinant
# of the Jacobian for this coordinate change.
weight_scale_factor = xp.prod(lengths, dtype=result_dtype) / 2**rule_ndim
weights = orig_weights * weight_scale_factor
f_nodes = f(nodes, *args)
weights_reshaped = xp.reshape(weights, (-1, *([1] * (f_nodes.ndim - 1))))
# f(nodes) will have shape (num_nodes, output_dim_1, ..., output_dim_n)
# Summing along the first axis means estimate will shape (output_dim_1, ...,
# output_dim_n)
est = xp.sum(weights_reshaped * f_nodes, axis=0, dtype=result_dtype)
return est
| ProductNestedFixed |
python | streamlit__streamlit | lib/tests/streamlit/connections/base_connection_test.py | {
"start": 901,
"end": 1014
} | class ____:
def some_raw_connection_method(self):
return "some raw connection method"
| MockRawConnection |
python | django__django | django/utils/crypto.py | {
"start": 183,
"end": 2661
} | class ____(ValueError):
"""Algorithm is not supported by hashlib."""
pass
def salted_hmac(key_salt, value, secret=None, *, algorithm="sha1"):
"""
Return the HMAC of 'value', using a key generated from key_salt and a
secret (which defaults to settings.SECRET_KEY). Default algorithm is SHA1,
but any algorithm name supported by hashlib can be passed.
A different key_salt should be passed in for every application of HMAC.
"""
if secret is None:
secret = settings.SECRET_KEY
key_salt = force_bytes(key_salt)
secret = force_bytes(secret)
try:
hasher = getattr(hashlib, algorithm)
except AttributeError as e:
raise InvalidAlgorithm(
"%r is not an algorithm accepted by the hashlib module." % algorithm
) from e
# We need to generate a derived key from our base key. We can do this by
# passing the key_salt and our base key through a pseudo-random function.
key = hasher(key_salt + secret).digest()
# If len(key_salt + secret) > block size of the hash algorithm, the above
# line is redundant and could be replaced by key = key_salt + secret, since
# the hmac module does the same thing for keys longer than the block size.
# However, we need to ensure that we *always* do this.
return hmac.new(key, msg=force_bytes(value), digestmod=hasher)
RANDOM_STRING_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS):
"""
Return a securely generated random string.
The bit length of the returned value can be calculated with the formula:
log_2(len(allowed_chars)^length)
For example, with default `allowed_chars` (26+26+10), this gives:
* length: 12, bit length =~ 71 bits
* length: 22, bit length =~ 131 bits
"""
return "".join(secrets.choice(allowed_chars) for i in range(length))
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return secrets.compare_digest(force_bytes(val1), force_bytes(val2))
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
"""Return the hash of password using pbkdf2."""
if digest is None:
digest = hashlib.sha256
dklen = dklen or None
password = force_bytes(password)
salt = force_bytes(salt)
return hashlib.pbkdf2_hmac(digest().name, password, salt, iterations, dklen)
| InvalidAlgorithm |
python | jupyterlab__jupyterlab | examples/app/main.py | {
"start": 578,
"end": 1433
} | class ____(LabServerApp):
extension_url = "/lab"
default_url = "/lab"
name = __name__
load_other_extensions = False
app_name = "JupyterLab Example App"
app_settings_dir = os.path.join(HERE, "build", "application_settings")
app_version = version
schemas_dir = os.path.join(HERE, "build", "schemas")
static_dir = os.path.join(HERE, "build")
templates_dir = os.path.join(HERE, "templates")
themes_dir = os.path.join(HERE, "build", "themes")
user_settings_dir = os.path.join(HERE, "build", "user_settings")
workspaces_dir = os.path.join(HERE, "build", "workspaces")
def initialize_settings(self):
super().initialize_settings()
settings = self.serverapp.web_app.settings
settings["terminals_available"] = False
if __name__ == "__main__":
ExampleApp.launch_instance()
| ExampleApp |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/feature_store.py | {
"start": 19668,
"end": 24457
} | class ____(GoogleCloudBaseOperator, OperationHelper):
"""
Fetch features data from the Feature View provided.
This method fetches data from existing Feature view, filtered by provided (or default) data_key.
Helps to retrieve actual features data hosted in the VertexAI Feature Store.
:param entity_id: Simple ID to identify Entity to fetch feature values for.
:param feature_view_id: The FeatureView ID to fetch data from.
:param feature_online_store_id: The ID of the online feature store.
:param data_key: The request key to fetch feature values for.
:param project_id: Required. The ID of the Google Cloud project that contains the feature store.
This is used to identify which project's resources to interact with.
:param location: Required. The location of the feature store (e.g., 'us-central1', 'us-east1').
This specifies the Google Cloud region where the feature store resources are located.
:param gcp_conn_id: The connection ID to use for connecting to Google Cloud Platform.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials. Can be either a single account or a chain of accounts required to
get the access_token of the last account in the list, which will be impersonated
in the request. If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role. If set as a sequence, the identities
from the list must grant Service Account Token Creator IAM role to the directly
preceding identity, with first account from the list granting this role to the
originating account.
"""
template_fields: Sequence[str] = (
"project_id",
"location",
"feature_online_store_id",
"feature_view_id",
"entity_id",
)
def __init__(
self,
*,
feature_view_id: str,
feature_online_store_id: str,
project_id: str,
location: str,
entity_id: str | None = None,
data_key: FeatureViewDataKey | None = None,
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.entity_id = entity_id
self.feature_view_id = feature_view_id
self.feature_online_store_id = feature_online_store_id
self.data_key = data_key
self.timeout = timeout
self.retry = retry
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict[str, Any]:
"""Execute the get feature view sync operation."""
hook = FeatureStoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
feature_online_store = hook.get_feature_online_store(
feature_online_store_id=self.feature_online_store_id,
project_id=self.project_id,
location=self.location,
)
public_domain_name = hook._get_featurestore_public_endpoint(feature_online_store)
except GoogleAPICallError as ex:
exc_msg = f"Google API error getting {self.feature_online_store_id} Feature Online Store instance"
raise AirflowException(exc_msg) from ex
self.log.info(
"Fetching data from the Feature View %s, Online Feature Store %s.",
self.feature_view_id,
self.feature_online_store_id,
)
request_result = hook.fetch_feature_values(
project_id=self.project_id,
location=self.location,
endpoint_domain_name=public_domain_name,
entity_id=self.entity_id,
feature_view_id=self.feature_view_id,
feature_online_store_id=self.feature_online_store_id,
data_key=self.data_key,
data_format=FeatureViewDataFormat.KEY_VALUE,
timeout=self.timeout,
retry=self.retry,
metadata=self.metadata,
)
self.log.info(
"Fetching data from the Feature View %s, Online Feature Store %s. is finished.",
self.feature_view_id,
self.feature_online_store_id,
)
result = type(request_result).to_dict(request_result)
return result
| FetchFeatureValuesOperator |
python | pallets__jinja | src/jinja2/loaders.py | {
"start": 21400,
"end": 23709
} | class ____(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ModuleLoader('/path/to/compiled/templates')
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(
self,
path: t.Union[
str, "os.PathLike[str]", t.Sequence[t.Union[str, "os.PathLike[str]"]]
],
) -> None:
package_name = f"_jinja2_module_templates_{id(self):x}"
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if not isinstance(path, abc.Iterable) or isinstance(path, str):
path = [path]
mod.__path__ = [os.fspath(p) for p in path]
sys.modules[package_name] = weakref.proxy(
mod, lambda x: sys.modules.pop(package_name, None)
)
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name: str) -> str:
return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
@staticmethod
def get_module_filename(name: str) -> str:
return ModuleLoader.get_template_key(name) + ".py"
@internalcode
def load(
self,
environment: "Environment",
name: str,
globals: t.MutableMapping[str, t.Any] | None = None,
) -> "Template":
key = self.get_template_key(name)
module = f"{self.package_name}.{key}"
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ["root"])
except ImportError as e:
raise TemplateNotFound(name) from e
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
if globals is None:
globals = {}
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals
)
| ModuleLoader |
python | PyCQA__pylint | tests/functional/g/generic_alias/generic_alias_side_effects.py | {
"start": 2011,
"end": 2057
} | class ____(Generic[OUT]):
pass
| ProducingMixin |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 13470,
"end": 13878
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
text_document: TextDocumentIdentifier
text: Optional[str] = None
@staticmethod
def from_json_rpc_parameters(
parameters: json_rpc.Parameters,
) -> "DidSaveTextDocumentParameters":
return _parse_parameters(parameters, target=DidSaveTextDocumentParameters)
@dataclasses.dataclass(frozen=True)
| DidSaveTextDocumentParameters |
python | pallets__werkzeug | examples/cupoftee/db.py | {
"start": 201,
"end": 1668
} | class ____:
def __init__(self, filename):
self.filename = filename
self._fs = dbm.open(filename, "cf")
self._local = {}
self._lock = Lock()
def __getitem__(self, key):
with self._lock:
return self._load_key(key)
def _load_key(self, key):
if key in self._local:
return self._local[key]
rv = loads(self._fs[key])
self._local[key] = rv
return rv
def __setitem__(self, key, value):
self._local[key] = value
def __delitem__(self, key):
with self._lock:
self._local.pop(key, None)
if key in self._fs:
del self._fs[key]
def __del__(self):
self.close()
def __contains__(self, key):
with self._lock:
try:
self._load_key(key)
except KeyError:
pass
return key in self._local
def setdefault(self, key, factory):
with self._lock:
try:
rv = self._load_key(key)
except KeyError:
self._local[key] = rv = factory()
return rv
def sync(self):
with self._lock:
for key, value in self._local.items():
self._fs[key] = dumps(value, 2)
self._fs.sync()
def close(self):
try:
self.sync()
self._fs.close()
except Exception:
pass
| Database |
python | keras-team__keras | keras/src/utils/code_stats_test.py | {
"start": 137,
"end": 5709
} | class ____(test_case.TestCase):
def setUp(self):
self.test_dir = "test_directory"
os.makedirs(self.test_dir, exist_ok=True)
def tearDown(self):
for root, dirs, files in os.walk(self.test_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
def create_file(self, filename, content):
with open(
os.path.join(self.test_dir, filename), "w", encoding="utf-8"
) as f:
f.write(content)
def test_count_loc_valid_python(self):
self.create_file(
"sample.py", "# This is a test file\n\nprint('Hello')\n"
)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 1)
def test_exclude_test_files(self):
self.create_file("sample_test.py", "print('Hello')\n")
loc = count_loc(self.test_dir, exclude=("_test",))
self.assertEqual(loc, 0)
def test_other_extensions(self):
self.create_file("sample.txt", "Hello\n")
loc = count_loc(self.test_dir, extensions=(".py",))
self.assertEqual(loc, 0)
def test_comment_lines(self):
self.create_file(
"sample.py", "# Comment\nprint('Hello')\n# Another comment\n"
)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 1)
def test_empty_file(self):
self.create_file("empty.py", "")
loc = count_loc(self.test_dir)
self.assertEqual(loc, 0)
def test_whitespace_only(self):
self.create_file("whitespace.py", " \n\t\n")
loc = count_loc(self.test_dir)
self.assertEqual(loc, 0)
def test_inline_comments_after_code(self):
content = 'print("Hello") # This is an inline comment'
self.create_file("inline_comment_sample.py", content)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 1) # The comment shouldn't affect the count
def test_directory_structure(self):
content1 = 'print("Hello from file1")'
content2 = 'print("Hello from file2")'
os.mkdir(os.path.join(self.test_dir, "subdir"))
self.create_file("sample1.py", content1)
self.create_file(os.path.join("subdir", "sample2.py"), content2)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 2) # Both files should be counted
def test_normal_directory_name(self):
content = 'print("Hello from a regular directory")'
os.makedirs(os.path.join(self.test_dir, "some_test_dir"))
self.create_file(os.path.join("some_test_dir", "sample.py"), content)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 1) # Should count normally
def test_exclude_directory_name(self):
content = 'print("Hello from an excluded directory")'
os.makedirs(os.path.join(self.test_dir, "dir_test"))
self.create_file(os.path.join("dir_test", "sample.py"), content)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 0)
# Shouldn't count the file in dir_test due to the exclusion pattern
def test_verbose_output(self):
content = 'print("Hello")'
self.create_file("sample.py", content)
original_stdout = sys.stdout
sys.stdout = StringIO()
count_loc(self.test_dir, verbose=1)
output = sys.stdout.getvalue()
sys.stdout = original_stdout
self.assertIn("Count LoCs in", output)
def test_multiline_string_same_line(self):
content = '''"""This is a multiline string ending on the same line"""
print("Outside string")'''
self.create_file("same_line_multiline.py", content)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 1) # Only the print statement should count
def test_multiline_string_ends_on_same_line(self):
content = '"""a multiline string end on same line"""\nprint("Outstr")'
self.create_file("same_line_multiline.py", content)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 1) # Only the print statement should count
def test_multiline_string_ends_in_middle_of_line(self):
content = '''print("Start")
"""This is a multiline string ending in the middle of a line"""
"""This is another multiline string."""
print("End")'''
self.create_file("multiline_in_middle.py", content)
loc = count_loc(self.test_dir)
self.assertEqual(loc, 2) # Both print statements should count
def test_line_starting_with_triple_quotes_not_ending(self):
content = '"""\nThis is a multiline string\n'
self.create_file("test_file_2.py", content)
path = os.path.join(self.test_dir, "test_file_2.py")
self.assertEqual(count_loc(path), 0)
# Because it's part of a multiline string
def test_line_starting_and_ending_with_triple_quotes(self):
content = '"""This is a one-liner docstring."""\n'
self.create_file("test_file_3.py", content)
path = os.path.join(self.test_dir, "test_file_3.py")
self.assertEqual(count_loc(path), 0)
# This is still considered a comment/docstring
def test_string_open_true_line_starting_with_triple_quotes(self):
content = '"""\nEnd of the multiline string."""\n'
self.create_file("test_file_4.py", content)
path = os.path.join(self.test_dir, "test_file_4.py")
self.assertEqual(count_loc(path), 0)
# Entire content is a multiline string/comment
| TestCountLoc |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_selects.py | {
"start": 2402,
"end": 4083
} | class ____(fixtures.MappedTest):
"""test for [ticket:2885]"""
@classmethod
def define_tables(cls, metadata):
Table(
"base",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(50)),
)
Table(
"child",
metadata,
# 1. name of column must be different, so that we rely on
# mapper._table_to_equated to link the two cols
Column(
"child_id", Integer, ForeignKey("base.id"), primary_key=True
),
Column("name", String(50)),
)
@classmethod
def setup_classes(cls):
class Base(cls.Comparable):
pass
class Child(Base):
pass
def test_map_to_select(self):
Base, Child = self.classes.Base, self.classes.Child
base, child = self.tables.base, self.tables.child
base_select = select(base).alias()
self.mapper_registry.map_imperatively(
Base,
base_select,
polymorphic_on=base_select.c.type,
polymorphic_identity="base",
)
self.mapper_registry.map_imperatively(
Child, child, inherits=Base, polymorphic_identity="child"
)
sess = fixture_session()
# 2. use an id other than "1" here so can't rely on
# the two inserts having the same id
c1 = Child(id=12, name="c1")
sess.add(c1)
sess.commit()
sess.close()
c1 = sess.query(Child).one()
eq_(c1.name, "c1")
| JoinFromSelectPersistenceTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1421102,
"end": 1421294
} | class ____(VegaLiteSchema):
"""TitleOrient schema wrapper."""
_schema = {"$ref": "#/definitions/TitleOrient"}
def __init__(self, *args):
super().__init__(*args)
| TitleOrient |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 140922,
"end": 141412
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
name: Optional[str] = Field(
None,
description="The name of the defined parameter. May only contain alphanumeric characters, _, -, and .",
examples=["table"],
)
default: Optional[str] = Field(
None,
description="Default value of the parameter.",
examples=["users"],
)
| JobParameter |
python | pytorch__pytorch | test/inductor/test_cache.py | {
"start": 655,
"end": 4962
} | class ____:
@staticmethod
def abstract_cache_types() -> set[type[icache.Cache]]:
return {icache.Cache, icache.AsyncCache}
@staticmethod
def cache_types() -> Sequence[type[icache.Cache]]:
cache_types: list[type[icache.Cache]] = []
for obj_name in dir(icache):
obj = getattr(icache, obj_name)
if not isclass(obj) or not issubclass(obj, icache.Cache):
continue
if obj in TestMixin.abstract_cache_types():
continue
cache_types.append(obj)
return cache_types
@staticmethod
def async_cache_types() -> Sequence[type[icache.AsyncCache]]:
return [
cache_type
for cache_type in TestMixin.cache_types()
if issubclass(cache_type, icache.AsyncCache)
]
@staticmethod
def on_disk_cache_types() -> Sequence[type[icache.OnDiskCache]]:
return [
cache_type
for cache_type in TestMixin.cache_types()
if issubclass(cache_type, icache.OnDiskCache)
]
@staticmethod
def key_types() -> Sequence[type[icache.Key]]:
return [*icache.Key.__constraints__]
@staticmethod
def value_types() -> Sequence[type[icache.Value]]:
return [*icache.Value.__constraints__]
@staticmethod
def cache_type_supports_key_and_value_types(
cache_type: type[icache.Cache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> bool:
assert len(cache_type.__orig_bases__) == 1
generic_base = cache_type.__orig_bases__[0]
_key_type, _value_type = generic_base.__args__
if ((_key_type != icache.Key) and (_key_type != key_type)) or (
(_value_type != icache.Value) and (_value_type != value_type)
):
return False
return True
def key_not_in(
self: Self,
cache: icache.Cache[icache.Key, icache.Value],
key_fn: Callable[[], icache.Key],
) -> icache.Key:
while cache.get(key := key_fn()) is not None:
continue
return key
def keys_not_in(
self: Self,
cache: icache.Cache[icache.Key, icache.Value],
key_fn: Callable[[], icache.Key],
num: int,
) -> list[icache.Key]:
keys = []
while len(keys) < num:
if (key := self.key_not_in(cache, key_fn)) not in keys:
keys.append(key)
return keys
def key(self: Self, key_type: type[icache.Key]) -> icache.Key:
if key_type is str:
return f"s{randint(0, 2**32)}"
elif key_type is int:
return randint(0, 2**32)
elif key_type == tuple[Any, ...]:
return (self.key(str), self.key(int))
else:
raise NotImplementedError
def values_unalike(
self: Self, value_fn: Callable[[], icache.Value], num: int
) -> list[icache.Value]:
values = []
while len(values) < num:
if (value := value_fn()) not in values:
values.append(value)
return values
def value(self: Self, value_type: type[icache.Value]) -> icache.Value:
if value_type is str:
return f"s{randint(0, 2**32)}"
elif value_type is int:
return randint(0, 2**32)
elif value_type == tuple[Any, ...]:
return (self.value(str), self.value(int))
elif value_type is bytes:
return self.value(str).encode()
elif value_type == dict[Any, Any]:
return {
"zero": self.value(str),
1: self.value(int),
(2): self.value(tuple[Any, ...]),
b"three": self.value(bytes),
}
elif value_type == list[Any]:
return [self.value(str), self.value(int), self.value(dict[Any, Any])]
else:
raise NotImplementedError
def maybe_randomize_base_dir(self: Self, cache: icache.Cache) -> None:
# multi on disk caches might exist at any time, and the tests
# assume they are isolated so we should randomize their base dir
if isinstance(cache, icache.OnDiskCache):
cache.base_dir = cache.base_dir / f"{hash(cache)}"
@instantiate_parametrized_tests
| TestMixin |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_prime_number.py | {
"start": 1793,
"end": 4274
} | class ____(ColumnMapExpectation):
"""Expect column values to be prime numbers."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_prime": [
"2",
"3",
"5",
"7",
"11",
],
"some_other": [
"8",
"16",
"7",
"11",
"1",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_prime"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.prime_number"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["sympy"],
}
if __name__ == "__main__":
ExpectColumnValuesToBePrimeNumber().print_diagnostic_checklist()
| ExpectColumnValuesToBePrimeNumber |
python | apache__avro | lang/py/avro/schema.py | {
"start": 37772,
"end": 38228
} | class ____(LogicalSchema, PrimitiveSchema):
def __init__(self, other_props=None):
LogicalSchema.__init__(self, avro.constants.TIMESTAMP_MICROS)
PrimitiveSchema.__init__(self, "long", other_props)
def to_json(self, names=None):
return self.props
def validate(self, datum):
return self if isinstance(datum, datetime.datetime) and _is_timezone_aware_datetime(datum) else None
#
# uuid Type
#
| TimestampMicrosSchema |
python | sympy__sympy | sympy/printing/pretty/pretty.py | {
"start": 1186,
"end": 106246
} | class ____(Printer):
"""Printer, which converts an expression into 2D ASCII-art figure."""
printmethod = "_pretty"
_default_settings = {
"order": None,
"full_prec": "auto",
"use_unicode": None,
"wrap_line": True,
"num_columns": None,
"use_unicode_sqrt_char": True,
"root_notation": True,
"mat_symbol_style": "plain",
"imaginary_unit": "i",
"perm_cyclic": True
}
def __init__(self, settings=None):
Printer.__init__(self, settings)
if not isinstance(self._settings['imaginary_unit'], str):
raise TypeError("'imaginary_unit' must a string, not {}".format(self._settings['imaginary_unit']))
elif self._settings['imaginary_unit'] not in ("i", "j"):
raise ValueError("'imaginary_unit' must be either 'i' or 'j', not '{}'".format(self._settings['imaginary_unit']))
def emptyPrinter(self, expr):
return prettyForm(str(expr))
@property
def _use_unicode(self):
if self._settings['use_unicode']:
return True
else:
return pretty_use_unicode()
def doprint(self, expr):
return self._print(expr).render(**self._settings)
# empty op so _print(stringPict) returns the same
def _print_stringPict(self, e):
return e
def _print_basestring(self, e):
return prettyForm(e)
def _print_atan2(self, e):
pform = prettyForm(*self._print_seq(e.args).parens())
pform = prettyForm(*pform.left('atan2'))
return pform
def _print_Symbol(self, e, bold_name=False):
symb = pretty_symbol(e.name, bold_name)
return prettyForm(symb)
_print_RandomSymbol = _print_Symbol
def _print_MatrixSymbol(self, e):
return self._print_Symbol(e, self._settings['mat_symbol_style'] == "bold")
def _print_Float(self, e):
# we will use StrPrinter's Float printer, but we need to handle the
# full_prec ourselves, according to the self._print_level
full_prec = self._settings["full_prec"]
if full_prec == "auto":
full_prec = self._print_level == 1
return prettyForm(sstr(e, full_prec=full_prec))
def _print_Cross(self, e):
vec1 = e._expr1
vec2 = e._expr2
pform = self._print(vec2)
pform = prettyForm(*pform.left('('))
pform = prettyForm(*pform.right(')'))
pform = prettyForm(*pform.left(self._print(U('MULTIPLICATION SIGN'))))
pform = prettyForm(*pform.left(')'))
pform = prettyForm(*pform.left(self._print(vec1)))
pform = prettyForm(*pform.left('('))
return pform
def _print_Curl(self, e):
vec = e._expr
pform = self._print(vec)
pform = prettyForm(*pform.left('('))
pform = prettyForm(*pform.right(')'))
pform = prettyForm(*pform.left(self._print(U('MULTIPLICATION SIGN'))))
pform = prettyForm(*pform.left(self._print(U('NABLA'))))
return pform
def _print_Divergence(self, e):
vec = e._expr
pform = self._print(vec)
pform = prettyForm(*pform.left('('))
pform = prettyForm(*pform.right(')'))
pform = prettyForm(*pform.left(self._print(U('DOT OPERATOR'))))
pform = prettyForm(*pform.left(self._print(U('NABLA'))))
return pform
def _print_Dot(self, e):
vec1 = e._expr1
vec2 = e._expr2
pform = self._print(vec2)
pform = prettyForm(*pform.left('('))
pform = prettyForm(*pform.right(')'))
pform = prettyForm(*pform.left(self._print(U('DOT OPERATOR'))))
pform = prettyForm(*pform.left(')'))
pform = prettyForm(*pform.left(self._print(vec1)))
pform = prettyForm(*pform.left('('))
return pform
def _print_Gradient(self, e):
func = e._expr
pform = self._print(func)
pform = prettyForm(*pform.left('('))
pform = prettyForm(*pform.right(')'))
pform = prettyForm(*pform.left(self._print(U('NABLA'))))
return pform
def _print_Laplacian(self, e):
func = e._expr
pform = self._print(func)
pform = prettyForm(*pform.left('('))
pform = prettyForm(*pform.right(')'))
pform = prettyForm(*pform.left(self._print(U('INCREMENT'))))
return pform
def _print_Atom(self, e):
try:
# print atoms like Exp1 or Pi
return prettyForm(pretty_atom(e.__class__.__name__, printer=self))
except KeyError:
return self.emptyPrinter(e)
# Infinity inherits from Number, so we have to override _print_XXX order
_print_Infinity = _print_Atom
_print_NegativeInfinity = _print_Atom
_print_EmptySet = _print_Atom
_print_Naturals = _print_Atom
_print_Naturals0 = _print_Atom
_print_Integers = _print_Atom
_print_Rationals = _print_Atom
_print_Complexes = _print_Atom
_print_EmptySequence = _print_Atom
def _print_Reals(self, e):
if self._use_unicode:
return self._print_Atom(e)
else:
inf_list = ['-oo', 'oo']
return self._print_seq(inf_list, '(', ')')
def _print_subfactorial(self, e):
x = e.args[0]
pform = self._print(x)
# Add parentheses if needed
if not ((x.is_Integer and x.is_nonnegative) or x.is_Symbol):
pform = prettyForm(*pform.parens())
pform = prettyForm(*pform.left('!'))
return pform
def _print_factorial(self, e):
x = e.args[0]
pform = self._print(x)
# Add parentheses if needed
if not ((x.is_Integer and x.is_nonnegative) or x.is_Symbol):
pform = prettyForm(*pform.parens())
pform = prettyForm(*pform.right('!'))
return pform
def _print_factorial2(self, e):
x = e.args[0]
pform = self._print(x)
# Add parentheses if needed
if not ((x.is_Integer and x.is_nonnegative) or x.is_Symbol):
pform = prettyForm(*pform.parens())
pform = prettyForm(*pform.right('!!'))
return pform
def _print_binomial(self, e):
n, k = e.args
n_pform = self._print(n)
k_pform = self._print(k)
bar = ' '*max(n_pform.width(), k_pform.width())
pform = prettyForm(*k_pform.above(bar))
pform = prettyForm(*pform.above(n_pform))
pform = prettyForm(*pform.parens('(', ')'))
pform.baseline = (pform.baseline + 1)//2
return pform
def _print_Relational(self, e):
op = prettyForm(' ' + xsym(e.rel_op) + ' ')
l = self._print(e.lhs)
r = self._print(e.rhs)
pform = prettyForm(*stringPict.next(l, op, r), binding=prettyForm.OPEN)
return pform
def _print_Not(self, e):
from sympy.logic.boolalg import (Equivalent, Implies)
if self._use_unicode:
arg = e.args[0]
pform = self._print(arg)
if isinstance(arg, Equivalent):
return self._print_Equivalent(arg, altchar=pretty_atom('NotEquiv'))
if isinstance(arg, Implies):
return self._print_Implies(arg, altchar=pretty_atom('NotArrow'))
if arg.is_Boolean and not arg.is_Not:
pform = prettyForm(*pform.parens())
return prettyForm(*pform.left(pretty_atom('Not')))
else:
return self._print_Function(e)
def __print_Boolean(self, e, char, sort=True):
args = e.args
if sort:
args = sorted(e.args, key=default_sort_key)
arg = args[0]
pform = self._print(arg)
if arg.is_Boolean and not arg.is_Not:
pform = prettyForm(*pform.parens())
for arg in args[1:]:
pform_arg = self._print(arg)
if arg.is_Boolean and not arg.is_Not:
pform_arg = prettyForm(*pform_arg.parens())
pform = prettyForm(*pform.right(' %s ' % char))
pform = prettyForm(*pform.right(pform_arg))
return pform
def _print_And(self, e):
if self._use_unicode:
return self.__print_Boolean(e, pretty_atom('And'))
else:
return self._print_Function(e, sort=True)
def _print_Or(self, e):
if self._use_unicode:
return self.__print_Boolean(e, pretty_atom('Or'))
else:
return self._print_Function(e, sort=True)
def _print_Xor(self, e):
if self._use_unicode:
return self.__print_Boolean(e, pretty_atom("Xor"))
else:
return self._print_Function(e, sort=True)
def _print_Nand(self, e):
if self._use_unicode:
return self.__print_Boolean(e, pretty_atom('Nand'))
else:
return self._print_Function(e, sort=True)
def _print_Nor(self, e):
if self._use_unicode:
return self.__print_Boolean(e, pretty_atom('Nor'))
else:
return self._print_Function(e, sort=True)
def _print_Implies(self, e, altchar=None):
if self._use_unicode:
return self.__print_Boolean(e, altchar or pretty_atom('Arrow'), sort=False)
else:
return self._print_Function(e)
def _print_Equivalent(self, e, altchar=None):
if self._use_unicode:
return self.__print_Boolean(e, altchar or pretty_atom('Equiv'))
else:
return self._print_Function(e, sort=True)
def _print_conjugate(self, e):
pform = self._print(e.args[0])
return prettyForm( *pform.above( hobj('_', pform.width())) )
def _print_Abs(self, e):
pform = self._print(e.args[0])
pform = prettyForm(*pform.parens('|', '|'))
return pform
def _print_floor(self, e):
if self._use_unicode:
pform = self._print(e.args[0])
pform = prettyForm(*pform.parens('lfloor', 'rfloor'))
return pform
else:
return self._print_Function(e)
def _print_ceiling(self, e):
if self._use_unicode:
pform = self._print(e.args[0])
pform = prettyForm(*pform.parens('lceil', 'rceil'))
return pform
else:
return self._print_Function(e)
def _print_Derivative(self, deriv):
if requires_partial(deriv.expr) and self._use_unicode:
deriv_symbol = U('PARTIAL DIFFERENTIAL')
else:
deriv_symbol = r'd'
x = None
count_total_deriv = 0
for sym, num in reversed(deriv.variable_count):
s = self._print(sym)
ds = prettyForm(*s.left(deriv_symbol))
count_total_deriv += num
if (not num.is_Integer) or (num > 1):
ds = ds**prettyForm(str(num))
if x is None:
x = ds
else:
x = prettyForm(*x.right(' '))
x = prettyForm(*x.right(ds))
f = prettyForm(
binding=prettyForm.FUNC, *self._print(deriv.expr).parens())
pform = prettyForm(deriv_symbol)
if (count_total_deriv > 1) != False:
pform = pform**prettyForm(str(count_total_deriv))
pform = prettyForm(*pform.below(stringPict.LINE, x))
pform.baseline = pform.baseline + 1
pform = prettyForm(*stringPict.next(pform, f))
pform.binding = prettyForm.MUL
return pform
def _print_Cycle(self, dc):
from sympy.combinatorics.permutations import Permutation, Cycle
# for Empty Cycle
if dc == Cycle():
cyc = stringPict('')
return prettyForm(*cyc.parens())
dc_list = Permutation(dc.list()).cyclic_form
# for Identity Cycle
if dc_list == []:
cyc = self._print(dc.size - 1)
return prettyForm(*cyc.parens())
cyc = stringPict('')
for i in dc_list:
l = self._print(str(tuple(i)).replace(',', ''))
cyc = prettyForm(*cyc.right(l))
return cyc
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation, Cycle
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
sympy_deprecation_warning(
f"""
Setting Permutation.print_cyclic is deprecated. Instead use
init_printing(perm_cyclic={perm_cyclic}).
""",
deprecated_since_version="1.6",
active_deprecations_target="deprecated-permutation-print_cyclic",
stacklevel=7,
)
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
return self._print_Cycle(Cycle(expr))
lower = expr.array_form
upper = list(range(len(lower)))
result = stringPict('')
first = True
for u, l in zip(upper, lower):
s1 = self._print(u)
s2 = self._print(l)
col = prettyForm(*s1.below(s2))
if first:
first = False
else:
col = prettyForm(*col.left(" "))
result = prettyForm(*result.right(col))
return prettyForm(*result.parens())
def _print_Integral(self, integral):
f = integral.function
# Add parentheses if arg involves addition of terms and
# create a pretty form for the argument
prettyF = self._print(f)
# XXX generalize parens
if f.is_Add:
prettyF = prettyForm(*prettyF.parens())
# dx dy dz ...
arg = prettyF
for x in integral.limits:
prettyArg = self._print(x[0])
# XXX qparens (parens if needs-parens)
if prettyArg.width() > 1:
prettyArg = prettyForm(*prettyArg.parens())
arg = prettyForm(*arg.right(' d', prettyArg))
# \int \int \int ...
firstterm = True
s = None
for lim in integral.limits:
# Create bar based on the height of the argument
h = arg.height()
H = h + 2
# XXX hack!
ascii_mode = not self._use_unicode
if ascii_mode:
H += 2
vint = vobj('int', H)
# Construct the pretty form with the integral sign and the argument
pform = prettyForm(vint)
pform.baseline = arg.baseline + (
H - h)//2 # covering the whole argument
if len(lim) > 1:
# Create pretty forms for endpoints, if definite integral.
# Do not print empty endpoints.
if len(lim) == 2:
prettyA = prettyForm("")
prettyB = self._print(lim[1])
if len(lim) == 3:
prettyA = self._print(lim[1])
prettyB = self._print(lim[2])
if ascii_mode: # XXX hack
# Add spacing so that endpoint can more easily be
# identified with the correct integral sign
spc = max(1, 3 - prettyB.width())
prettyB = prettyForm(*prettyB.left(' ' * spc))
spc = max(1, 4 - prettyA.width())
prettyA = prettyForm(*prettyA.right(' ' * spc))
pform = prettyForm(*pform.above(prettyB))
pform = prettyForm(*pform.below(prettyA))
if not ascii_mode: # XXX hack
pform = prettyForm(*pform.right(' '))
if firstterm:
s = pform # first term
firstterm = False
else:
s = prettyForm(*s.left(pform))
pform = prettyForm(*arg.left(s))
pform.binding = prettyForm.MUL
return pform
def _print_Product(self, expr):
func = expr.term
pretty_func = self._print(func)
horizontal_chr = xobj('_', 1)
corner_chr = xobj('_', 1)
vertical_chr = xobj('|', 1)
if self._use_unicode:
# use unicode corners
horizontal_chr = xobj('-', 1)
corner_chr = xobj('UpTack', 1)
func_height = pretty_func.height()
first = True
max_upper = 0
sign_height = 0
for lim in expr.limits:
pretty_lower, pretty_upper = self.__print_SumProduct_Limits(lim)
width = (func_height + 2) * 5 // 3 - 2
sign_lines = [horizontal_chr + corner_chr + (horizontal_chr * (width-2)) + corner_chr + horizontal_chr]
for _ in range(func_height + 1):
sign_lines.append(' ' + vertical_chr + (' ' * (width-2)) + vertical_chr + ' ')
pretty_sign = stringPict('')
pretty_sign = prettyForm(*pretty_sign.stack(*sign_lines))
max_upper = max(max_upper, pretty_upper.height())
if first:
sign_height = pretty_sign.height()
pretty_sign = prettyForm(*pretty_sign.above(pretty_upper))
pretty_sign = prettyForm(*pretty_sign.below(pretty_lower))
if first:
pretty_func.baseline = 0
first = False
height = pretty_sign.height()
padding = stringPict('')
padding = prettyForm(*padding.stack(*[' ']*(height - 1)))
pretty_sign = prettyForm(*pretty_sign.right(padding))
pretty_func = prettyForm(*pretty_sign.right(pretty_func))
pretty_func.baseline = max_upper + sign_height//2
pretty_func.binding = prettyForm.MUL
return pretty_func
def __print_SumProduct_Limits(self, lim):
def print_start(lhs, rhs):
op = prettyForm(' ' + xsym("==") + ' ')
l = self._print(lhs)
r = self._print(rhs)
pform = prettyForm(*stringPict.next(l, op, r))
return pform
prettyUpper = self._print(lim[2])
prettyLower = print_start(lim[0], lim[1])
return prettyLower, prettyUpper
def _print_Sum(self, expr):
ascii_mode = not self._use_unicode
def asum(hrequired, lower, upper, use_ascii):
def adjust(s, wid=None, how='<^>'):
if not wid or len(s) > wid:
return s
need = wid - len(s)
if how in ('<^>', "<") or how not in list('<^>'):
return s + ' '*need
half = need//2
lead = ' '*half
if how == ">":
return " "*need + s
return lead + s + ' '*(need - len(lead))
h = max(hrequired, 2)
d = h//2
w = d + 1
more = hrequired % 2
lines = []
if use_ascii:
lines.append("_"*(w) + ' ')
lines.append(r"\%s`" % (' '*(w - 1)))
for i in range(1, d):
lines.append('%s\\%s' % (' '*i, ' '*(w - i)))
if more:
lines.append('%s)%s' % (' '*(d), ' '*(w - d)))
for i in reversed(range(1, d)):
lines.append('%s/%s' % (' '*i, ' '*(w - i)))
lines.append("/" + "_"*(w - 1) + ',')
return d, h + more, lines, more
else:
w = w + more
d = d + more
vsum = vobj('sum', 4)
lines.append("_"*(w))
for i in range(0, d):
lines.append('%s%s%s' % (' '*i, vsum[2], ' '*(w - i - 1)))
for i in reversed(range(0, d)):
lines.append('%s%s%s' % (' '*i, vsum[4], ' '*(w - i - 1)))
lines.append(vsum[8]*(w))
return d, h + 2*more, lines, more
f = expr.function
prettyF = self._print(f)
if f.is_Add: # add parens
prettyF = prettyForm(*prettyF.parens())
H = prettyF.height() + 2
# \sum \sum \sum ...
first = True
max_upper = 0
sign_height = 0
for lim in expr.limits:
prettyLower, prettyUpper = self.__print_SumProduct_Limits(lim)
max_upper = max(max_upper, prettyUpper.height())
# Create sum sign based on the height of the argument
d, h, slines, adjustment = asum(
H, prettyLower.width(), prettyUpper.width(), ascii_mode)
prettySign = stringPict('')
prettySign = prettyForm(*prettySign.stack(*slines))
if first:
sign_height = prettySign.height()
prettySign = prettyForm(*prettySign.above(prettyUpper))
prettySign = prettyForm(*prettySign.below(prettyLower))
if first:
# change F baseline so it centers on the sign
prettyF.baseline -= d - (prettyF.height()//2 -
prettyF.baseline)
first = False
# put padding to the right
pad = stringPict('')
pad = prettyForm(*pad.stack(*[' ']*h))
prettySign = prettyForm(*prettySign.right(pad))
# put the present prettyF to the right
prettyF = prettyForm(*prettySign.right(prettyF))
# adjust baseline of ascii mode sigma with an odd height so that it is
# exactly through the center
ascii_adjustment = ascii_mode if not adjustment else 0
prettyF.baseline = max_upper + sign_height//2 + ascii_adjustment
prettyF.binding = prettyForm.MUL
return prettyF
def _print_Limit(self, l):
e, z, z0, dir = l.args
E = self._print(e)
if precedence(e) <= PRECEDENCE["Mul"]:
E = prettyForm(*E.parens('(', ')'))
Lim = prettyForm('lim')
LimArg = self._print(z)
if self._use_unicode:
LimArg = prettyForm(*LimArg.right(f"{xobj('-', 1)}{pretty_atom('Arrow')}"))
else:
LimArg = prettyForm(*LimArg.right('->'))
LimArg = prettyForm(*LimArg.right(self._print(z0)))
if str(dir) == '+-' or z0 in (S.Infinity, S.NegativeInfinity):
dir = ""
else:
if self._use_unicode:
dir = pretty_atom('SuperscriptPlus') if str(dir) == "+" else pretty_atom('SuperscriptMinus')
LimArg = prettyForm(*LimArg.right(self._print(dir)))
Lim = prettyForm(*Lim.below(LimArg))
Lim = prettyForm(*Lim.right(E), binding=prettyForm.MUL)
return Lim
def _print_matrix_contents(self, e):
"""
This method factors out what is essentially grid printing.
"""
M = e # matrix
Ms = {} # i,j -> pretty(M[i,j])
for i in range(M.rows):
for j in range(M.cols):
Ms[i, j] = self._print(M[i, j])
# h- and v- spacers
hsep = 2
vsep = 1
# max width for columns
maxw = [-1] * M.cols
for j in range(M.cols):
maxw[j] = max([Ms[i, j].width() for i in range(M.rows)] or [0])
# drawing result
D = None
for i in range(M.rows):
D_row = None
for j in range(M.cols):
s = Ms[i, j]
# reshape s to maxw
# XXX this should be generalized, and go to stringPict.reshape ?
assert s.width() <= maxw[j]
# hcenter it, +0.5 to the right 2
# ( it's better to align formula starts for say 0 and r )
# XXX this is not good in all cases -- maybe introduce vbaseline?
left, right = center_pad(s.width(), maxw[j])
s = prettyForm(*s.right(right))
s = prettyForm(*s.left(left))
# we don't need vcenter cells -- this is automatically done in
# a pretty way because when their baselines are taking into
# account in .right()
if D_row is None:
D_row = s # first box in a row
continue
D_row = prettyForm(*D_row.right(' '*hsep)) # h-spacer
D_row = prettyForm(*D_row.right(s))
if D is None:
D = D_row # first row in a picture
continue
# v-spacer
for _ in range(vsep):
D = prettyForm(*D.below(' '))
D = prettyForm(*D.below(D_row))
if D is None:
D = prettyForm('') # Empty Matrix
return D
def _print_MatrixBase(self, e, lparens='[', rparens=']'):
D = self._print_matrix_contents(e)
D.baseline = D.height()//2
D = prettyForm(*D.parens(lparens, rparens))
return D
def _print_Determinant(self, e):
mat = e.arg
if mat.is_MatrixExpr:
from sympy.matrices.expressions.blockmatrix import BlockMatrix
if isinstance(mat, BlockMatrix):
return self._print_MatrixBase(mat.blocks, lparens='|', rparens='|')
D = self._print(mat)
D.baseline = D.height()//2
return prettyForm(*D.parens('|', '|'))
else:
return self._print_MatrixBase(mat, lparens='|', rparens='|')
def _print_TensorProduct(self, expr):
# This should somehow share the code with _print_WedgeProduct:
if self._use_unicode:
circled_times = "\u2297"
else:
circled_times = ".*"
return self._print_seq(expr.args, None, None, circled_times,
parenthesize=lambda x: precedence_traditional(x) <= PRECEDENCE["Mul"])
def _print_WedgeProduct(self, expr):
# This should somehow share the code with _print_TensorProduct:
if self._use_unicode:
wedge_symbol = "\u2227"
else:
wedge_symbol = '/\\'
return self._print_seq(expr.args, None, None, wedge_symbol,
parenthesize=lambda x: precedence_traditional(x) <= PRECEDENCE["Mul"])
def _print_Trace(self, e):
D = self._print(e.arg)
D = prettyForm(*D.parens('(',')'))
D.baseline = D.height()//2
D = prettyForm(*D.left('\n'*(0) + 'tr'))
return D
def _print_MatrixElement(self, expr):
from sympy.matrices import MatrixSymbol
if (isinstance(expr.parent, MatrixSymbol)
and expr.i.is_number and expr.j.is_number):
return self._print(
Symbol(expr.parent.name + '_%d%d' % (expr.i, expr.j)))
else:
prettyFunc = self._print(expr.parent)
prettyFunc = prettyForm(*prettyFunc.parens())
prettyIndices = self._print_seq((expr.i, expr.j), delimiter=', '
).parens(left='[', right=']')[0]
pform = prettyForm(binding=prettyForm.FUNC,
*stringPict.next(prettyFunc, prettyIndices))
# store pform parts so it can be reassembled e.g. when powered
pform.prettyFunc = prettyFunc
pform.prettyArgs = prettyIndices
return pform
def _print_MatrixSlice(self, m):
# XXX works only for applied functions
from sympy.matrices import MatrixSymbol
prettyFunc = self._print(m.parent)
if not isinstance(m.parent, MatrixSymbol):
prettyFunc = prettyForm(*prettyFunc.parens())
def ppslice(x, dim):
x = list(x)
if x[2] == 1:
del x[2]
if x[0] == 0:
x[0] = ''
if x[1] == dim:
x[1] = ''
return prettyForm(*self._print_seq(x, delimiter=':'))
prettyArgs = self._print_seq((ppslice(m.rowslice, m.parent.rows),
ppslice(m.colslice, m.parent.cols)), delimiter=', ').parens(left='[', right=']')[0]
pform = prettyForm(
binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))
# store pform parts so it can be reassembled e.g. when powered
pform.prettyFunc = prettyFunc
pform.prettyArgs = prettyArgs
return pform
def _print_Transpose(self, expr):
mat = expr.arg
pform = self._print(mat)
from sympy.matrices import MatrixSymbol, BlockMatrix
if (not isinstance(mat, MatrixSymbol) and
not isinstance(mat, BlockMatrix) and mat.is_MatrixExpr):
pform = prettyForm(*pform.parens())
pform = pform**(prettyForm('T'))
return pform
def _print_Adjoint(self, expr):
mat = expr.arg
pform = self._print(mat)
if self._use_unicode:
dag = prettyForm(pretty_atom('Dagger'))
else:
dag = prettyForm('+')
from sympy.matrices import MatrixSymbol, BlockMatrix
if (not isinstance(mat, MatrixSymbol) and
not isinstance(mat, BlockMatrix) and mat.is_MatrixExpr):
pform = prettyForm(*pform.parens())
pform = pform**dag
return pform
def _print_BlockMatrix(self, B):
if B.blocks.shape == (1, 1):
return self._print(B.blocks[0, 0])
return self._print(B.blocks)
def _print_MatAdd(self, expr):
s = None
for item in expr.args:
pform = self._print(item)
if s is None:
s = pform # First element
else:
coeff = item.as_coeff_mmul()[0]
if S(coeff).could_extract_minus_sign():
s = prettyForm(*stringPict.next(s, ' '))
pform = self._print(item)
else:
s = prettyForm(*stringPict.next(s, ' + '))
s = prettyForm(*stringPict.next(s, pform))
return s
def _print_MatMul(self, expr):
args = list(expr.args)
from sympy.matrices.expressions.hadamard import HadamardProduct
from sympy.matrices.expressions.kronecker import KroneckerProduct
from sympy.matrices.expressions.matadd import MatAdd
for i, a in enumerate(args):
if (isinstance(a, (Add, MatAdd, HadamardProduct, KroneckerProduct))
and len(expr.args) > 1):
args[i] = prettyForm(*self._print(a).parens())
else:
args[i] = self._print(a)
return prettyForm.__mul__(*args)
def _print_Identity(self, expr):
if self._use_unicode:
return prettyForm(pretty_atom('IdentityMatrix'))
else:
return prettyForm('I')
def _print_ZeroMatrix(self, expr):
if self._use_unicode:
return prettyForm(pretty_atom('ZeroMatrix'))
else:
return prettyForm('0')
def _print_OneMatrix(self, expr):
if self._use_unicode:
return prettyForm(pretty_atom("OneMatrix"))
else:
return prettyForm('1')
def _print_MatrixUnit(self, expr):
if self._use_unicode:
s = self._print(Symbol(f'{pretty_atom("MatrixUnit")}_{expr._i}{expr._j}'))
else:
s = self._print(Symbol(f'E_{expr._i}{expr._j}'))
return s
def _print_DotProduct(self, expr):
args = list(expr.args)
for i, a in enumerate(args):
args[i] = self._print(a)
return prettyForm.__mul__(*args)
def _print_MatPow(self, expr):
pform = self._print(expr.base)
from sympy.matrices import MatrixSymbol
if not isinstance(expr.base, MatrixSymbol) and expr.base.is_MatrixExpr:
pform = prettyForm(*pform.parens())
pform = pform**(self._print(expr.exp))
return pform
def _print_HadamardProduct(self, expr):
from sympy.matrices.expressions.hadamard import HadamardProduct
from sympy.matrices.expressions.matadd import MatAdd
from sympy.matrices.expressions.matmul import MatMul
if self._use_unicode:
delim = pretty_atom('Ring')
else:
delim = '.*'
return self._print_seq(expr.args, None, None, delim,
parenthesize=lambda x: isinstance(x, (MatAdd, MatMul, HadamardProduct)))
def _print_HadamardPower(self, expr):
# from sympy import MatAdd, MatMul
if self._use_unicode:
circ = pretty_atom('Ring')
else:
circ = self._print('.')
pretty_base = self._print(expr.base)
pretty_exp = self._print(expr.exp)
if precedence(expr.exp) < PRECEDENCE["Mul"]:
pretty_exp = prettyForm(*pretty_exp.parens())
pretty_circ_exp = prettyForm(
binding=prettyForm.LINE,
*stringPict.next(circ, pretty_exp)
)
return pretty_base**pretty_circ_exp
def _print_KroneckerProduct(self, expr):
from sympy.matrices.expressions.matadd import MatAdd
from sympy.matrices.expressions.matmul import MatMul
if self._use_unicode:
delim = f" {pretty_atom('TensorProduct')} "
else:
delim = ' x '
return self._print_seq(expr.args, None, None, delim,
parenthesize=lambda x: isinstance(x, (MatAdd, MatMul)))
def _print_FunctionMatrix(self, X):
D = self._print(X.lamda.expr)
D = prettyForm(*D.parens('[', ']'))
return D
def _print_TransferFunction(self, expr):
if not expr.num == 1:
num, den = expr.num, expr.den
res = Mul(num, Pow(den, -1, evaluate=False), evaluate=False)
return self._print_Mul(res)
else:
return self._print(1)/self._print(expr.den)
def _print_DiscreteTransferFunction(self, expr):
if not expr.num == 1:
res = Mul(expr.num, Pow(expr.den, -1, evaluate=False),
evaluate=False)
result = self._print_Mul(res)
else:
result = self._print(1)/self._print(expr.den)
result = prettyForm(\
*result.right(f" [st: {expr.sampling_time}]"))
return result
def _print_Series(self, expr):
args = list(expr.args)
for i, a in enumerate(expr.args):
args[i] = prettyForm(*self._print(a).parens())
return prettyForm.__mul__(*args)
def _print_MIMOSeries(self, expr):
from sympy.physics.control.lti import MIMOParallel
args = list(expr.args)
pretty_args = []
for a in reversed(args):
if (isinstance(a, MIMOParallel) and len(expr.args) > 1):
expression = self._print(a)
expression.baseline = expression.height()//2
pretty_args.append(prettyForm(*expression.parens()))
else:
expression = self._print(a)
expression.baseline = expression.height()//2
pretty_args.append(expression)
return prettyForm.__mul__(*pretty_args)
def _print_Parallel(self, expr):
s = None
for item in expr.args:
pform = self._print(item)
if s is None:
s = pform # First element
else:
s = prettyForm(*stringPict.next(s))
s.baseline = s.height()//2
s = prettyForm(*stringPict.next(s, ' + '))
s = prettyForm(*stringPict.next(s, pform))
return s
def _print_MIMOParallel(self, expr):
from sympy.physics.control.lti import TransferFunctionMatrix
s = None
for item in expr.args:
pform = self._print(item)
if s is None:
s = pform # First element
else:
s = prettyForm(*stringPict.next(s))
s.baseline = s.height()//2
s = prettyForm(*stringPict.next(s, ' + '))
if isinstance(item, TransferFunctionMatrix):
s.baseline = s.height() - 1
s = prettyForm(*stringPict.next(s, pform))
# s.baseline = s.height()//2
return s
def _print_Feedback(self, expr):
from sympy.physics.control import TransferFunction, Series
num, tf = expr.sys1, TransferFunction(1, 1, expr.var)
num_arg_list = list(num.args) if isinstance(num, Series) else [num]
den_arg_list = list(expr.sys2.args) if \
isinstance(expr.sys2, Series) else [expr.sys2]
if isinstance(num, Series) and isinstance(expr.sys2, Series):
den = Series(*num_arg_list, *den_arg_list)
elif isinstance(num, Series) and isinstance(expr.sys2, TransferFunction):
if expr.sys2 == tf:
den = Series(*num_arg_list)
else:
den = Series(*num_arg_list, expr.sys2)
elif isinstance(num, TransferFunction) and isinstance(expr.sys2, Series):
if num == tf:
den = Series(*den_arg_list)
else:
den = Series(num, *den_arg_list)
else:
if num == tf:
den = Series(*den_arg_list)
elif expr.sys2 == tf:
den = Series(*num_arg_list)
else:
den = Series(*num_arg_list, *den_arg_list)
denom = prettyForm(*stringPict.next(self._print(tf)))
denom.baseline = denom.height()//2
denom = prettyForm(*stringPict.next(denom, ' + ')) if expr.sign == -1 \
else prettyForm(*stringPict.next(denom, ' - '))
denom = prettyForm(*stringPict.next(denom, self._print(den)))
return self._print(num)/denom
def _print_MIMOFeedback(self, expr):
from sympy.physics.control import MIMOSeries, TransferFunctionMatrix
inv_mat = self._print(MIMOSeries(expr.sys2, expr.sys1))
plant = self._print(expr.sys1)
_feedback = prettyForm(*stringPict.next(inv_mat))
_feedback = prettyForm(*stringPict.right("I + ", _feedback)) if expr.sign == -1 \
else prettyForm(*stringPict.right("I - ", _feedback))
_feedback = prettyForm(*stringPict.parens(_feedback))
_feedback.baseline = 0
_feedback = prettyForm(*stringPict.right(_feedback, '-1 '))
_feedback.baseline = _feedback.height()//2
_feedback = prettyForm.__mul__(_feedback, prettyForm(" "))
if isinstance(expr.sys1, TransferFunctionMatrix):
_feedback.baseline = _feedback.height() - 1
_feedback = prettyForm(*stringPict.next(_feedback, plant))
return _feedback
def _print_TransferFunctionMatrix(self, expr):
mat = self._print(expr._expr_mat)
mat.baseline = mat.height() - 1
if expr.sampling_time == 0:
subscript = greek_unicode['tau'] if self._use_unicode else r'{t}'
else:
subscript = r'{k}'
mat = prettyForm(*mat.right(subscript))
if expr.sampling_time == 0:
return mat
return prettyForm(*mat.below(f"[st: {expr.sampling_time}]"))
def _print_StateSpace(self, expr):
from sympy.matrices.expressions.blockmatrix import BlockMatrix
A = expr._A
B = expr._B
C = expr._C
D = expr._D
mat = BlockMatrix([[A, B], [C, D]])
return self._print(mat.blocks)
def _print_DiscreteStateSpace(self, expr):
from sympy.matrices.expressions.blockmatrix import BlockMatrix
A = expr._A
B = expr._B
C = expr._C
D = expr._D
mat = BlockMatrix([[A, B], [C, D]])
mat = self._print(mat)
return prettyForm(*mat.below(f"\n[st: {expr.sampling_time}]"))
    def _print_BasisDependent(self, expr):
        """Pretty print sympy.vector objects built on basis vectors.

        Each scalar coefficient is rendered in front of its basis-vector
        symbol; multi-line coefficients (e.g. fractions) have the basis
        symbol re-inserted after the closing parenthesis, and all parts are
        then stitched together line by line.  Unicode mode only.
        """
        from sympy.vector import Vector

        if not self._use_unicode:
            raise NotImplementedError("ASCII pretty printing of BasisDependent is not implemented")

        if expr == expr.zero:
            return prettyForm(expr.zero._pretty_form)
        o1 = []          # rendered "coeff basis" strings, one per component
        vectstrs = []    # just the basis-vector symbol of each component
        if isinstance(expr, Vector):
            items = expr.separate().items()
        else:
            items = [(0, expr)]
        for system, vect in items:
            inneritems = list(vect.components.items())
            inneritems.sort(key = lambda x: x[0].__str__())
            for k, v in inneritems:
                #if the coef of the basis vector is 1
                #we skip the 1
                if v == 1:
                    o1.append("" +
                              k._pretty_form)
                #Same for -1
                elif v == -1:
                    o1.append("(-1) " +
                              k._pretty_form)
                #For a general expr
                else:
                    #We always wrap the measure numbers in
                    #parentheses
                    arg_str = self._print(
                        v).parens()[0]
                    o1.append(arg_str + ' ' + k._pretty_form)
                vectstrs.append(k._pretty_form)

        #outstr = u("").join(o1)
        if o1[0].startswith(" + "):
            o1[0] = o1[0][3:]
        elif o1[0].startswith(" "):
            o1[0] = o1[0][1:]
        #Fixing the newlines
        lengths = []
        strs = ['']
        flag = []   # per part: row index (0 or 1) holding the "main" line
        for i, partstr in enumerate(o1):
            flag.append(0)
            # XXX: What is this hack?
            if '\n' in partstr:
                tempstr = partstr
                tempstr = tempstr.replace(vectstrs[i], '')
                if xobj(')_ext', 1) in tempstr:   # If scalar is a fraction
                    for paren in range(len(tempstr)):
                        flag[i] = 1
                        if tempstr[paren] == xobj(')_ext', 1) and tempstr[paren + 1] == '\n':
                            # We want to place the vector string after all the right parentheses, because
                            # otherwise, the vector will be in the middle of the string
                            tempstr = tempstr[:paren] + xobj(')_ext', 1)\
                                         + ' '  + vectstrs[i] + tempstr[paren + 1:]
                            break
                elif xobj(')_lower_hook', 1) in tempstr:
                    # We want to place the vector string after all the right parentheses, because
                    # otherwise, the vector will be in the middle of the string. For this reason,
                    # we insert the vector string at the rightmost index.
                    index = tempstr.rfind(xobj(')_lower_hook', 1))
                    if index != -1: # then this character was found in this string
                        flag[i] = 1
                        tempstr = tempstr[:index] + xobj(')_lower_hook', 1)\
                                     + ' '  + vectstrs[i] + tempstr[index + 1:]
                o1[i] = tempstr

        o1 = [x.split('\n') for x in o1]
        n_newlines = max(len(x) for x in o1)  # Width of part in its pretty form

        if 1 in flag:                           # If there was a fractional scalar
            for i, parts in enumerate(o1):
                if len(parts) == 1:             # If part has no newline
                    parts.insert(0, ' ' * (len(parts[0])))
                    flag[i] = 1

        # Stitch the parts together row by row, ' + ' joins the main row.
        for i, parts in enumerate(o1):
            lengths.append(len(parts[flag[i]]))
            for j in range(n_newlines):
                if j+1 <= len(parts):
                    if j >= len(strs):
                        strs.append(' ' * (sum(lengths[:-1]) +
                                           3*(len(lengths)-1)))
                    if j == flag[i]:
                        strs[flag[i]] += parts[flag[i]] + ' + '
                    else:
                        strs[j] += parts[j] + ' '*(lengths[-1] -
                                                   len(parts[j])+
                                                   3)
                else:
                    if j >= len(strs):
                        strs.append(' ' * (sum(lengths[:-1]) +
                                           3*(len(lengths)-1)))
                    strs[j] += ' '*(lengths[-1]+3)

        # Strip the trailing ' + ' / padding from every row.
        return prettyForm('\n'.join([s[:-3] for s in strs]))
    def _print_NDimArray(self, expr):
        """Pretty print an N-dimensional array by recursively grouping its
        elements into nested (immutable) matrices, alternating nesting
        direction per rank level."""
        from sympy.matrices.immutable import ImmutableMatrix

        if expr.rank() == 0:
            # Rank-0 array: print the single scalar element.
            return self._print(expr[()])

        level_str = [[]] + [[] for i in range(expr.rank())]
        shape_ranges = [list(range(i)) for i in expr.shape]
        # leave eventual matrix elements unflattened
        mat = lambda x: ImmutableMatrix(x, evaluate=False)
        for outer_i in itertools.product(*shape_ranges):
            level_str[-1].append(expr[outer_i])
            even = True
            # Collapse completed levels from innermost outwards.
            for back_outer_i in range(expr.rank()-1, -1, -1):
                if len(level_str[back_outer_i+1]) < expr.shape[back_outer_i]:
                    break
                if even:
                    level_str[back_outer_i].append(level_str[back_outer_i+1])
                else:
                    level_str[back_outer_i].append(mat(
                        level_str[back_outer_i+1]))
                    if len(level_str[back_outer_i + 1]) == 1:
                        level_str[back_outer_i][-1] = mat(
                            [[level_str[back_outer_i][-1]]])
                even = not even
                level_str[back_outer_i+1] = []

        out_expr = level_str[0][0]
        if expr.rank() % 2 == 1:
            out_expr = mat([out_expr])

        return self._print(out_expr)
def _printer_tensor_indices(self, name, indices, index_map={}):
center = stringPict(pretty_symbol(name))
top = stringPict(" "*center.width())
bot = stringPict(" "*center.width())
last_valence = None
prev_map = None
for index in indices:
indpic = self._print(index.args[0])
if ((index in index_map) or prev_map) and last_valence == index.is_up:
if index.is_up:
top = prettyForm(*stringPict.next(top, ","))
else:
bot = prettyForm(*stringPict.next(bot, ","))
if index in index_map:
indpic = prettyForm(*stringPict.next(indpic, "="))
indpic = prettyForm(*stringPict.next(indpic, self._print(index_map[index])))
prev_map = True
else:
prev_map = False
if index.is_up:
top = stringPict(*top.right(indpic))
center = stringPict(*center.right(" "*indpic.width()))
bot = stringPict(*bot.right(" "*indpic.width()))
else:
bot = stringPict(*bot.right(indpic))
center = stringPict(*center.right(" "*indpic.width()))
top = stringPict(*top.right(" "*indpic.width()))
last_valence = index.is_up
pict = prettyForm(*center.above(top))
pict = prettyForm(*pict.below(bot))
return pict
    def _print_Tensor(self, expr):
        """Pretty print a tensor head with its up/down indices."""
        name = expr.args[0].name
        indices = expr.get_indices()
        return self._printer_tensor_indices(name, indices)

    def _print_TensorElement(self, expr):
        """Pretty print a tensor element: like a tensor, but some indices
        carry explicit values from ``expr.index_map``."""
        name = expr.expr.args[0].name
        indices = expr.expr.get_indices()
        index_map = expr.index_map
        return self._printer_tensor_indices(name, indices, index_map)
    def _print_TensMul(self, expr):
        """Pretty print a tensor product, parenthesizing low-precedence
        factors and prepending the overall sign if present."""
        sign, args = expr._get_args_for_traditional_printer()
        args = [
            prettyForm(*self._print(i).parens()) if
            precedence_traditional(i) < PRECEDENCE["Mul"] else self._print(i)
            for i in args
        ]
        pform = prettyForm.__mul__(*args)
        if sign:
            return prettyForm(*pform.left(sign))
        else:
            return pform

    def _print_TensAdd(self, expr):
        """Pretty print a tensor sum, parenthesizing low-precedence terms."""
        args = [
            prettyForm(*self._print(i).parens()) if
            precedence_traditional(i) < PRECEDENCE["Mul"] else self._print(i)
            for i in expr.args
        ]
        return prettyForm.__add__(*args)

    def _print_TensorIndex(self, expr):
        """Pretty print a tensor index; covariant indices are negated."""
        sym = expr.args[0]
        if not expr.is_up:
            sym = -sym
        return self._print(sym)
    def _print_PartialDerivative(self, deriv):
        """Pretty print a partial derivative as a fraction d^n/(dx dy ...)
        applied to the parenthesized expression."""
        if self._use_unicode:
            deriv_symbol = U('PARTIAL DIFFERENTIAL')
        else:
            deriv_symbol = r'd'
        x = None

        # Build the denominator: "d var" factors, innermost variable last.
        for variable in reversed(deriv.variables):
            s = self._print(variable)
            ds = prettyForm(*s.left(deriv_symbol))

            if x is None:
                x = ds
            else:
                x = prettyForm(*x.right(' '))
                x = prettyForm(*x.right(ds))

        f = prettyForm(
            binding=prettyForm.FUNC, *self._print(deriv.expr).parens())

        pform = prettyForm(deriv_symbol)

        if len(deriv.variables) > 1:
            # Total derivative order becomes the exponent of the numerator d.
            pform = pform**self._print(len(deriv.variables))

        pform = prettyForm(*pform.below(stringPict.LINE, x))
        pform.baseline = pform.baseline + 1
        pform = prettyForm(*stringPict.next(pform, f))
        pform.binding = prettyForm.MUL

        return pform
    def _print_Piecewise(self, pexpr):
        """Pretty print a Piecewise as a two-column table (expression,
        condition) wrapped by a single left brace."""
        P = {}
        for n, ec in enumerate(pexpr.args):
            P[n, 0] = self._print(ec.expr)
            if ec.cond == True:
                P[n, 1] = prettyForm('otherwise')
            else:
                P[n, 1] = prettyForm(
                    *prettyForm('for ').right(self._print(ec.cond)))
        hsep = 2   # horizontal spacing between columns
        vsep = 1   # vertical spacing between rows
        len_args = len(pexpr.args)

        # max widths
        maxw = [max(P[i, j].width() for i in range(len_args))
                for j in range(2)]

        # FIXME: Refactor this code and matrix into some tabular environment.
        # drawing result
        D = None

        for i in range(len_args):
            D_row = None
            for j in range(2):
                p = P[i, j]
                assert p.width() <= maxw[j]

                # Center each cell within its column width.
                wdelta = maxw[j] - p.width()
                wleft = wdelta // 2
                wright = wdelta - wleft

                p = prettyForm(*p.right(' '*wright))
                p = prettyForm(*p.left(' '*wleft))

                if D_row is None:
                    D_row = p
                    continue

                D_row = prettyForm(*D_row.right(' '*hsep))  # h-spacer
                D_row = prettyForm(*D_row.right(p))

            if D is None:
                D = D_row       # first row in a picture
                continue

            # v-spacer
            for _ in range(vsep):
                D = prettyForm(*D.below(' '))

            D = prettyForm(*D.below(D_row))

        # Left brace only, no right delimiter.
        D = prettyForm(*D.parens('{', ''))
        D.baseline = D.height()//2
        D.binding = prettyForm.OPEN
        return D
def _print_ITE(self, ite):
from sympy.functions.elementary.piecewise import Piecewise
return self._print(ite.rewrite(Piecewise))
def _hprint_vec(self, v):
D = None
for a in v:
p = a
if D is None:
D = p
else:
D = prettyForm(*D.right(', '))
D = prettyForm(*D.right(p))
if D is None:
D = stringPict(' ')
return D
    def _hprint_vseparator(self, p1, p2, left=None, right=None, delimiter='', ifascii_nougly=False):
        """Print *p1* and *p2* separated by a full-height vertical bar,
        optionally wrapped in *left*/*right* delimiters.  In ASCII mode with
        ``ifascii_nougly`` a plain '|' character is used instead."""
        if ifascii_nougly and not self._use_unicode:
            return self._print_seq((p1, '|', p2), left=left, right=right,
                                   delimiter=delimiter, ifascii_nougly=True)
        # First lay out p1/p2 to learn the total height, then build a bar
        # of exactly that height with a matching baseline.
        tmp = self._print_seq((p1, p2,), left=left, right=right, delimiter=delimiter)
        sep = stringPict(vobj('|', tmp.height()), baseline=tmp.baseline)
        return self._print_seq((p1, sep, p2), left=left, right=right,
                               delimiter=delimiter)
    def _print_hyper(self, e):
        """Pretty print a generalized hypergeometric function pFq: the 'F'
        symbol with p, q attached, followed by (ap; bq | z)."""
        # FIXME refactor Matrix, Piecewise, and this into a tabular environment
        ap = [self._print(a) for a in e.ap]
        bq = [self._print(b) for b in e.bq]

        P = self._print(e.argument)
        P.baseline = P.height()//2

        # Drawing result - first create the ap, bq vectors
        D = None
        for v in [ap, bq]:
            D_row = self._hprint_vec(v)
            if D is None:
                D = D_row       # first row in a picture
            else:
                D = prettyForm(*D.below(' '))
                D = prettyForm(*D.below(D_row))

        # make sure that the argument `z' is centred vertically
        D.baseline = D.height()//2

        # insert horizontal separator
        P = prettyForm(*P.left(' '))
        D = prettyForm(*D.right(' '))

        # insert separating `|`
        D = self._hprint_vseparator(D, P)

        # add parens
        D = prettyForm(*D.parens('(', ')'))

        # create the F symbol
        above = D.height()//2 - 1
        below = D.height() - above - 1

        sz, t, b, add, img = annotated('F')
        F = prettyForm('\n' * (above - t) + img + '\n' * (below - b),
                       baseline=above + sz)
        add = (sz + 1)//2

        # p on the left, q on the right of F.
        F = prettyForm(*F.left(self._print(len(e.ap))))
        F = prettyForm(*F.right(self._print(len(e.bq))))
        F.baseline = above + add

        D = prettyForm(*F.right(' ', D))

        return D
    def _print_meijerg(self, e):
        """Pretty print a Meijer G-function: the 'G' symbol with its four
        order parameters, followed by the 2x2 parameter table and argument."""
        # FIXME refactor Matrix, Piecewise, and this into a tabular environment

        # Four parameter vectors, indexed (row, column).
        v = {}
        v[(0, 0)] = [self._print(a) for a in e.an]
        v[(0, 1)] = [self._print(a) for a in e.aother]
        v[(1, 0)] = [self._print(b) for b in e.bm]
        v[(1, 1)] = [self._print(b) for b in e.bother]

        P = self._print(e.argument)
        P.baseline = P.height()//2

        vp = {}
        for idx in v:
            vp[idx] = self._hprint_vec(v[idx])

        # Center each column of the 2x2 table to its widest entry.
        for i in range(2):
            maxw = max(vp[(0, i)].width(), vp[(1, i)].width())
            for j in range(2):
                s = vp[(j, i)]
                left = (maxw - s.width()) // 2
                right = maxw - left - s.width()
                s = prettyForm(*s.left(' ' * left))
                s = prettyForm(*s.right(' ' * right))
                vp[(j, i)] = s

        D1 = prettyForm(*vp[(0, 0)].right('  ', vp[(0, 1)]))
        D1 = prettyForm(*D1.below(' '))
        D2 = prettyForm(*vp[(1, 0)].right('  ', vp[(1, 1)]))
        D = prettyForm(*D1.below(D2))

        # make sure that the argument `z' is centred vertically
        D.baseline = D.height()//2

        # insert horizontal separator
        P = prettyForm(*P.left(' '))
        D = prettyForm(*D.right(' '))

        # insert separating `|`
        D = self._hprint_vseparator(D, P)

        # add parens
        D = prettyForm(*D.parens('(', ')'))

        # create the G symbol
        above = D.height()//2 - 1
        below = D.height() - above - 1

        sz, t, b, add, img = annotated('G')
        F = prettyForm('\n' * (above - t) + img + '\n' * (below - b),
                       baseline=above + sz)

        pp = self._print(len(e.ap))
        pq = self._print(len(e.bq))
        pm = self._print(len(e.bm))
        pn = self._print(len(e.an))

        def adjust(p1, p2):
            # Pad the narrower of p1/p2 on the left so both share one width.
            diff = p1.width() - p2.width()
            if diff == 0:
                return p1, p2
            elif diff > 0:
                return p1, prettyForm(*p2.left(' '*diff))
            else:
                return prettyForm(*p1.left(' '*-diff)), p2

        pp, pm = adjust(pp, pm)
        pq, pn = adjust(pq, pn)
        pu = prettyForm(*pm.right(', ', pn))
        pl = prettyForm(*pp.right(', ', pq))

        ht = F.baseline - above - 2
        if ht > 0:
            pu = prettyForm(*pu.below('\n'*ht))
        p = prettyForm(*pu.below(pl))

        F.baseline = above
        F = prettyForm(*F.right(p))

        F.baseline = above + add

        D = prettyForm(*F.right(' ', D))

        return D
    def _print_ExpBase(self, e):
        """Pretty print exp-like functions as the constant e raised to the
        function argument."""
        # TODO should exp_polar be printed differently?
        #      what about exp_polar(0), exp_polar(1)?
        base = prettyForm(pretty_atom('Exp1', 'e'))
        return base ** self._print(e.args[0])

    def _print_Exp1(self, e):
        """Pretty print Euler's number e."""
        return prettyForm(pretty_atom('Exp1', 'e'))

    def _print_Function(self, e, sort=False, func_name=None, left='(',
                        right=')'):
        """Pretty print an applied function; *func_name* overrides the
        displayed name, *left*/*right* set the argument delimiters."""
        # optional argument func_name for supplying custom names
        # XXX works only for applied functions
        return self._helper_print_function(e.func, e.args, sort=sort, func_name=func_name, left=left, right=right)
    # Mathieu functions print under their traditional short names C, S, C', S'.
    def _print_mathieuc(self, e):
        return self._print_Function(e, func_name='C')

    def _print_mathieus(self, e):
        return self._print_Function(e, func_name='S')

    def _print_mathieucprime(self, e):
        return self._print_Function(e, func_name="C'")

    def _print_mathieusprime(self, e):
        return self._print_Function(e, func_name="S'")
    def _helper_print_function(self, func, args, sort=False, func_name=None,
                               delimiter=', ', elementwise=False, left='(',
                               right=')'):
        """Render ``func(args)`` as a pretty form.

        *func_name* overrides the displayed name; *sort* orders arguments by
        the default sort key; *elementwise* appends a small ring modifier for
        elementwise application; *left*/*right* are the argument delimiters.
        """
        if sort:
            args = sorted(args, key=default_sort_key)

        if not func_name and hasattr(func, "__name__"):
            func_name = func.__name__

        if func_name:
            prettyFunc = self._print(Symbol(func_name))
        else:
            prettyFunc = prettyForm(*self._print(func).parens())

        if elementwise:
            if self._use_unicode:
                circ = pretty_atom('Modifier Letter Low Ring')
            else:
                circ = '.'
            circ = self._print(circ)
            prettyFunc = prettyForm(
                binding=prettyForm.LINE,
                *stringPict.next(prettyFunc, circ)
            )

        prettyArgs = prettyForm(*self._print_seq(args, delimiter=delimiter).parens(
            left=left, right=right))

        pform = prettyForm(
            binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))

        # store pform parts so it can be reassembled e.g. when powered
        pform.prettyFunc = prettyFunc
        pform.prettyArgs = prettyArgs

        return pform
def _print_ElementwiseApplyFunction(self, e):
func = e.function
arg = e.expr
args = [arg]
return self._helper_print_function(func, args, delimiter="", elementwise=True)
    @property
    def _special_function_classes(self):
        """Map special-function classes to their [unicode, ascii] display names."""
        from sympy.functions.special.tensor_functions import KroneckerDelta
        from sympy.functions.special.gamma_functions import gamma, lowergamma
        from sympy.functions.special.zeta_functions import lerchphi
        from sympy.functions.special.beta_functions import beta
        from sympy.functions.special.delta_functions import DiracDelta
        from sympy.functions.special.error_functions import Chi
        return {KroneckerDelta: [greek_unicode['delta'], 'delta'],
                gamma: [greek_unicode['Gamma'], 'Gamma'],
                lerchphi: [greek_unicode['Phi'], 'lerchphi'],
                lowergamma: [greek_unicode['gamma'], 'gamma'],
                beta: [greek_unicode['Beta'], 'B'],
                DiracDelta: [greek_unicode['delta'], 'delta'],
                Chi: ['Chi', 'Chi']}

    def _print_FunctionClass(self, expr):
        """Pretty print an unapplied function class, using the special-name
        table above when the class matches, otherwise its symbol name."""
        for cls in self._special_function_classes:
            if issubclass(expr, cls) and expr.__name__ == cls.__name__:
                if self._use_unicode:
                    return prettyForm(self._special_function_classes[cls][0])
                else:
                    return prettyForm(self._special_function_classes[cls][1])
        func_name = expr.__name__
        return prettyForm(pretty_symbol(func_name))
    def _print_GeometryEntity(self, expr):
        # GeometryEntity is based on Tuple but should not print like a Tuple
        return self.emptyPrinter(expr)

    def _print_polylog(self, e):
        """Pretty print polylog as Li with a subscripted order when the order
        can be set in unicode subscript characters; otherwise generically."""
        subscript = self._print(e.args[0])
        if self._use_unicode and is_subscriptable_in_unicode(subscript):
            return self._print_Function(Function('Li_%s' % subscript)(e.args[1]))
        return self._print_Function(e)
    def _print_lerchphi(self, e):
        """Pretty print the Lerch transcendent as capital Phi (unicode)."""
        func_name = greek_unicode['Phi'] if self._use_unicode else 'lerchphi'
        return self._print_Function(e, func_name=func_name)

    def _print_dirichlet_eta(self, e):
        """Pretty print the Dirichlet eta function as eta (unicode)."""
        func_name = greek_unicode['eta'] if self._use_unicode else 'dirichlet_eta'
        return self._print_Function(e, func_name=func_name)

    def _print_Heaviside(self, e):
        """Pretty print Heaviside as theta (unicode); the default second
        argument 1/2 is suppressed."""
        func_name = greek_unicode['theta'] if self._use_unicode else 'Heaviside'
        if e.args[1] is S.Half:
            pform = prettyForm(*self._print(e.args[0]).parens())
            pform = prettyForm(*pform.left(func_name))
            return pform
        else:
            return self._print_Function(e, func_name=func_name)
    # One-line delegators that only supply the conventional display name.
    def _print_fresnels(self, e):
        return self._print_Function(e, func_name="S")

    def _print_fresnelc(self, e):
        return self._print_Function(e, func_name="C")

    def _print_airyai(self, e):
        return self._print_Function(e, func_name="Ai")

    def _print_airybi(self, e):
        return self._print_Function(e, func_name="Bi")

    def _print_airyaiprime(self, e):
        return self._print_Function(e, func_name="Ai'")

    def _print_airybiprime(self, e):
        return self._print_Function(e, func_name="Bi'")

    def _print_LambertW(self, e):
        return self._print_Function(e, func_name="W")

    def _print_Covariance(self, e):
        return self._print_Function(e, func_name="Cov")

    def _print_Variance(self, e):
        return self._print_Function(e, func_name="Var")

    def _print_Probability(self, e):
        return self._print_Function(e, func_name="P")

    def _print_Expectation(self, e):
        # Expectation conventionally uses square brackets: E[...]
        return self._print_Function(e, func_name="E", left='[', right=']')
    def _print_Lambda(self, e):
        """Pretty print a Lambda as ``signature ↦ expr`` (``->`` in ASCII)."""
        expr = e.expr
        sig = e.signature
        if self._use_unicode:
            arrow = f" {pretty_atom('ArrowFromBar')} "
        else:
            arrow = " -> "
        if len(sig) == 1 and sig[0].is_symbol:
            # Single plain symbol: drop the tuple parentheses.
            sig = sig[0]
        var_form = self._print(sig)

        return prettyForm(*stringPict.next(var_form, arrow, self._print(expr)), binding=8)
    def _print_Order(self, expr):
        """Pretty print big-O: O(expr) or O(expr; vars -> point) when the
        limit point is nonzero or there are several variables."""
        pform = self._print(expr.expr)
        if (expr.point and any(p != S.Zero for p in expr.point)) or \
           len(expr.variables) > 1:
            pform = prettyForm(*pform.right("; "))
            if len(expr.variables) > 1:
                pform = prettyForm(*pform.right(self._print(expr.variables)))
            elif len(expr.variables):
                pform = prettyForm(*pform.right(self._print(expr.variables[0])))
            if self._use_unicode:
                pform = prettyForm(*pform.right(f" {pretty_atom('Arrow')} "))
            else:
                pform = prettyForm(*pform.right(" -> "))
            if len(expr.point) > 1:
                pform = prettyForm(*pform.right(self._print(expr.point)))
            else:
                pform = prettyForm(*pform.right(self._print(expr.point[0])))
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left("O"))
        return pform
    def _print_SingularityFunction(self, e):
        """Pretty print a singularity function as <x - a>**n (Macaulay
        bracket notation)."""
        if self._use_unicode:
            shift = self._print(e.args[0]-e.args[1])
            n = self._print(e.args[2])
            base = prettyForm("<")
            base = prettyForm(*base.right(shift))
            base = prettyForm(*base.right(">"))
            pform = base**n
            return pform
        else:
            n = self._print(e.args[2])
            shift = self._print(e.args[0]-e.args[1])
            base = self._print_seq(shift, "<", ">", ' ')
            return base**n
    # Beta/gamma family: delegate with the conventional greek or ASCII name.
    def _print_beta(self, e):
        func_name = greek_unicode['Beta'] if self._use_unicode else 'B'
        return self._print_Function(e, func_name=func_name)

    def _print_betainc(self, e):
        # Incomplete beta function.
        func_name = "B'"
        return self._print_Function(e, func_name=func_name)

    def _print_betainc_regularized(self, e):
        # Regularized incomplete beta function.
        func_name = 'I'
        return self._print_Function(e, func_name=func_name)

    def _print_gamma(self, e):
        func_name = greek_unicode['Gamma'] if self._use_unicode else 'Gamma'
        return self._print_Function(e, func_name=func_name)

    def _print_uppergamma(self, e):
        func_name = greek_unicode['Gamma'] if self._use_unicode else 'Gamma'
        return self._print_Function(e, func_name=func_name)

    def _print_lowergamma(self, e):
        func_name = greek_unicode['gamma'] if self._use_unicode else 'lowergamma'
        return self._print_Function(e, func_name=func_name)
    def _print_DiracDelta(self, e):
        """Pretty print DiracDelta as delta(x); a second argument (the
        derivative order) becomes a superscript in parentheses."""
        if self._use_unicode:
            if len(e.args) == 2:
                a = prettyForm(greek_unicode['delta'])
                b = self._print(e.args[1])
                b = prettyForm(*b.parens())
                c = self._print(e.args[0])
                c = prettyForm(*c.parens())
                pform = a**b
                pform = prettyForm(*pform.right(' '))
                pform = prettyForm(*pform.right(c))
                return pform
            pform = self._print(e.args[0])
            pform = prettyForm(*pform.parens())
            pform = prettyForm(*pform.left(greek_unicode['delta']))
            return pform
        else:
            return self._print_Function(e)

    def _print_expint(self, e):
        """Pretty print the exponential integral as E with a unicode
        subscript when the order permits, otherwise generically."""
        subscript = self._print(e.args[0])
        if self._use_unicode and is_subscriptable_in_unicode(subscript):
            return self._print_Function(Function('E_%s' % subscript)(e.args[1]))
        return self._print_Function(e)
def _print_Chi(self, e):
# This needs a special case since otherwise it comes out as greek
# letter chi...
prettyFunc = prettyForm("Chi")
prettyArgs = prettyForm(*self._print_seq(e.args).parens())
pform = prettyForm(
binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))
# store pform parts so it can be reassembled e.g. when powered
pform.prettyFunc = prettyFunc
pform.prettyArgs = prettyArgs
return pform
    def _print_elliptic_e(self, e):
        """Pretty print elliptic E; a two-argument form separates the
        arguments with a vertical bar."""
        pforma0 = self._print(e.args[0])
        if len(e.args) == 1:
            pform = pforma0
        else:
            pforma1 = self._print(e.args[1])
            pform = self._hprint_vseparator(pforma0, pforma1)
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left('E'))
        return pform

    def _print_elliptic_k(self, e):
        """Pretty print the complete elliptic integral K(m)."""
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left('K'))
        return pform

    def _print_elliptic_f(self, e):
        """Pretty print the incomplete elliptic integral F(phi | m)."""
        pforma0 = self._print(e.args[0])
        pforma1 = self._print(e.args[1])
        pform = self._hprint_vseparator(pforma0, pforma1)
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left('F'))
        return pform

    def _print_elliptic_pi(self, e):
        """Pretty print elliptic Pi: Pi(n | m) or Pi(n; phi | m)."""
        name = greek_unicode['Pi'] if self._use_unicode else 'Pi'
        pforma0 = self._print(e.args[0])
        pforma1 = self._print(e.args[1])
        if len(e.args) == 2:
            pform = self._hprint_vseparator(pforma0, pforma1)
        else:
            pforma2 = self._print(e.args[2])
            pforma = self._hprint_vseparator(pforma1, pforma2, ifascii_nougly=False)
            pforma = prettyForm(*pforma.left('; '))
            pform = prettyForm(*pforma.left(pforma0))
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left(name))
        return pform
    def _print_GoldenRatio(self, expr):
        """Pretty print the golden ratio as phi (unicode only)."""
        if self._use_unicode:
            return prettyForm(pretty_symbol('phi'))
        return self._print(Symbol("GoldenRatio"))

    def _print_EulerGamma(self, expr):
        """Pretty print the Euler-Mascheroni constant as gamma (unicode only)."""
        if self._use_unicode:
            return prettyForm(pretty_symbol('gamma'))
        return self._print(Symbol("EulerGamma"))

    def _print_Catalan(self, expr):
        """Pretty print Catalan's constant as G."""
        return self._print(Symbol("G"))

    def _print_Mod(self, expr):
        """Pretty print a Mod as ``a mod b``, parenthesizing the left operand
        when it binds more loosely than a product."""
        pform = self._print(expr.args[0])
        if pform.binding > prettyForm.MUL:
            pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.right(' mod '))
        pform = prettyForm(*pform.right(self._print(expr.args[1])))
        pform.binding = prettyForm.OPEN
        return pform
    def _print_Add(self, expr, order=None):
        """Pretty print a sum: order the terms, print negative terms with a
        leading minus, and size rational terms to match their neighbours."""
        terms = self._as_ordered_terms(expr, order=order)
        pforms, indices = [], []

        def pretty_negative(pform, index):
            """Prepend a minus sign to a pretty form. """
            #TODO: Move this code to prettyForm
            if index == 0:
                if pform.height() > 1:
                    pform_neg = '- '
                else:
                    pform_neg = '-'
            else:
                pform_neg = ' - '

            if (pform.binding > prettyForm.NEG
                    or pform.binding == prettyForm.ADD):
                p = stringPict(*pform.parens())
            else:
                p = pform
            p = stringPict.next(pform_neg, p)
            # Lower the binding to NEG, even if it was higher. Otherwise, it
            # will print as a + ( - (b)), instead of a - (b).
            return prettyForm(binding=prettyForm.NEG, *p)

        for i, term in enumerate(terms):
            if term.is_Mul and term.could_extract_minus_sign():
                coeff, other = term.as_coeff_mul(rational=False)
                if coeff == -1:
                    negterm = Mul(*other, evaluate=False)
                else:
                    negterm = Mul(-coeff, *other, evaluate=False)
                pform = self._print(negterm)
                pforms.append(pretty_negative(pform, i))
            elif term.is_Rational and term.q > 1:
                # Defer proper fractions: decide later whether to typeset
                # them as stacked fractions (when neighbours are tall).
                pforms.append(None)
                indices.append(i)
            elif term.is_Number and term < 0:
                pform = self._print(-term)
                pforms.append(pretty_negative(pform, i))
            elif term.is_Relational:
                pforms.append(prettyForm(*self._print(term).parens()))
            else:
                pforms.append(self._print(term))

        if indices:
            large = True

            for pform in pforms:
                if pform is not None and pform.height() > 1:
                    break
            else:
                large = False

            for i in indices:
                term, negative = terms[i], False

                if term < 0:
                    term, negative = -term, True

                if large:
                    pform = prettyForm(str(term.p))/prettyForm(str(term.q))
                else:
                    pform = self._print(term)

                if negative:
                    pform = pretty_negative(pform, i)

                pforms[i] = pform

        return prettyForm.__add__(*pforms)
    def _print_Mul(self, product):
        """Pretty print a product, splitting factors with negative rational
        exponents into a numerator/denominator fraction."""
        from sympy.physics.units import Quantity

        # Check for unevaluated Mul. In this case we need to make sure the
        # identities are visible, multiple Rational factors are not combined
        # etc so we display in a straight-forward form that fully preserves all
        # args and their order.
        args = product.args
        if args[0] is S.One or any(isinstance(arg, Number) for arg in args[1:]):
            strargs = list(map(self._print, args))
            # XXX: This is a hack to work around the fact that
            # prettyForm.__mul__ absorbs a leading -1 in the args. Probably it
            # would be better to fix this in prettyForm.__mul__ instead.
            negone = strargs[0] == '-1'
            if negone:
                strargs[0] = prettyForm('1', 0, 0)
            obj = prettyForm.__mul__(*strargs)
            if negone:
                obj = prettyForm('-' + obj.s, obj.baseline, obj.binding)
            return obj

        a = []  # items in the numerator
        b = []  # items that are in the denominator (if any)

        if self.order not in ('old', 'none'):
            args = product.as_ordered_factors()
        else:
            args = list(product.args)

        # If quantities are present append them at the back
        args = sorted(args, key=lambda x: isinstance(x, Quantity) or
                      (isinstance(x, Pow) and isinstance(x.base, Quantity)))

        # Gather terms for numerator/denominator
        for item in args:
            if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
                if item.exp != -1:
                    b.append(Pow(item.base, -item.exp, evaluate=False))
                else:
                    b.append(Pow(item.base, -item.exp))
            elif item.is_Rational and item is not S.Infinity:
                if item.p != 1:
                    a.append( Rational(item.p) )
                if item.q != 1:
                    b.append( Rational(item.q) )
            else:
                a.append(item)

        # Convert to pretty forms. Parentheses are added by `__mul__`.
        a = [self._print(ai) for ai in a]
        b = [self._print(bi) for bi in b]

        # Construct a pretty form
        if len(b) == 0:
            return prettyForm.__mul__(*a)
        else:
            if len(a) == 0:
                a.append( self._print(S.One) )
            return prettyForm.__mul__(*a)/prettyForm.__mul__(*b)
    # A helper function for _print_Pow to print x**(1/n)
    def _print_nth_root(self, base, root):
        """Draw *base* under a radical sign annotated with *root* (omitted
        for square roots).  Falls back to power notation for multi-line
        root expressions."""
        bpretty = self._print(base)

        # In very simple cases, use a single-char root sign
        if (self._settings['use_unicode_sqrt_char'] and self._use_unicode
                and root == 2 and bpretty.height() == 1
                and (bpretty.width() == 1
                     or (base.is_Integer and base.is_nonnegative))):
            return prettyForm(*bpretty.left(nth_root[2]))

        # Construct root sign, start with the \/ shape
        _zZ = xobj('/', 1)
        rootsign = xobj('\\', 1) + _zZ
        # Constructing the number to put on root
        rpretty = self._print(root)
        # roots look bad if they are not a single line
        if rpretty.height() != 1:
            return self._print(base)**self._print(1/root)
        # If power is half, no number should appear on top of root sign
        exp = '' if root == 2 else str(rpretty).ljust(2)
        if len(exp) > 2:
            rootsign = ' '*(len(exp) - 2) + rootsign
        # Stack the exponent
        rootsign = stringPict(exp + '\n' + rootsign)
        rootsign.baseline = 0
        # Diagonal: length is one less than height of base
        linelength = bpretty.height() - 1
        diagonal = stringPict('\n'.join(
            ' '*(linelength - i - 1) + _zZ + ' '*i
            for i in range(linelength)
        ))
        # Put baseline just below lowest line: next to exp
        diagonal.baseline = linelength - 1
        # Make the root symbol
        rootsign = prettyForm(*rootsign.right(diagonal))
        # Det the baseline to match contents to fix the height
        # but if the height of bpretty is one, the rootsign must be one higher
        rootsign.baseline = max(1, bpretty.baseline)
        #build result
        s = prettyForm(hobj('_', 2 + bpretty.width()))
        s = prettyForm(*bpretty.above(s))
        s = prettyForm(*s.left(rootsign))
        return s
    def _print_Pow(self, power):
        """Pretty print a power; negative exponents become fractions and
        rational exponents 1/n become radicals (when root_notation is on)."""
        from sympy.simplify.simplify import fraction
        b, e = power.as_base_exp()
        if power.is_commutative:
            if e is S.NegativeOne:
                return prettyForm("1")/self._print(b)
            n, d = fraction(e)
            if n is S.One and d.is_Atom and not e.is_Integer and (e.is_Rational or d.is_Symbol) \
                    and self._settings['root_notation']:
                return self._print_nth_root(b, d)
            if e.is_Rational and e < 0:
                return prettyForm("1")/self._print(Pow(b, -e, evaluate=False))

        if b.is_Relational:
            return prettyForm(*self._print(b).parens()).__pow__(self._print(e))

        return self._print(b)**self._print(e)

    def _print_UnevaluatedExpr(self, expr):
        """Pretty print an UnevaluatedExpr by printing its wrapped argument."""
        return self._print(expr.args[0])
    def __print_numer_denom(self, p, q):
        """Shared renderer for rational numbers p/q.

        Returns a prettyForm, or None when the number should fall through to
        the generic printer (small one-digit fractions).
        """
        if q == 1:
            if p < 0:
                return prettyForm(str(p), binding=prettyForm.NEG)
            else:
                return prettyForm(str(p))
        elif abs(p) >= 10 and abs(q) >= 10:
            # If more than one digit in numer and denom, print larger fraction
            if p < 0:
                return prettyForm(str(p), binding=prettyForm.NEG)/prettyForm(str(q))
                # Old printing method:
                #pform = prettyForm(str(-p))/prettyForm(str(q))
                #return prettyForm(binding=prettyForm.NEG, *pform.left('- '))
            else:
                return prettyForm(str(p))/prettyForm(str(q))
        else:
            return None

    def _print_Rational(self, expr):
        """Pretty print a sympy Rational via the shared numer/denom helper."""
        result = self.__print_numer_denom(expr.p, expr.q)

        if result is not None:
            return result
        else:
            return self.emptyPrinter(expr)

    def _print_Fraction(self, expr):
        """Pretty print a fractions.Fraction via the shared numer/denom helper."""
        result = self.__print_numer_denom(expr.numerator, expr.denominator)

        if result is not None:
            return result
        else:
            return self.emptyPrinter(expr)
    def _print_ProductSet(self, p):
        """Pretty print a Cartesian product; identical factors collapse to a
        power, otherwise factors are joined with a multiplication sign."""
        if len(p.sets) >= 1 and not has_variety(p.sets):
            return self._print(p.sets[0]) ** self._print(len(p.sets))
        else:
            prod_char = pretty_atom('Multiplication') if self._use_unicode else 'x'
            return self._print_seq(p.sets, None, None, ' %s ' % prod_char,
                                   parenthesize=lambda set: set.is_Union or
                                   set.is_Intersection or set.is_ProductSet)

    def _print_FiniteSet(self, s):
        """Pretty print a finite set as {a, b, ...} in canonical order."""
        items = sorted(s.args, key=default_sort_key)
        return self._print_seq(items, '{', '}', ', ' )
    def _print_Range(self, s):
        """Pretty print a Range as a brace-enclosed list, abbreviating long
        or infinite ranges with an ellipsis."""
        if self._use_unicode:
            dots = pretty_atom('Dots')
        else:
            dots = '...'

        if s.start.is_infinite and s.stop.is_infinite:
            if s.step.is_positive:
                printset = dots, -1, 0, 1, dots
            else:
                printset = dots, 1, 0, -1, dots
        elif s.start.is_infinite:
            printset = dots, s[-1] - s.step, s[-1]
        elif s.stop.is_infinite:
            it = iter(s)
            printset = next(it), next(it), dots
        elif len(s) > 4:
            it = iter(s)
            printset = next(it), next(it), dots, s[-1]
        else:
            printset = tuple(s)

        return self._print_seq(printset, '{', '}', ', ' )
    def _print_Interval(self, i):
        """Pretty print an interval with bracket style reflecting openness;
        a degenerate interval prints as a singleton set."""
        if i.start == i.end:
            return self._print_seq(i.args[:1], '{', '}')

        else:
            if i.left_open:
                left = '('
            else:
                left = '['

            if i.right_open:
                right = ')'
            else:
                right = ']'

            return self._print_seq(i.args[:2], left, right)

    def _print_AccumulationBounds(self, i):
        """Pretty print accumulation bounds as <min, max>."""
        left = '<'
        right = '>'

        return self._print_seq(i.args[:2], left, right)
    def _print_Intersection(self, u):
        """Pretty print an intersection of sets joined by the cap symbol."""
        delimiter = ' %s ' % pretty_atom('Intersection', 'n')

        return self._print_seq(u.args, None, None, delimiter,
                               parenthesize=lambda set: set.is_ProductSet or
                               set.is_Union or set.is_Complement)

    def _print_Union(self, u):
        """Pretty print a union of sets joined by the cup symbol."""
        union_delimiter = ' %s ' % pretty_atom('Union', 'U')

        return self._print_seq(u.args, None, None, union_delimiter,
                               parenthesize=lambda set: set.is_ProductSet or
                               set.is_Intersection or set.is_Complement)

    def _print_SymmetricDifference(self, u):
        """Pretty print a symmetric difference (unicode only)."""
        if not self._use_unicode:
            raise NotImplementedError("ASCII pretty printing of SymmetricDifference is not implemented")

        sym_delimeter = ' %s ' % pretty_atom('SymmetricDifference')

        return self._print_seq(u.args, None, None, sym_delimeter)

    def _print_Complement(self, u):
        """Pretty print a set complement joined by a backslash."""
        delimiter = r' \ '
        return self._print_seq(u.args, None, None, delimiter,
                               parenthesize=lambda set: set.is_ProductSet or set.is_Intersection
                               or set.is_Union)
    def _print_ImageSet(self, ts):
        """Pretty print an ImageSet as {expr | var in set, ...} with a
        vertical bar separating the expression from the binding(s)."""
        if self._use_unicode:
            inn = pretty_atom("SmallElementOf")
        else:
            inn = 'in'
        fun = ts.lamda
        sets = ts.base_sets
        signature = fun.signature
        expr = self._print(fun.expr)

        # TODO: the stuff to the left of the | and the stuff to the right of
        # the | should have independent baselines, that way something like
        # ImageSet(Lambda(x, 1/x**2), S.Naturals) prints the "x in N" part
        # centered on the right instead of aligned with the fraction bar on
        # the left. The same also applies to ConditionSet and ComplexRegion
        if len(signature) == 1:
            S = self._print_seq((signature[0], inn, sets[0]),
                                delimiter=' ')
            return self._hprint_vseparator(expr, S,
                                           left='{', right='}',
                                           ifascii_nougly=True, delimiter=' ')
        else:
            pargs = tuple(j for var, setv in zip(signature, sets) for j in
                          (var, ' ', inn, ' ', setv, ", "))
            S = self._print_seq(pargs[:-1], delimiter='')
            return self._hprint_vseparator(expr, S,
                                           left='{', right='}',
                                           ifascii_nougly=True, delimiter=' ')
    def _print_ConditionSet(self, ts):
        """Pretty print a ConditionSet as {sym | sym in base and cond}
        (the base-set part is dropped for the universal set)."""
        if self._use_unicode:
            inn = pretty_atom('SmallElementOf')
            # using _and because and is a keyword and it is bad practice to
            # overwrite them
            _and = pretty_atom('And')
        else:
            inn = 'in'
            _and = 'and'

        variables = self._print_seq(Tuple(ts.sym))
        as_expr = getattr(ts.condition, 'as_expr', None)
        if as_expr is not None:
            cond = self._print(ts.condition.as_expr())
        else:
            cond = self._print(ts.condition)
        if self._use_unicode:
            # NOTE(review): cond is printed a second time here — presumably to
            # normalize it into a prettyForm before .parens(); confirm intent.
            cond = self._print(cond)
            cond = prettyForm(*cond.parens())

        if ts.base_set is S.UniversalSet:
            return self._hprint_vseparator(variables, cond, left="{",
                                           right="}", ifascii_nougly=True,
                                           delimiter=' ')

        base = self._print(ts.base_set)
        C = self._print_seq((variables, inn, base, _and, cond),
                            delimiter=' ')
        return self._hprint_vseparator(variables, C, left="{", right="}",
                                       ifascii_nougly=True, delimiter=' ')
    def _print_ComplexRegion(self, ts):
        """Pretty print a ComplexRegion as {expr | vars in product-set}."""
        if self._use_unicode:
            inn = pretty_atom('SmallElementOf')
        else:
            inn = 'in'
        variables = self._print_seq(ts.variables)
        expr = self._print(ts.expr)
        prodsets = self._print(ts.sets)

        C = self._print_seq((variables, inn, prodsets),
                            delimiter=' ')
        return self._hprint_vseparator(expr, C, left="{", right="}",
                                       ifascii_nougly=True, delimiter=' ')

    def _print_Contains(self, e):
        """Pretty print membership as ``var ∈ set`` (str fallback in ASCII)."""
        var, set = e.args
        if self._use_unicode:
            el = f" {pretty_atom('ElementOf')} "
            return prettyForm(*stringPict.next(self._print(var),
                                               el, self._print(set)), binding=8)
        else:
            return prettyForm(sstr(e))
def _print_FourierSeries(self, s):
if s.an.formula is S.Zero and s.bn.formula is S.Zero:
return self._print(s.a0)
if self._use_unicode:
dots = pretty_atom('Dots')
else:
dots = '...'
return self._print_Add(s.truncate()) + self._print(dots)
def _print_FormalPowerSeries(self, s):
return self._print_Add(s.infinite)
def _print_SetExpr(self, se):
pretty_set = prettyForm(*self._print(se.set).parens())
pretty_name = self._print(Symbol("SetExpr"))
return prettyForm(*pretty_name.right(pretty_set))
def _print_SeqFormula(self, s):
if self._use_unicode:
dots = pretty_atom('Dots')
else:
dots = '...'
if len(s.start.free_symbols) > 0 or len(s.stop.free_symbols) > 0:
raise NotImplementedError("Pretty printing of sequences with symbolic bound not implemented")
if s.start is S.NegativeInfinity:
stop = s.stop
printset = (dots, s.coeff(stop - 3), s.coeff(stop - 2),
s.coeff(stop - 1), s.coeff(stop))
elif s.stop is S.Infinity or s.length > 4:
printset = s[:4]
printset.append(dots)
printset = tuple(printset)
else:
printset = tuple(s)
return self._print_list(printset)
_print_SeqPer = _print_SeqFormula
_print_SeqAdd = _print_SeqFormula
_print_SeqMul = _print_SeqFormula
def _print_seq(self, seq, left=None, right=None, delimiter=', ',
parenthesize=lambda x: False, ifascii_nougly=True):
pforms = []
for item in seq:
pform = self._print(item)
if parenthesize(item):
pform = prettyForm(*pform.parens())
if pforms:
pforms.append(delimiter)
pforms.append(pform)
if not pforms:
s = stringPict('')
else:
s = prettyForm(*stringPict.next(*pforms))
s = prettyForm(*s.parens(left, right, ifascii_nougly=ifascii_nougly))
return s
def join(self, delimiter, args):
pform = None
for arg in args:
if pform is None:
pform = arg
else:
pform = prettyForm(*pform.right(delimiter))
pform = prettyForm(*pform.right(arg))
if pform is None:
return prettyForm("")
else:
return pform
def _print_list(self, l):
return self._print_seq(l, '[', ']')
def _print_tuple(self, t):
if len(t) == 1:
ptuple = prettyForm(*stringPict.next(self._print(t[0]), ','))
return prettyForm(*ptuple.parens('(', ')', ifascii_nougly=True))
else:
return self._print_seq(t, '(', ')')
def _print_Tuple(self, expr):
return self._print_tuple(expr)
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for k in keys:
K = self._print(k)
V = self._print(d[k])
s = prettyForm(*stringPict.next(K, ': ', V))
items.append(s)
return self._print_seq(items, '{', '}')
def _print_Dict(self, d):
return self._print_dict(d)
def _print_set(self, s):
if not s:
return prettyForm('set()')
items = sorted(s, key=default_sort_key)
pretty = self._print_seq(items)
pretty = prettyForm(*pretty.parens('{', '}', ifascii_nougly=True))
return pretty
def _print_frozenset(self, s):
if not s:
return prettyForm('frozenset()')
items = sorted(s, key=default_sort_key)
pretty = self._print_seq(items)
pretty = prettyForm(*pretty.parens('{', '}', ifascii_nougly=True))
pretty = prettyForm(*pretty.parens('(', ')', ifascii_nougly=True))
pretty = prettyForm(*stringPict.next(type(s).__name__, pretty))
return pretty
def _print_UniversalSet(self, s):
if self._use_unicode:
return prettyForm(pretty_atom('Universe'))
else:
return prettyForm('UniversalSet')
def _print_PolyRing(self, ring):
return prettyForm(sstr(ring))
def _print_FracField(self, field):
return prettyForm(sstr(field))
def _print_FreeGroupElement(self, elm):
return prettyForm(str(elm))
def _print_PolyElement(self, poly):
return prettyForm(sstr(poly))
def _print_FracElement(self, frac):
return prettyForm(sstr(frac))
def _print_AlgebraicNumber(self, expr):
if expr.is_aliased:
return self._print(expr.as_poly().as_expr())
else:
return self._print(expr.as_expr())
def _print_ComplexRootOf(self, expr):
args = [self._print_Add(expr.expr, order='lex'), expr.index]
pform = prettyForm(*self._print_seq(args).parens())
pform = prettyForm(*pform.left('CRootOf'))
return pform
def _print_RootSum(self, expr):
args = [self._print_Add(expr.expr, order='lex')]
if expr.fun is not S.IdentityFunction:
args.append(self._print(expr.fun))
pform = prettyForm(*self._print_seq(args).parens())
pform = prettyForm(*pform.left('RootSum'))
return pform
def _print_FiniteField(self, expr):
if self._use_unicode:
form = f"{pretty_atom('Integers')}_%d"
else:
form = 'GF(%d)'
return prettyForm(pretty_symbol(form % expr.mod))
def _print_IntegerRing(self, expr):
if self._use_unicode:
return prettyForm(pretty_atom('Integers'))
else:
return prettyForm('ZZ')
def _print_RationalField(self, expr):
if self._use_unicode:
return prettyForm(pretty_atom('Rationals'))
else:
return prettyForm('QQ')
def _print_RealField(self, domain):
if self._use_unicode:
prefix = pretty_atom("Reals")
else:
prefix = 'RR'
if domain.has_default_precision:
return prettyForm(prefix)
else:
return self._print(pretty_symbol(prefix + "_" + str(domain.precision)))
def _print_ComplexField(self, domain):
if self._use_unicode:
prefix = pretty_atom('Complexes')
else:
prefix = 'CC'
if domain.has_default_precision:
return prettyForm(prefix)
else:
return self._print(pretty_symbol(prefix + "_" + str(domain.precision)))
def _print_PolynomialRing(self, expr):
args = list(expr.symbols)
if not expr.order.is_default:
order = prettyForm(*prettyForm("order=").right(self._print(expr.order)))
args.append(order)
pform = self._print_seq(args, '[', ']')
pform = prettyForm(*pform.left(self._print(expr.domain)))
return pform
def _print_FractionField(self, expr):
args = list(expr.symbols)
if not expr.order.is_default:
order = prettyForm(*prettyForm("order=").right(self._print(expr.order)))
args.append(order)
pform = self._print_seq(args, '(', ')')
pform = prettyForm(*pform.left(self._print(expr.domain)))
return pform
def _print_PolynomialRingBase(self, expr):
g = expr.symbols
if str(expr.order) != str(expr.default_order):
g = g + ("order=" + str(expr.order),)
pform = self._print_seq(g, '[', ']')
pform = prettyForm(*pform.left(self._print(expr.domain)))
return pform
def _print_GroebnerBasis(self, basis):
exprs = [ self._print_Add(arg, order=basis.order)
for arg in basis.exprs ]
exprs = prettyForm(*self.join(", ", exprs).parens(left="[", right="]"))
gens = [ self._print(gen) for gen in basis.gens ]
domain = prettyForm(
*prettyForm("domain=").right(self._print(basis.domain)))
order = prettyForm(
*prettyForm("order=").right(self._print(basis.order)))
pform = self.join(", ", [exprs] + gens + [domain, order])
pform = prettyForm(*pform.parens())
pform = prettyForm(*pform.left(basis.__class__.__name__))
return pform
def _print_Subs(self, e):
pform = self._print(e.expr)
pform = prettyForm(*pform.parens())
h = pform.height() if pform.height() > 1 else 2
rvert = stringPict(vobj('|', h), baseline=pform.baseline)
pform = prettyForm(*pform.right(rvert))
b = pform.baseline
pform.baseline = pform.height() - 1
pform = prettyForm(*pform.right(self._print_seq([
self._print_seq((self._print(v[0]), xsym('=='), self._print(v[1])),
delimiter='') for v in zip(e.variables, e.point) ])))
pform.baseline = b
return pform
def _print_number_function(self, e, name):
# Print name_arg[0] for one argument or name_arg[0](arg[1])
# for more than one argument
pform = prettyForm(name)
arg = self._print(e.args[0])
pform_arg = prettyForm(" "*arg.width())
pform_arg = prettyForm(*pform_arg.below(arg))
pform = prettyForm(*pform.right(pform_arg))
if len(e.args) == 1:
return pform
m, x = e.args
# TODO: copy-pasted from _print_Function: can we do better?
prettyFunc = pform
prettyArgs = prettyForm(*self._print_seq([x]).parens())
pform = prettyForm(
binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))
pform.prettyFunc = prettyFunc
pform.prettyArgs = prettyArgs
return pform
def _print_euler(self, e):
return self._print_number_function(e, "E")
def _print_catalan(self, e):
return self._print_number_function(e, "C")
def _print_bernoulli(self, e):
return self._print_number_function(e, "B")
_print_bell = _print_bernoulli
def _print_lucas(self, e):
return self._print_number_function(e, "L")
def _print_fibonacci(self, e):
return self._print_number_function(e, "F")
def _print_tribonacci(self, e):
return self._print_number_function(e, "T")
def _print_stieltjes(self, e):
if self._use_unicode:
return self._print_number_function(e, greek_unicode['gamma'])
else:
return self._print_number_function(e, "stieltjes")
def _print_KroneckerDelta(self, e):
pform = self._print(e.args[0])
pform = prettyForm(*pform.right(prettyForm(',')))
pform = prettyForm(*pform.right(self._print(e.args[1])))
if self._use_unicode:
a = stringPict(pretty_symbol('delta'))
else:
a = stringPict('d')
b = pform
top = stringPict(*b.left(' '*a.width()))
bot = stringPict(*a.right(' '*b.width()))
return prettyForm(binding=prettyForm.POW, *bot.below(top))
def _print_RandomDomain(self, d):
if hasattr(d, 'as_boolean'):
pform = self._print('Domain: ')
pform = prettyForm(*pform.right(self._print(d.as_boolean())))
return pform
elif hasattr(d, 'set'):
pform = self._print('Domain: ')
pform = prettyForm(*pform.right(self._print(d.symbols)))
pform = prettyForm(*pform.right(self._print(' in ')))
pform = prettyForm(*pform.right(self._print(d.set)))
return pform
elif hasattr(d, 'symbols'):
pform = self._print('Domain on ')
pform = prettyForm(*pform.right(self._print(d.symbols)))
return pform
else:
return self._print(None)
def _print_DMP(self, p):
try:
if p.ring is not None:
# TODO incorporate order
return self._print(p.ring.to_sympy(p))
except SympifyError:
pass
return self._print(repr(p))
def _print_DMF(self, p):
return self._print_DMP(p)
def _print_Object(self, object):
return self._print(pretty_symbol(object.name))
def _print_Morphism(self, morphism):
arrow = xsym("-->")
domain = self._print(morphism.domain)
codomain = self._print(morphism.codomain)
tail = domain.right(arrow, codomain)[0]
return prettyForm(tail)
def _print_NamedMorphism(self, morphism):
pretty_name = self._print(pretty_symbol(morphism.name))
pretty_morphism = self._print_Morphism(morphism)
return prettyForm(pretty_name.right(":", pretty_morphism)[0])
def _print_IdentityMorphism(self, morphism):
from sympy.categories import NamedMorphism
return self._print_NamedMorphism(
NamedMorphism(morphism.domain, morphism.codomain, "id"))
def _print_CompositeMorphism(self, morphism):
circle = xsym(".")
# All components of the morphism have names and it is thus
# possible to build the name of the composite.
component_names_list = [pretty_symbol(component.name) for
component in morphism.components]
component_names_list.reverse()
component_names = circle.join(component_names_list) + ":"
pretty_name = self._print(component_names)
pretty_morphism = self._print_Morphism(morphism)
return prettyForm(pretty_name.right(pretty_morphism)[0])
def _print_Category(self, category):
return self._print(pretty_symbol(category.name))
def _print_Diagram(self, diagram):
if not diagram.premises:
# This is an empty diagram.
return self._print(S.EmptySet)
pretty_result = self._print(diagram.premises)
if diagram.conclusions:
results_arrow = " %s " % xsym("==>")
pretty_conclusions = self._print(diagram.conclusions)[0]
pretty_result = pretty_result.right(
results_arrow, pretty_conclusions)
return prettyForm(pretty_result[0])
def _print_DiagramGrid(self, grid):
from sympy.matrices import Matrix
matrix = Matrix([[grid[i, j] if grid[i, j] else Symbol(" ")
for j in range(grid.width)]
for i in range(grid.height)])
return self._print_matrix_contents(matrix)
def _print_FreeModuleElement(self, m):
# Print as row vector for convenience, for now.
return self._print_seq(m, '[', ']')
def _print_SubModule(self, M):
gens = [[M.ring.to_sympy(g) for g in gen] for gen in M.gens]
return self._print_seq(gens, '<', '>')
def _print_FreeModule(self, M):
return self._print(M.ring)**self._print(M.rank)
def _print_ModuleImplementedIdeal(self, M):
sym = M.ring.to_sympy
return self._print_seq([sym(x) for [x] in M._module.gens], '<', '>')
def _print_QuotientRing(self, R):
return self._print(R.ring) / self._print(R.base_ideal)
def _print_QuotientRingElement(self, R):
return self._print(R.ring.to_sympy(R)) + self._print(R.ring.base_ideal)
def _print_QuotientModuleElement(self, m):
return self._print(m.data) + self._print(m.module.killed_module)
def _print_QuotientModule(self, M):
return self._print(M.base) / self._print(M.killed_module)
def _print_MatrixHomomorphism(self, h):
matrix = self._print(h._sympy_matrix())
matrix.baseline = matrix.height() // 2
pform = prettyForm(*matrix.right(' : ', self._print(h.domain),
' %s> ' % hobj('-', 2), self._print(h.codomain)))
return pform
def _print_Manifold(self, manifold):
return self._print(manifold.name)
def _print_Patch(self, patch):
return self._print(patch.name)
def _print_CoordSystem(self, coords):
return self._print(coords.name)
def _print_BaseScalarField(self, field):
string = field._coord_sys.symbols[field._index].name
return self._print(pretty_symbol(string))
def _print_BaseVectorField(self, field):
s = U('PARTIAL DIFFERENTIAL') + '_' + field._coord_sys.symbols[field._index].name
return self._print(pretty_symbol(s))
def _print_Differential(self, diff):
if self._use_unicode:
d = pretty_atom('Differential')
else:
d = 'd'
field = diff._form_field
if hasattr(field, '_coord_sys'):
string = field._coord_sys.symbols[field._index].name
return self._print(d + ' ' + pretty_symbol(string))
else:
pform = self._print(field)
pform = prettyForm(*pform.parens())
return prettyForm(*pform.left(d))
def _print_Tr(self, p):
#TODO: Handle indices
pform = self._print(p.args[0])
pform = prettyForm(*pform.left('%s(' % (p.__class__.__name__)))
pform = prettyForm(*pform.right(')'))
return pform
def _print_primenu(self, e):
pform = self._print(e.args[0])
pform = prettyForm(*pform.parens())
if self._use_unicode:
pform = prettyForm(*pform.left(greek_unicode['nu']))
else:
pform = prettyForm(*pform.left('nu'))
return pform
def _print_primeomega(self, e):
pform = self._print(e.args[0])
pform = prettyForm(*pform.parens())
if self._use_unicode:
pform = prettyForm(*pform.left(greek_unicode['Omega']))
else:
pform = prettyForm(*pform.left('Omega'))
return pform
def _print_Quantity(self, e):
if e.name.name == 'degree':
if self._use_unicode:
pform = self._print(pretty_atom('Degree'))
else:
pform = self._print(chr(176))
return pform
else:
return self.emptyPrinter(e)
def _print_AssignmentBase(self, e):
op = prettyForm(' ' + xsym(e.op) + ' ')
l = self._print(e.lhs)
r = self._print(e.rhs)
pform = prettyForm(*stringPict.next(l, op, r))
return pform
def _print_Str(self, s):
return self._print(s.name)
@print_function(PrettyPrinter)
def pretty(expr, **settings):
"""Returns a string containing the prettified form of expr.
For information on keyword arguments see pretty_print function.
"""
pp = PrettyPrinter(settings)
# XXX: this is an ugly hack, but at least it works
use_unicode = pp._settings['use_unicode']
uflag = pretty_use_unicode(use_unicode)
try:
return pp.doprint(expr)
finally:
pretty_use_unicode(uflag)
def pretty_print(expr, **kwargs):
"""Prints expr in pretty form.
pprint is just a shortcut for this function.
Parameters
==========
expr : expression
The expression to print.
wrap_line : bool, optional (default=True)
Line wrapping enabled/disabled.
num_columns : int or None, optional (default=None)
Number of columns before line breaking (default to None which reads
the terminal width), useful when using SymPy without terminal.
use_unicode : bool or None, optional (default=None)
Use unicode characters, such as the Greek letter pi instead of
the string pi.
full_prec : bool or string, optional (default="auto")
Use full precision.
order : bool or string, optional (default=None)
Set to 'none' for long expressions if slow; default is None.
use_unicode_sqrt_char : bool, optional (default=True)
Use compact single-character square root symbol (when unambiguous).
root_notation : bool, optional (default=True)
Set to 'False' for printing exponents of the form 1/n in fractional form.
By default exponent is printed in root form.
mat_symbol_style : string, optional (default="plain")
Set to "bold" for printing MatrixSymbols using a bold mathematical symbol face.
By default the standard face is used.
imaginary_unit : string, optional (default="i")
Letter to use for imaginary unit when use_unicode is True.
Can be "i" (default) or "j".
"""
print(pretty(expr, **kwargs))
pprint = pretty_print
def pager_print(expr, **settings):
"""Prints expr using the pager, in pretty form.
This invokes a pager command using pydoc. Lines are not wrapped
automatically. This routine is meant to be used with a pager that allows
sideways scrolling, like ``less -S``.
Parameters are the same as for ``pretty_print``. If you wish to wrap lines,
pass ``num_columns=None`` to auto-detect the width of the terminal.
"""
from pydoc import pager
from locale import getpreferredencoding
if 'num_columns' not in settings:
settings['num_columns'] = 500000 # disable line wrap
pager(pretty(expr, **settings).encode(getpreferredencoding()))
| PrettyPrinter |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 112982,
"end": 113435
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("lockable_id", "lock_reason", "client_mutation_id")
lockable_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="lockableId")
lock_reason = sgqlc.types.Field(LockReason, graphql_name="lockReason")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| LockLockableInput |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 142434,
"end": 154427
} | class ____(Response):
"""
Response of datasets.get_by_name endpoint.
:param dataset: Dataset info
:type dataset: Dataset
"""
_service = "datasets"
_action = "get_by_name"
_version = "2.23"
_schema = {
"definitions": {
"dataset": {
"properties": {
"comment": {"description": "", "type": ["string", "null"]},
"company": {
"description": "Company ID",
"type": ["string", "null"],
},
"created": {
"description": "Dataset creation time (UTC)",
"format": "date-time",
"type": ["string", "null"],
},
"display_stats": {
"description": "Calculated statistics for the latest committed or published version",
"oneOf": [
{"$ref": "#/definitions/statistics"},
{"type": "null"},
],
},
"display_version_name": {
"description": "The name of the version from which statistics are taken",
"type": ["string", "null"],
},
"head_version": {
"description": (
"The most recent version for write operations. Calculated as the non-published version with"
" the longest path to the root."
),
"oneOf": [{"$ref": "#/definitions/version"}, {"type": "null"}],
},
"id": {"description": "Dataset ID", "type": ["string", "null"]},
"last_update": {
"description": (
"Time of last update (UTC). Updated on dataset update; on any version operation:\nwhen"
" version is created, modified, committed, published or deleted; and on any frame"
" operation: when frames are added,\nmodified or deleted."
),
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": True,
"description": "User-provided metadata",
"type": ["object", "null"],
},
"name": {
"description": "Dataset name",
"type": ["string", "null"],
},
"paradigm": {
"description": (
"'single_version' for datasets whose version tree has only one path, 'general' otherwise"
),
"oneOf": [
{"$ref": "#/definitions/version_paradigm_enum"},
{"type": "null"},
],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"system_tags": {
"description": (
"List of system tags. This field is reserved for system use, please don't use it."
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "List of user-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"terms_of_use": {
"description": "Terms of use string",
"type": ["string", "null"],
},
"user": {
"description": "Associated user ID",
"type": ["string", "null"],
},
"version_count": {
"description": "Amount of versions in dataset. Only supported by datasets.get_all.",
"type": ["integer", "null"],
},
},
"type": "object",
},
"stat_count": {
"properties": {
"count": {
"description": "Item name",
"type": ["integer", "null"],
},
"name": {
"description": "Number of appearances",
"type": ["string", "null"],
},
},
"type": "object",
},
"statistics": {
"properties": {
"content_types": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of content type counts for the version (e.g.\n 'image/jpeg',"
" 'image/png', 'video/mp4')"
),
},
"type": ["array", "null"],
},
"frames": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of frame counts, indicating the\n type of frames included in"
" the version (annotated/"
),
},
"type": ["array", "null"],
},
"labels": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of labels' counts,\n indicating the categories included in the"
" version"
),
},
"type": ["array", "null"],
},
},
"type": "object",
},
"version": {
"properties": {
"comment": {
"description": "Version comment",
"type": ["string", "null"],
},
"committed": {
"description": "Commit time",
"format": "date-time",
"type": ["string", "null"],
},
"committed_frames_ts": {
"description": "Timestamp of last committed frame",
"type": ["number", "null"],
},
"committed_rois_ts": {
"description": "Timestamp of last committed ROI",
"type": ["number", "null"],
},
"company": {
"description": "Company ID",
"type": ["string", "null"],
},
"created": {
"description": "Version creation time (UTC) ",
"format": "date-time",
"type": ["string", "null"],
},
"dataset": {
"description": "Datset ID",
"type": ["string", "null"],
},
"es_index": {
"description": "Name of elasticsearch index",
"type": ["string", "null"],
},
"id": {"description": "Version ID", "type": ["string", "null"]},
"last_frames_update": {
"description": "Last time version was created, committed or frames were updated or saved",
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": True,
"description": "User-provided metadata",
"type": ["object", "null"],
},
"name": {
"description": "Version name",
"type": ["string", "null"],
},
"parent": {
"description": "Version parent ID",
"type": ["string", "null"],
},
"published": {
"description": "Publish time",
"format": "date-time",
"type": ["string", "null"],
},
"stats": {
"description": "Version statistics",
"oneOf": [
{"$ref": "#/definitions/statistics"},
{"type": "null"},
],
},
"status": {
"description": "Version status",
"oneOf": [
{"$ref": "#/definitions/version_status_enum"},
{"type": "null"},
],
},
"system_tags": {
"description": (
"List of system tags. This field is reserved for system use, please don't use it."
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "List of user-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of the task which created the version",
"type": ["string", "null"],
},
"user": {
"description": "Associated user ID",
"type": ["string", "null"],
},
},
"type": "object",
},
"version_paradigm_enum": {
"enum": ["single_version", "general"],
"type": "string",
},
"version_status_enum": {
"enum": ["draft", "committing", "committed", "published"],
"type": "string",
},
},
"properties": {
"dataset": {
"description": "Dataset info",
"oneOf": [{"$ref": "#/definitions/dataset"}, {"type": "null"}],
}
},
"type": "object",
}
def __init__(self, dataset=None, **kwargs):
super(GetByNameResponse, self).__init__(**kwargs)
self.dataset = dataset
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
if isinstance(value, dict):
value = Dataset.from_dict(value)
else:
self.assert_isinstance(value, "dataset", Dataset)
self._property_dataset = value
| GetByNameResponse |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 105767,
"end": 108540
} | class ____(SingleContinuousDistribution):
_argnames = ('a', 'b', 'c', 'd')
@property
def set(self):
return Interval(self.a, self.d)
@staticmethod
def check(a, b, c, d):
_value_check(a < d, "Lower bound parameter a < %s. a = %s"%(d, a))
_value_check((a <= b, b < c),
"Level start parameter b must be in range [%s, %s). b = %s"%(a, c, b))
_value_check((b < c, c <= d),
"Level end parameter c must be in range (%s, %s]. c = %s"%(b, d, c))
_value_check(d >= c, "Upper bound parameter d > %s. d = %s"%(c, d))
def pdf(self, x):
a, b, c, d = self.a, self.b, self.c, self.d
return Piecewise(
(2*(x-a) / ((b-a)*(d+c-a-b)), And(a <= x, x < b)),
(2 / (d+c-a-b), And(b <= x, x < c)),
(2*(d-x) / ((d-c)*(d+c-a-b)), And(c <= x, x <= d)),
(S.Zero, True))
def Trapezoidal(name, a, b, c, d):
r"""
Create a continuous random variable with a trapezoidal distribution.
Explanation
===========
The density of the trapezoidal distribution is given by
.. math::
f(x) := \begin{cases}
0 & \mathrm{for\ } x < a, \\
\frac{2(x-a)}{(b-a)(d+c-a-b)} & \mathrm{for\ } a \le x < b, \\
\frac{2}{d+c-a-b} & \mathrm{for\ } b \le x < c, \\
\frac{2(d-x)}{(d-c)(d+c-a-b)} & \mathrm{for\ } c \le x < d, \\
0 & \mathrm{for\ } d < x.
\end{cases}
Parameters
==========
a : Real number, :math:`a < d`
b : Real number, :math:`a \le b < c`
c : Real number, :math:`b < c \le d`
d : Real number
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Trapezoidal, density
>>> from sympy import Symbol, pprint
>>> a = Symbol("a")
>>> b = Symbol("b")
>>> c = Symbol("c")
>>> d = Symbol("d")
>>> z = Symbol("z")
>>> X = Trapezoidal("x", a,b,c,d)
>>> pprint(density(X)(z), use_unicode=False)
/ -2*a + 2*z
|------------------------- for And(a <= z, b > z)
|(-a + b)*(-a - b + c + d)
|
| 2
| -------------- for And(b <= z, c > z)
< -a - b + c + d
|
| 2*d - 2*z
|------------------------- for And(d >= z, c <= z)
|(-c + d)*(-a - b + c + d)
|
\ 0 otherwise
References
==========
.. [1] https://en.wikipedia.org/wiki/Trapezoidal_distribution
"""
return rv(name, TrapezoidalDistribution, (a, b, c, d))
#-------------------------------------------------------------------------------
# Triangular distribution ------------------------------------------------------
| TrapezoidalDistribution |
python | django__django | tests/model_forms/models.py | {
"start": 14867,
"end": 15654
} | class ____(models.Model):
name = models.CharField(max_length=100)
category = models.CharField(max_length=50, default="uncategorized")
price = models.DecimalField(max_digits=10, decimal_places=2, default=0)
class Meta:
required_db_features = {"supports_table_check_constraints"}
constraints = [
models.UniqueConstraint(
"name",
"category",
name="unique_name_category",
violation_error_message="This product already exists.",
),
models.CheckConstraint(
condition=models.Q(price__gt=0),
name="price_gte_zero",
violation_error_message="Price must be greater than zero.",
),
]
| ConstraintsModel |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 112826,
"end": 115860
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Qwen3OmniMoeConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Qwen3OmniMoeConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| Qwen3OmniMoeRotaryEmbedding |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 115884,
"end": 117870
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.hypot(x1, x2)
def compute_output_spec(self, x1, x2):
dtype = dtypes.result_type(x1.dtype, x2.dtype)
if dtype in ["int8", "int16", "int32", "uint8", "uint16", "uint32"]:
dtype = backend.floatx()
elif dtype == "int64":
dtype = "float64"
return KerasTensor(broadcast_shapes(x1.shape, x2.shape), dtype=dtype)
@keras_export(["keras.ops.hypot", "keras.ops.numpy.hypot"])
def hypot(x1, x2):
"""Element-wise hypotenuse of right triangles with legs `x1` and `x2`.
This is equivalent to computing `sqrt(x1**2 + x2**2)` element-wise,
with shape determined by broadcasting.
Args:
x1: A tensor, representing the first leg of the right triangle.
x2: A tensor, representing the second leg of the right triangle.
Returns:
A tensor with a shape determined by broadcasting `x1` and `x2`.
Example:
>>> x1 = keras.ops.convert_to_tensor([3.0, 4.0, 5.0])
>>> x2 = keras.ops.convert_to_tensor([4.0, 3.0, 12.0])
>>> keras.ops.hypot(x1, x2)
array([5., 5., 13.], dtype=float32)
>>> x1 = keras.ops.convert_to_tensor([[1, 2], [3, 4]])
>>> x2 = keras.ops.convert_to_tensor([1, 1])
>>> keras.ops.hypot(x1, x2)
array([[1.41421356 2.23606798],
[3.16227766 4.12310563]], dtype=float32)
"""
if any_symbolic_tensors((x1, x2)):
return Hypot().symbolic_call(x1, x2)
return backend.numpy.hypot(x1, x2)
@keras_export(["keras.ops.identity", "keras.ops.numpy.identity"])
def identity(n, dtype=None):
"""Return the identity tensor.
The identity tensor is a square tensor with ones on the main diagonal and
zeros elsewhere.
Args:
n: Number of rows (and columns) in the `n x n` output tensor.
dtype: Data type of the output tensor.
Returns:
The identity tensor.
"""
return backend.numpy.identity(n, dtype=dtype)
| Hypot |
python | jazzband__django-oauth-toolkit | oauth2_provider/contrib/rest_framework/permissions.py | {
"start": 1908,
"end": 2558
} | class ____(TokenHasScope):
"""
The request is authenticated as a user and the token used has the right scope
"""
def get_scopes(self, request, view):
try:
required_scopes = super().get_scopes(request, view)
except ImproperlyConfigured:
required_scopes = []
# TODO: code duplication!! see dispatch in ReadWriteScopedResourceMixin
if request.method.upper() in SAFE_METHODS:
read_write_scope = oauth2_settings.READ_SCOPE
else:
read_write_scope = oauth2_settings.WRITE_SCOPE
return required_scopes + [read_write_scope]
| TokenHasReadWriteScope |
python | kubernetes-client__python | kubernetes/client/api/policy_api.py | {
"start": 543,
"end": 5178
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/policy/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| PolicyApi |
python | pallets__click | src/click/_compat.py | {
"start": 2074,
"end": 14039
} | class ____:
"""The new io interface needs more from streams than streams
traditionally implement. As such, this fix-up code is necessary in
some circumstances.
The forcing of readable and writable flags are there because some tools
put badly patched objects on sys (one such offender are certain version
of jupyter notebook).
"""
def __init__(
self,
stream: t.BinaryIO,
force_readable: bool = False,
force_writable: bool = False,
):
self._stream = stream
self._force_readable = force_readable
self._force_writable = force_writable
def __getattr__(self, name: str) -> t.Any:
return getattr(self._stream, name)
def read1(self, size: int) -> bytes:
f = getattr(self._stream, "read1", None)
if f is not None:
return t.cast(bytes, f(size))
return self._stream.read(size)
def readable(self) -> bool:
if self._force_readable:
return True
x = getattr(self._stream, "readable", None)
if x is not None:
return t.cast(bool, x())
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self) -> bool:
if self._force_writable:
return True
x = getattr(self._stream, "writable", None)
if x is not None:
return t.cast(bool, x())
try:
self._stream.write(b"")
except Exception:
try:
self._stream.write(b"")
except Exception:
return False
return True
def seekable(self) -> bool:
x = getattr(self._stream, "seekable", None)
if x is not None:
return t.cast(bool, x())
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
def _is_binary_reader(stream: t.IO[t.Any], default: bool = False) -> bool:
try:
return isinstance(stream.read(0), bytes)
except Exception:
return default
# This happens in some cases where the stream was already
# closed. In this case, we assume the default.
def _is_binary_writer(stream: t.IO[t.Any], default: bool = False) -> bool:
try:
stream.write(b"")
except Exception:
try:
stream.write("")
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream: t.IO[t.Any]) -> t.BinaryIO | None:
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_reader(stream, False):
return t.cast(t.BinaryIO, stream)
buf = getattr(stream, "buffer", None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return t.cast(t.BinaryIO, buf)
return None
def _find_binary_writer(stream: t.IO[t.Any]) -> t.BinaryIO | None:
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return t.cast(t.BinaryIO, stream)
buf = getattr(stream, "buffer", None)
# Same situation here; this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_writer(buf, True):
return t.cast(t.BinaryIO, buf)
return None
def _stream_is_misconfigured(stream: t.TextIO) -> bool:
"""A stream is misconfigured if its encoding is ASCII."""
# If the stream does not have an encoding set, we assume it's set
# to ASCII. This appears to happen in certain unittest
# environments. It's not quite clear what the correct behavior is
# but this at least will force Click to recover somehow.
return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii")
def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: str | None) -> bool:
"""A stream attribute is compatible if it is equal to the
desired value or the desired value is unset and the attribute
has a value.
"""
stream_value = getattr(stream, attr, None)
return stream_value == value or (value is None and stream_value is not None)
def _is_compatible_text_stream(
stream: t.TextIO, encoding: str | None, errors: str | None
) -> bool:
"""Check if a stream's encoding and errors attributes are
compatible with the desired values.
"""
return _is_compat_stream_attr(
stream, "encoding", encoding
) and _is_compat_stream_attr(stream, "errors", errors)
def _force_correct_text_stream(
text_stream: t.IO[t.Any],
encoding: str | None,
errors: str | None,
is_binary: t.Callable[[t.IO[t.Any], bool], bool],
find_binary: t.Callable[[t.IO[t.Any]], t.BinaryIO | None],
force_readable: bool = False,
force_writable: bool = False,
) -> t.TextIO:
if is_binary(text_stream, False):
binary_reader = t.cast(t.BinaryIO, text_stream)
else:
text_stream = t.cast(t.TextIO, text_stream)
# If the stream looks compatible, and won't default to a
# misconfigured ascii encoding, return it as-is.
if _is_compatible_text_stream(text_stream, encoding, errors) and not (
encoding is None and _stream_is_misconfigured(text_stream)
):
return text_stream
# Otherwise, get the underlying binary reader.
possible_binary_reader = find_binary(text_stream)
# If that's not possible, silently use the original reader
# and get mojibake instead of exceptions.
if possible_binary_reader is None:
return text_stream
binary_reader = possible_binary_reader
# Default errors to replace instead of strict in order to get
# something that works.
if errors is None:
errors = "replace"
# Wrap the binary stream in a text stream with the correct
# encoding parameters.
return _make_text_stream(
binary_reader,
encoding,
errors,
force_readable=force_readable,
force_writable=force_writable,
)
def _force_correct_text_reader(
text_reader: t.IO[t.Any],
encoding: str | None,
errors: str | None,
force_readable: bool = False,
) -> t.TextIO:
return _force_correct_text_stream(
text_reader,
encoding,
errors,
_is_binary_reader,
_find_binary_reader,
force_readable=force_readable,
)
def _force_correct_text_writer(
text_writer: t.IO[t.Any],
encoding: str | None,
errors: str | None,
force_writable: bool = False,
) -> t.TextIO:
return _force_correct_text_stream(
text_writer,
encoding,
errors,
_is_binary_writer,
_find_binary_writer,
force_writable=force_writable,
)
def get_binary_stdin() -> t.BinaryIO:
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise RuntimeError("Was not able to determine binary stream for sys.stdin.")
return reader
def get_binary_stdout() -> t.BinaryIO:
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise RuntimeError("Was not able to determine binary stream for sys.stdout.")
return writer
def get_binary_stderr() -> t.BinaryIO:
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise RuntimeError("Was not able to determine binary stream for sys.stderr.")
return writer
def get_text_stdin(encoding: str | None = None, errors: str | None = None) -> t.TextIO:
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True)
def get_text_stdout(encoding: str | None = None, errors: str | None = None) -> t.TextIO:
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True)
def get_text_stderr(encoding: str | None = None, errors: str | None = None) -> t.TextIO:
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
if rv is not None:
return rv
return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True)
def _wrap_io_open(
file: str | os.PathLike[str] | int,
mode: str,
encoding: str | None,
errors: str | None,
) -> t.IO[t.Any]:
"""Handles not passing ``encoding`` and ``errors`` in binary mode."""
if "b" in mode:
return open(file, mode)
return open(file, mode, encoding=encoding, errors=errors)
def open_stream(
filename: str | os.PathLike[str],
mode: str = "r",
encoding: str | None = None,
errors: str | None = "strict",
atomic: bool = False,
) -> tuple[t.IO[t.Any], bool]:
binary = "b" in mode
filename = os.fspath(filename)
# Standard streams first. These are simple because they ignore the
# atomic flag. Use fsdecode to handle Path("-").
if os.fsdecode(filename) == "-":
if any(m in mode for m in ["w", "a", "x"]):
if binary:
return get_binary_stdout(), False
return get_text_stdout(encoding=encoding, errors=errors), False
if binary:
return get_binary_stdin(), False
return get_text_stdin(encoding=encoding, errors=errors), False
# Non-atomic writes directly go out through the regular open functions.
if not atomic:
return _wrap_io_open(filename, mode, encoding, errors), True
# Some usability stuff for atomic writes
if "a" in mode:
raise ValueError(
"Appending to an existing file is not supported, because that"
" would involve an expensive `copy`-operation to a temporary"
" file. Open the file in normal `w`-mode and copy explicitly"
" if that's what you're after."
)
if "x" in mode:
raise ValueError("Use the `overwrite`-parameter instead.")
if "w" not in mode:
raise ValueError("Atomic writes only make sense with `w`-mode.")
# Atomic writes are more complicated. They work by opening a file
# as a proxy in the same folder and then using the fdopen
# functionality to wrap it in a Python file. Then we wrap it in an
# atomic file that moves the file over on close.
import errno
import random
try:
perm: int | None = os.stat(filename).st_mode
except OSError:
perm = None
flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
if binary:
flags |= getattr(os, "O_BINARY", 0)
while True:
tmp_filename = os.path.join(
os.path.dirname(filename),
f".__atomic-write{random.randrange(1 << 32):08x}",
)
try:
fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm)
break
except OSError as e:
if e.errno == errno.EEXIST or (
os.name == "nt"
and e.errno == errno.EACCES
and os.path.isdir(e.filename)
and os.access(e.filename, os.W_OK)
):
continue
raise
if perm is not None:
os.chmod(tmp_filename, perm) # in case perm includes bits in umask
f = _wrap_io_open(fd, mode, encoding, errors)
af = _AtomicFile(f, tmp_filename, os.path.realpath(filename))
return t.cast(t.IO[t.Any], af), True
| _FixupStream |
python | ray-project__ray | python/ray/llm/_internal/batch/processor/vllm_engine_proc.py | {
"start": 2026,
"end": 12074
} | class ____(OfflineProcessorConfig):
"""The configuration for the vLLM engine processor."""
# vLLM stage configurations.
engine_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="The kwargs to pass to the vLLM engine. See "
"https://docs.vllm.ai/en/latest/serving/engine_args.html "
"for more details.",
)
task_type: vLLMTaskType = Field(
default=vLLMTaskType.GENERATE,
description="The task type to use. If not specified, will use "
"'generate' by default.",
)
# LoRA configurations.
dynamic_lora_loading_path: Optional[str] = Field(
default=None,
description="The path to the dynamic LoRA adapter. It is expected "
"to hold subfolders each for a different lora checkpoint. If not "
"specified and LoRA is enabled, then the 'model' in LoRA "
"requests will be interpreted as model ID used by HF transformers.",
)
# Custom placement group config for TP/PP.
placement_group_config: Optional[Dict[str, Any]] = Field(
default=None,
description="Ray placement group configuration for scheduling vLLM engine workers. "
"Should be a dictionary with 'bundles' (list of resource dicts, e.g., {'CPU': 1, 'GPU': 1}) "
"and an optional 'strategy' key ('PACK', 'STRICT_PACK', 'SPREAD', or 'STRICT_SPREAD'). "
"For ray distributed executor backend, each bundle must specify at most one GPU. "
"For mp backend, the 'strategy' field is ignored.",
)
@root_validator(pre=True)
def validate_task_type(cls, values):
task_type_str = values.get("task_type", "generate")
values["task_type"] = vLLMTaskType(task_type_str)
return values
@root_validator(pre=True)
def validate_placement_group_config(cls, values):
placement_group_config = values.get("placement_group_config")
if placement_group_config is not None:
values["placement_group_config"] = PlacementGroupSchema(
**placement_group_config
).model_dump()
return values
def build_vllm_engine_processor(
config: vLLMEngineProcessorConfig,
chat_template_kwargs: Optional[Dict[str, Any]] = None,
preprocess: Optional[UserDefinedFunction] = None,
postprocess: Optional[UserDefinedFunction] = None,
preprocess_map_kwargs: Optional[Dict[str, Any]] = None,
postprocess_map_kwargs: Optional[Dict[str, Any]] = None,
telemetry_agent: Optional[TelemetryAgent] = None,
) -> Processor:
"""Construct a Processor and configure stages.
Args:
config: The configuration for the processor.
chat_template_kwargs: The optional kwargs to pass to apply_chat_template.
preprocess: An optional lambda function that takes a row (dict) as input
and returns a preprocessed row (dict). The output row must contain the
required fields for the following processing stages.
postprocess: An optional lambda function that takes a row (dict) as input
and returns a postprocessed row (dict).
preprocess_map_kwargs: Optional kwargs to pass to Dataset.map() for the
preprocess stage (e.g., num_cpus, memory, concurrency).
postprocess_map_kwargs: Optional kwargs to pass to Dataset.map() for the
postprocess stage (e.g., num_cpus, memory, concurrency).
telemetry_agent: An optional telemetry agent for collecting usage telemetry.
Returns:
The constructed processor.
"""
ray.init(runtime_env=config.runtime_env, ignore_reinit_error=True)
stages = []
# Prepare processor defaults for merging into stage configs
processor_defaults = {
"batch_size": config.batch_size,
"concurrency": config.concurrency,
"runtime_env": config.runtime_env,
"model_source": config.model_source,
}
# Resolve and build PrepareImageStage if enabled
image_stage_cfg = resolve_stage_config(
config.prepare_image_stage,
PrepareImageStageConfig,
processor_defaults,
)
if image_stage_cfg.enabled:
stages.append(
PrepareImageStage(
map_batches_kwargs=build_cpu_stage_map_kwargs(image_stage_cfg),
)
)
# Resolve and build ChatTemplateStage if enabled
chat_template_stage_cfg = resolve_stage_config(
getattr(config, "chat_template_stage", config.apply_chat_template),
ChatTemplateStageConfig,
processor_defaults,
)
if chat_template_stage_cfg.enabled:
stages.append(
ChatTemplateStage(
fn_constructor_kwargs=dict(
model=chat_template_stage_cfg.model_source,
chat_template=get_value_or_fallback(
chat_template_stage_cfg.chat_template, config.chat_template
),
chat_template_kwargs=get_value_or_fallback(
chat_template_stage_cfg.chat_template_kwargs,
chat_template_kwargs,
),
),
map_batches_kwargs=build_cpu_stage_map_kwargs(chat_template_stage_cfg),
)
)
# Resolve and build TokenizeStage if enabled
tokenize_stage_cfg = resolve_stage_config(
getattr(config, "tokenize_stage", config.tokenize),
TokenizerStageConfig,
processor_defaults,
)
if tokenize_stage_cfg.enabled:
stages.append(
TokenizeStage(
fn_constructor_kwargs=dict(
model=tokenize_stage_cfg.model_source,
),
map_batches_kwargs=build_cpu_stage_map_kwargs(tokenize_stage_cfg),
)
)
# Core stage -- the vLLM engine.
stages.append(
vLLMEngineStage(
fn_constructor_kwargs=dict(
batch_size=config.batch_size,
max_concurrent_batches=config.max_concurrent_batches,
model=config.model_source,
engine_kwargs=config.engine_kwargs,
task_type=config.task_type,
max_pending_requests=config.max_pending_requests,
dynamic_lora_loading_path=config.dynamic_lora_loading_path,
placement_group_config=config.placement_group_config,
),
map_batches_kwargs=dict(
zero_copy_batch=True,
# The number of running replicas. This is a deprecated field, but
# we need to set `max_tasks_in_flight_per_actor` through `compute`,
# which initiates enough many overlapping UDF calls per actor, to
# saturate `max_concurrency`.
compute=ray.data.ActorPoolStrategy(
min_size=config.get_concurrency(autoscaling_enabled=False)[0],
max_size=config.get_concurrency(autoscaling_enabled=False)[1],
max_tasks_in_flight_per_actor=config.experimental.get(
"max_tasks_in_flight_per_actor", DEFAULT_MAX_TASKS_IN_FLIGHT
),
),
# The number of running batches "per actor" in Ray Core level.
# This is used to make sure we overlap batches to avoid the tail
# latency of each batch.
max_concurrency=config.max_concurrent_batches,
accelerator_type=config.accelerator_type,
runtime_env=config.runtime_env,
),
)
)
# Resolve and build DetokenizeStage if enabled
detokenize_stage_cfg = resolve_stage_config(
getattr(config, "detokenize_stage", config.detokenize),
DetokenizeStageConfig,
processor_defaults,
)
if detokenize_stage_cfg.enabled:
stages.append(
DetokenizeStage(
fn_constructor_kwargs=dict(
model=detokenize_stage_cfg.model_source,
),
map_batches_kwargs=build_cpu_stage_map_kwargs(detokenize_stage_cfg),
)
)
# We download the config files here so that we can report the underlying architecture to the telemetry system.
# This should be a lightweight operation.
if config.engine_kwargs.get("load_format", None) in STREAMING_LOAD_FORMATS:
download_model_mode = NodeModelDownloadable.EXCLUDE_SAFETENSORS
else:
download_model_mode = NodeModelDownloadable.TOKENIZER_ONLY
model_path = download_model_files(
model_id=config.model_source,
mirror_config=None,
download_model=download_model_mode,
download_extra_files=False,
)
hf_config = transformers.AutoConfig.from_pretrained(
model_path,
trust_remote_code=config.engine_kwargs.get("trust_remote_code", False),
)
architectures = getattr(hf_config, "architectures", [])
architecture = architectures[0] if architectures else DEFAULT_MODEL_ARCHITECTURE
telemetry_agent = get_or_create_telemetry_agent()
telemetry_agent.push_telemetry_report(
BatchModelTelemetry(
processor_config_name=type(config).__name__,
model_architecture=architecture,
batch_size=config.batch_size,
accelerator_type=config.accelerator_type or DEFAULT_GPU_TYPE,
concurrency=config.concurrency,
task_type=vLLMTaskType(config.task_type),
pipeline_parallel_size=config.engine_kwargs.get(
"pipeline_parallel_size", 1
),
tensor_parallel_size=config.engine_kwargs.get("tensor_parallel_size", 1),
)
)
processor = Processor(
config,
stages,
preprocess=preprocess,
postprocess=postprocess,
preprocess_map_kwargs=preprocess_map_kwargs,
postprocess_map_kwargs=postprocess_map_kwargs,
)
return processor
ProcessorBuilder.register(vLLMEngineProcessorConfig, build_vllm_engine_processor)
| vLLMEngineProcessorConfig |
python | bokeh__bokeh | src/bokeh/models/formatters.py | {
"start": 3054,
"end": 3296
} | class ____(Model):
''' A base class for all tick formatter types.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| TickFormatter |
python | bokeh__bokeh | examples/output/apis/autoload_static.py | {
"start": 736,
"end": 1097
} | class ____(RequestHandler):
def initialize(self, script):
self.script = script
def get(self):
self.write(template.render(script=self.script))
# Normally, you might save the .js files to some location on disk, and serve
# them from there. Here we use this request handler, just to make the example
# completely self-contained.
| IndexHandler |
python | pytorch__pytorch | torch/_dynamo/variables/sdpa.py | {
"start": 651,
"end": 2987
} | class ____(VariableTracker):
"""Represents the c++ params struct for scaled dot product attention.
This is a read-only container."""
@staticmethod
def create(
tx: "InstructionTranslator", value: Any, source: Source
) -> VariableTracker:
from .torch import TorchInGraphFunctionVariable
params = [
VariableTracker.build(tx, getattr(value, p), AttrSource(source, p))
for p in PARAM_NAMES
]
return TorchInGraphFunctionVariable(SDPAParams).call_function(tx, params, {})
def __init__(
self, proxy: Proxy, param_vars: Sequence[VariableTracker], **kwargs: Any
) -> None:
self.proxy = proxy
self.param_vars = param_vars
super().__init__(**kwargs)
def reconstruct(self, codegen: "PyCodegen") -> None:
assert self.source is None
assert self.param_vars is not None
codegen.add_push_null(
lambda: codegen.load_import_from("torch._C", "_SDPAParams")
)
codegen.foreach(self.param_vars)
codegen.extend_output(create_call_function(len(self.param_vars), False))
def as_proxy(self) -> Proxy:
return self.proxy
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
import torch._C
from .builder import wrap_fx_proxy
from .misc import GetAttrVariable
try:
getattr_static(torch._C._SDPAParams, name)
except AttributeError:
import torch._dynamo.graph_break_hints as graph_break_hints
unimplemented(
gb_type="unsupported torch._C._SDPAParams attribute",
context=f"name: {name}",
explanation=f"Unable to fetch attribute {name} from torch._C._SDPAParams.",
hints=[
*graph_break_hints.USER_ERROR,
],
)
proxy = GetAttrVariable.create_getattr_proxy(self.as_proxy(), name)
if self.source is not None:
return wrap_fx_proxy(
tx=tx, proxy=proxy, source=AttrSource(self.source, name)
)
else:
return wrap_fx_proxy(tx=tx, proxy=proxy)
@staticmethod
def is_sdpa_params(value: Any) -> TypeGuard["SDPAParams"]:
return value is SDPAParams
| SDPAParamsVariable |
python | walkccc__LeetCode | solutions/3225. Maximum Score From Grid Operations/3225.py | {
"start": 0,
"end": 1852
} | class ____:
def maximumScore(self, grid: list[list[int]]) -> int:
n = len(grid)
# prefix[j][i] := the sum of the first i elements in the j-th column
prefix = [[0] * (n + 1) for _ in range(n)]
# prevPick[i] := the maximum score up to the previous column, where the
# bottommost selected element in the previous column is in row (i - 1)
prevPick = [0] * (n + 1)
# prevSkip[i] := the maximum score up to the previous column, where the
# bottommost selected element in the column before the previous one is in
# row (i - 1)
prevSkip = [0] * (n + 1)
for j in range(n):
for i in range(n):
prefix[j][i + 1] = prefix[j][i] + grid[i][j]
for j in range(1, n):
currPick = [0] * (n + 1)
currSkip = [0] * (n + 1)
# Consider all possible combinations of the number of current and
# previous selected elements.
for curr in range(n + 1): # the number of current selected elements
for prev in range(n + 1): # the number of previous selected elements
if curr > prev:
# 1. The current bottom is deeper than the previous bottom.
# Get the score of grid[prev..curr)[j - 1] for both pick and skip.
score = prefix[j - 1][curr] - prefix[j - 1][prev]
currPick[curr] = max(currPick[curr], prevSkip[prev] + score)
currSkip[curr] = max(currSkip[curr], prevSkip[prev] + score)
else:
# 2. The previous bottom is deeper than the current bottom.
# Get the score of grid[curr..prev)[j] for pick only.
score = prefix[j][prev] - prefix[j][curr]
currPick[curr] = max(currPick[curr], prevPick[prev] + score)
currSkip[curr] = max(currSkip[curr], prevPick[prev])
prevPick = currPick
prevSkip = currSkip
return max(prevPick)
| Solution |
python | google__pytype | pytype/tests/test_utils.py | {
"start": 7287,
"end": 8037
} | class ____:
"""Mixin providing a method to make a code object from bytecode."""
_HAS_DYNAMIC_ATTRIBUTES = True
def make_code(self, int_array, name="testcode"):
"""Utility method for creating CodeType objects."""
return pycnite.types.CodeType38(
co_argcount=0,
co_posonlyargcount=0,
co_kwonlyargcount=0,
co_nlocals=2,
co_stacksize=2,
co_flags=0,
co_consts=[None, 1, 2],
co_names=[],
co_varnames=["x", "y"],
co_filename="",
co_name=name,
co_firstlineno=1,
co_lnotab=b"",
co_freevars=(),
co_cellvars=(),
co_code=bytes(itertools.chain(*int_array)),
python_version=self.python_version,
)
| MakeCodeMixin |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 7084,
"end": 7430
} | class ____(enum.Flag):
NONE = enum.auto()
ABSTRACT = enum.auto()
COROUTINE = enum.auto()
FINAL = enum.auto()
@classmethod
def abstract_flag(cls, is_abstract): # pylint: disable=invalid-name
# Useful when creating functions directly (other flags aren't needed there).
return cls.ABSTRACT if is_abstract else cls.NONE
| MethodFlag |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-recharge/unit_tests/integration/utils.py | {
"start": 2128,
"end": 2457
} | class ____(TestCase, ABC):
_STREAM_NAME: str
def setUp(self):
self._access_token = "an_access_token"
self._config = config().with_access_token(self._access_token)
def stream_request(self):
return RequestBuilder.get_endpoint(self._STREAM_NAME).with_access_token(self._access_token)
| StreamTestCase |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0019_migrate_protected_versions_to_hidden.py | {
"start": 341,
"end": 565
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0018_add_hidden_field_to_version"),
]
operations = [
migrations.RunPython(forwards_func),
]
| Migration |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/cloud_storage_transfer_service.py | {
"start": 10094,
"end": 14026
} | class ____(BaseTrigger):
"""
CloudDataTransferServiceRunJobTrigger run on the trigger worker to run Cloud Storage Transfer job.
:param job_name: The name of the transfer job
:param project_id: The ID of the project that owns the Transfer Job.
:param poke_interval: Polling period in seconds to check for the status
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
job_name: str,
project_id: str = PROVIDE_PROJECT_ID,
poke_interval: float = 10.0,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
):
super().__init__()
self.job_name = job_name
self.project_id = project_id
self.poke_interval = poke_interval
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize CloudDataTransferServiceRunJobTrigger arguments and classpath."""
return (
f"{self.__class__.__module__}.{self.__class__.__qualname__}",
{
"job_name": self.job_name,
"project_id": self.project_id,
"poke_interval": self.poke_interval,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
},
)
def _get_async_hook(self) -> CloudDataTransferServiceAsyncHook:
return CloudDataTransferServiceAsyncHook(
project_id=self.project_id,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Run the transfer job and yield a TriggerEvent."""
hook = self._get_async_hook()
try:
job_operation = await hook.run_transfer_job(self.job_name)
while True:
job_completed = await job_operation.done()
if job_completed:
yield TriggerEvent(
{
"status": "success",
"message": "Transfer operation run completed successfully",
"job_result": {
"name": job_operation.operation.name,
"metadata": MessageToDict(
job_operation.operation.metadata, preserving_proto_field_name=True
),
"response": MessageToDict(
job_operation.operation.response, preserving_proto_field_name=True
),
},
}
)
return
self.log.info(
"Sleeping for %s seconds.",
self.poke_interval,
)
await asyncio.sleep(self.poke_interval)
except Exception as e:
self.log.exception("Exception occurred while running transfer job")
yield TriggerEvent({"status": "error", "message": str(e)})
| CloudDataTransferServiceRunJobTrigger |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/interfaces.py | {
"start": 89213,
"end": 98633
} | class ____:
"""A set of hooks intended to augment the construction of an
:class:`_engine.Engine` object based on entrypoint names in a URL.
The purpose of :class:`_engine.CreateEnginePlugin` is to allow third-party
systems to apply engine, pool and dialect level event listeners without
the need for the target application to be modified; instead, the plugin
names can be added to the database URL. Target applications for
:class:`_engine.CreateEnginePlugin` include:
* connection and SQL performance tools, e.g. which use events to track
number of checkouts and/or time spent with statements
* connectivity plugins such as proxies
A rudimentary :class:`_engine.CreateEnginePlugin` that attaches a logger
to an :class:`_engine.Engine` object might look like::
import logging
from sqlalchemy.engine import CreateEnginePlugin
from sqlalchemy import event
class LogCursorEventsPlugin(CreateEnginePlugin):
def __init__(self, url, kwargs):
# consume the parameter "log_cursor_logging_name" from the
# URL query
logging_name = url.query.get(
"log_cursor_logging_name", "log_cursor"
)
self.log = logging.getLogger(logging_name)
def update_url(self, url):
"update the URL to one that no longer includes our parameters"
return url.difference_update_query(["log_cursor_logging_name"])
def engine_created(self, engine):
"attach an event listener after the new Engine is constructed"
event.listen(engine, "before_cursor_execute", self._log_event)
def _log_event(
self,
conn,
cursor,
statement,
parameters,
context,
executemany,
):
self.log.info("Plugin logged cursor event: %s", statement)
Plugins are registered using entry points in a similar way as that
of dialects::
entry_points = {
"sqlalchemy.plugins": [
"log_cursor_plugin = myapp.plugins:LogCursorEventsPlugin"
]
}
A plugin that uses the above names would be invoked from a database
URL as in::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?"
"plugin=log_cursor_plugin&log_cursor_logging_name=mylogger"
)
The ``plugin`` URL parameter supports multiple instances, so that a URL
may specify multiple plugins; they are loaded in the order stated
in the URL::
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?"
"plugin=plugin_one&plugin=plugin_twp&plugin=plugin_three"
)
The plugin names may also be passed directly to :func:`_sa.create_engine`
using the :paramref:`_sa.create_engine.plugins` argument::
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test", plugins=["myplugin"]
)
A plugin may consume plugin-specific arguments from the
:class:`_engine.URL` object as well as the ``kwargs`` dictionary, which is
the dictionary of arguments passed to the :func:`_sa.create_engine`
call. "Consuming" these arguments includes that they must be removed
when the plugin initializes, so that the arguments are not passed along
to the :class:`_engine.Dialect` constructor, where they will raise an
:class:`_exc.ArgumentError` because they are not known by the dialect.
As of version 1.4 of SQLAlchemy, arguments should continue to be consumed
from the ``kwargs`` dictionary directly, by removing the values with a
method such as ``dict.pop``. Arguments from the :class:`_engine.URL` object
should be consumed by implementing the
:meth:`_engine.CreateEnginePlugin.update_url` method, returning a new copy
of the :class:`_engine.URL` with plugin-specific parameters removed::
class MyPlugin(CreateEnginePlugin):
def __init__(self, url, kwargs):
self.my_argument_one = url.query["my_argument_one"]
self.my_argument_two = url.query["my_argument_two"]
self.my_argument_three = kwargs.pop("my_argument_three", None)
def update_url(self, url):
return url.difference_update_query(
["my_argument_one", "my_argument_two"]
)
Arguments like those illustrated above would be consumed from a
:func:`_sa.create_engine` call such as::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?"
"plugin=myplugin&my_argument_one=foo&my_argument_two=bar",
my_argument_three="bat",
)
.. versionchanged:: 1.4
The :class:`_engine.URL` object is now immutable; a
:class:`_engine.CreateEnginePlugin` that needs to alter the
:class:`_engine.URL` should implement the newly added
:meth:`_engine.CreateEnginePlugin.update_url` method, which
is invoked after the plugin is constructed.
For migration, construct the plugin in the following way, checking
for the existence of the :meth:`_engine.CreateEnginePlugin.update_url`
method to detect which version is running::
class MyPlugin(CreateEnginePlugin):
def __init__(self, url, kwargs):
if hasattr(CreateEnginePlugin, "update_url"):
# detect the 1.4 API
self.my_argument_one = url.query["my_argument_one"]
self.my_argument_two = url.query["my_argument_two"]
else:
# detect the 1.3 and earlier API - mutate the
# URL directly
self.my_argument_one = url.query.pop("my_argument_one")
self.my_argument_two = url.query.pop("my_argument_two")
self.my_argument_three = kwargs.pop("my_argument_three", None)
def update_url(self, url):
# this method is only called in the 1.4 version
return url.difference_update_query(
["my_argument_one", "my_argument_two"]
)
.. seealso::
:ref:`change_5526` - overview of the :class:`_engine.URL` change which
also includes notes regarding :class:`_engine.CreateEnginePlugin`.
When the engine creation process completes and produces the
:class:`_engine.Engine` object, it is again passed to the plugin via the
:meth:`_engine.CreateEnginePlugin.engine_created` hook. In this hook, additional
changes can be made to the engine, most typically involving setup of
events (e.g. those defined in :ref:`core_event_toplevel`).
""" # noqa: E501
def __init__(self, url: URL, kwargs: Dict[str, Any]):
"""Construct a new :class:`.CreateEnginePlugin`.
The plugin object is instantiated individually for each call
to :func:`_sa.create_engine`. A single :class:`_engine.
Engine` will be
passed to the :meth:`.CreateEnginePlugin.engine_created` method
corresponding to this URL.
:param url: the :class:`_engine.URL` object. The plugin may inspect
the :class:`_engine.URL` for arguments. Arguments used by the
plugin should be removed, by returning an updated :class:`_engine.URL`
from the :meth:`_engine.CreateEnginePlugin.update_url` method.
.. versionchanged:: 1.4
The :class:`_engine.URL` object is now immutable, so a
:class:`_engine.CreateEnginePlugin` that needs to alter the
:class:`_engine.URL` object should implement the
:meth:`_engine.CreateEnginePlugin.update_url` method.
:param kwargs: The keyword arguments passed to
:func:`_sa.create_engine`.
"""
self.url = url
def update_url(self, url: URL) -> URL:
"""Update the :class:`_engine.URL`.
A new :class:`_engine.URL` should be returned. This method is
typically used to consume configuration arguments from the
:class:`_engine.URL` which must be removed, as they will not be
recognized by the dialect. The
:meth:`_engine.URL.difference_update_query` method is available
to remove these arguments. See the docstring at
:class:`_engine.CreateEnginePlugin` for an example.
.. versionadded:: 1.4
"""
raise NotImplementedError()
def handle_dialect_kwargs(
self, dialect_cls: Type[Dialect], dialect_args: Dict[str, Any]
) -> None:
"""parse and modify dialect kwargs"""
def handle_pool_kwargs(
self, pool_cls: Type[Pool], pool_args: Dict[str, Any]
) -> None:
"""parse and modify pool kwargs"""
def engine_created(self, engine: Engine) -> None:
"""Receive the :class:`_engine.Engine`
object when it is fully constructed.
The plugin may make additional changes to the engine, such as
registering engine or connection pool events.
"""
| CreateEnginePlugin |
python | django__django | django/forms/fields.py | {
"start": 47233,
"end": 47273
} | class ____(str):
pass
| InvalidJSONInput |
python | ApeWorX__ape | src/ape_cache/query.py | {
"start": 815,
"end": 18046
} | class ____(QueryAPI):
"""
Default implementation of the :class:`~ape.api.query.QueryAPI`.
Allows for the query of blockchain data using a connected provider.
"""
# Class var for tracking if we detect a scenario where the cache db isn't working
database_bypass = False
def _get_database_file(self, ecosystem_name: str, network_name: str) -> Path:
"""
Allows us to figure out what the file *will be*, mostly used for database management.
Args:
ecosystem_name (str): Name of the ecosystem to store data for (ex: ethereum)
network_name (str): name of the network to store data for (ex: mainnet)
Raises:
:class:`~ape.exceptions.QueryEngineError`: If a local network is provided.
"""
if network_name == LOCAL_NETWORK_NAME:
# NOTE: no need to cache local network, no use for data
raise QueryEngineError("Cannot cache local data")
if "-fork" in network_name:
# NOTE: send query to pull from upstream
network_name = network_name.replace("-fork", "")
return self.config_manager.DATA_FOLDER / ecosystem_name / network_name / "cache.db"
def _get_sqlite_uri(self, database_file: Path) -> str:
"""
Gets a string for the sqlite db URI.
Args:
database_file (`pathlib.Path`): A path to the database file.
Returns:
str
"""
return f"sqlite:///{database_file}"
def init_database(self, ecosystem_name: str, network_name: str):
"""
Initialize the SQLite database for caching of provider data.
Args:
ecosystem_name (str): Name of the ecosystem to store data for (ex: ethereum)
network_name (str): name of the network to store data for (ex: mainnet)
Raises:
:class:`~ape.exceptions.QueryEngineError`: When the database has not been initialized
"""
database_file = self._get_database_file(ecosystem_name, network_name)
if database_file.is_file():
raise QueryEngineError("Database has already been initialized")
# NOTE: Make sure database folder location has been created
database_file.parent.mkdir(exist_ok=True, parents=True)
models.Base.metadata.create_all( # type: ignore
bind=create_engine(self._get_sqlite_uri(database_file), pool_pre_ping=True)
)
def purge_database(self, ecosystem_name: str, network_name: str):
"""
Removes the SQLite database file from disk.
Args:
ecosystem_name (str): Name of the ecosystem to store data for (ex: ethereum)
network_name (str): name of the network to store data for (ex: mainnet)
Raises:
:class:`~ape.exceptions.QueryEngineError`: When the database has not been initialized
"""
database_file = self._get_database_file(ecosystem_name, network_name)
if not database_file.is_file():
raise QueryEngineError("Database must be initialized")
database_file.unlink()
@property
def database_connection(self):
"""
Returns a connection for the currently active network.
**NOTE**: Creates a database if it doesn't exist.
Raises:
:class:`~ape.exceptions.QueryEngineError`: If you are not connected to a provider,
or if the database has not been initialized.
Returns:
Optional[`sqlalchemy.engine.Connection`]
"""
if self.provider.network.is_local:
return None
if not self.network_manager.connected:
raise QueryEngineError("Not connected to a provider")
database_file = self._get_database_file(
self.provider.network.ecosystem.name, self.provider.network.name
)
if not database_file.is_file():
# NOTE: Raising `info` here hints user that they can initialize the cache db
logger.info("`ape-cache` database has not been initialized")
self.database_bypass = True
return None
try:
sqlite_uri = self._get_sqlite_uri(database_file)
return create_engine(sqlite_uri, pool_pre_ping=True).connect()
except QueryEngineError as e:
logger.debug(f"Exception when querying:\n{e}")
return None
except Exception as e:
logger.warning(f"Unhandled exception when querying:\n{e}")
self.database_bypass = True
return None
@singledispatchmethod
def _estimate_query_clause(self, query: QueryType) -> Select:
"""
A singledispatchmethod that returns a select statement.
Args:
query (QueryType): Choice of query type to perform a
check of the number of rows that match the clause.
Raises:
:class:`~ape.exceptions.QueryEngineError`: When given an
incompatible QueryType.
Returns:
`sqlalchemy.sql.expression.Select`
"""
raise QueryEngineError(
"""
Not a compatible QueryType. For more details see our docs
https://docs.apeworx.io/ape/stable/methoddocs/exceptions.html#ape.exceptions.QueryEngineError
"""
)
@_estimate_query_clause.register
def _block_estimate_query_clause(self, query: BlockQuery) -> Select:
return (
select(func.count())
.select_from(Blocks)
.where(Blocks.number >= query.start_block)
.where(Blocks.number <= query.stop_block)
.where(Blocks.number % query.step == 0)
)
@_estimate_query_clause.register
def _transaction_estimate_query_clause(self, query: BlockTransactionQuery) -> Select:
return (
select(func.count())
.select_from(Transactions)
.where(Transactions.block_hash == query.block_id)
)
@_estimate_query_clause.register
def _contract_events_estimate_query_clause(self, query: ContractEventQuery) -> Select:
return (
select(func.count())
.select_from(ContractEvents)
.where(ContractEvents.block_number >= query.start_block)
.where(ContractEvents.block_number <= query.stop_block)
.where(ContractEvents.block_number % query.step == 0)
)
@singledispatchmethod
def _compute_estimate(self, query: QueryType, result: CursorResult) -> Optional[int]:
"""
A singledispatchemethod that computes the time a query
will take to perform from the caching database
"""
return None # can't handle this query
@_compute_estimate.register
def _compute_estimate_block_query(
self,
query: BlockQuery,
result: CursorResult,
) -> Optional[int]:
if result.scalar() == (1 + query.stop_block - query.start_block) // query.step:
# NOTE: Assume 200 msec to get data from database
return 200
# Can't handle this query
# TODO: Allow partial queries
return None
@_compute_estimate.register
def _compute_estimate_block_transaction_query(
self,
query: BlockTransactionQuery,
result: CursorResult,
) -> Optional[int]:
# TODO: Update `transactions` table schema so this query functions properly
# Uncomment below after https://github.com/ApeWorX/ape/issues/994
# if result.scalar() > 0: # type: ignore
# # NOTE: Assume 200 msec to get data from database
# return 200
# Can't handle this query
return None
@_compute_estimate.register
def _compute_estimate_contract_events_query(
self,
query: ContractEventQuery,
result: CursorResult,
) -> Optional[int]:
if result.scalar() == (query.stop_block - query.start_block) // query.step:
# NOTE: Assume 200 msec to get data from database
return 200
# Can't handle this query
# TODO: Allow partial queries
return None
def estimate_query(self, query: QueryType) -> Optional[int]:
"""
Method called by the client to return a query time estimate.
Args:
query (QueryType): Choice of query type to perform a
check of the number of rows that match the clause.
Returns:
Optional[int]
"""
# NOTE: Because of Python shortcircuiting, the first time `database_connection` is missing
# this will lock the class var `database_bypass` in place for the rest of the session
if self.database_bypass or self.database_connection is None:
# No database, or some other issue
return None
try:
with self.database_connection as conn:
result = conn.execute(self._estimate_query_clause(query))
if not result:
return None
return self._compute_estimate(query, result)
except QueryEngineError as err:
logger.debug(f"Bypassing cache database: {err}")
# Note: The reason we return None instead of failing is that we want
# a failure of the query to bypass the query logic so that the
# estimation phase does not fail in `QueryManager`.
return None
@singledispatchmethod
def perform_query(self, query: QueryType) -> Iterator: # type: ignore
"""
Performs the requested query from cache.
Args:
query (QueryType): Choice of query type to perform a
check of the number of rows that match the clause.
Raises:
:class:`~ape.exceptions.QueryEngineError`: When given an
incompatible QueryType, or encounters some sort of error
in the database or estimation logic.
Returns:
Iterator
"""
raise QueryEngineError(
"Not a compatible QueryType. For more details see our docs "
"https://docs.apeworx.io/ape/stable/methoddocs/"
"exceptions.html#ape.exceptions.QueryEngineError"
)
@perform_query.register
def _perform_block_query(self, query: BlockQuery) -> Iterator[BlockAPI]:
with self.database_connection as conn:
result = conn.execute(
select([column(c) for c in query.columns])
.where(Blocks.number >= query.start_block)
.where(Blocks.number <= query.stop_block)
.where(Blocks.number % query.step == 0)
)
if not result:
# NOTE: Should be unreachable if estimated correctly
raise QueryEngineError(f"Could not perform query:\n{query}")
yield from map(
lambda row: self.provider.network.ecosystem.decode_block(dict(row.items())), result
)
@perform_query.register
def _perform_transaction_query(self, query: BlockTransactionQuery) -> Iterator[dict]:
with self.database_connection as conn:
result = conn.execute(
select([Transactions]).where(Transactions.block_hash == query.block_id)
)
if not result:
# NOTE: Should be unreachable if estimated correctly
raise QueryEngineError(f"Could not perform query:\n{query}")
yield from map(lambda row: dict(row.items()), result)
@perform_query.register
def _perform_contract_events_query(self, query: ContractEventQuery) -> Iterator[ContractLog]:
with self.database_connection as conn:
result = conn.execute(
select([column(c) for c in query.columns])
.where(ContractEvents.block_number >= query.start_block)
.where(ContractEvents.block_number <= query.stop_block)
.where(ContractEvents.block_number % query.step == 0)
)
if not result:
# NOTE: Should be unreachable if estimated correctly
raise QueryEngineError(f"Could not perform query:\n{query}")
yield from map(lambda row: ContractLog.model_validate(dict(row.items())), result)
@singledispatchmethod
def _cache_update_clause(self, query: QueryType) -> Insert:
"""
Update cache database Insert statement.
Args:
query (QueryType): Choice of query type to perform a
check of the number of rows that match the clause.
Raises:
:class:`~ape.exceptions.QueryEngineError`: When given an
incompatible QueryType, or encounters some sort of error
in the database or estimation logic.
Returns:
`sqlalchemy.sql.Expression.Insert`
"""
# Can't cache this query
raise QueryEngineError(
"Not a compatible QueryType. For more details see our docs "
"https://docs.apeworx.io/ape/stable/methoddocs/"
"exceptions.html#ape.exceptions.QueryEngineError"
)
@_cache_update_clause.register
def _cache_update_block_clause(self, query: BlockQuery) -> Insert:
return insert(Blocks)
# TODO: Update `transactions` table schema so we can use `EcosystemAPI.decode_receipt`
# Uncomment below after https://github.com/ApeWorX/ape/issues/994
# @_cache_update_clause.register
# def _cache_update_block_txns_clause(self, query: BlockTransactionQuery) -> Insert:
# return insert(Transactions) # type: ignore
@_cache_update_clause.register
def _cache_update_events_clause(self, query: ContractEventQuery) -> Insert:
return insert(ContractEvents)
@singledispatchmethod
def _get_cache_data(
self, query: QueryType, result: Iterator[BaseInterfaceModel]
) -> Optional[list[dict[str, Any]]]:
raise QueryEngineError(
"""
Not a compatible QueryType. For more details see our docs
https://docs.apeworx.io/ape/stable/methoddocs/exceptions.html#ape.exceptions.QueryEngineError
"""
)
@_get_cache_data.register
def _get_block_cache_data(
self, query: BlockQuery, result: Iterator[BaseInterfaceModel]
) -> Optional[list[dict[str, Any]]]:
return [m.model_dump(mode="json", by_alias=False) for m in result]
@_get_cache_data.register
def _get_block_txns_data(
self, query: BlockTransactionQuery, result: Iterator[BaseInterfaceModel]
) -> Optional[list[dict[str, Any]]]:
new_result = []
table_columns = [c.key for c in Transactions.__table__.columns] # type: ignore
txns: list[TransactionAPI] = cast(list[TransactionAPI], result)
for val in [m for m in txns]:
new_dict = {
k: v
for k, v in val.model_dump(mode="json", by_alias=False).items()
if k in table_columns
}
for col in table_columns:
if col == "txn_hash":
new_dict["txn_hash"] = val.txn_hash
elif col == "sender":
new_dict["sender"] = new_dict["sender"].encode()
elif col == "receiver" and "receiver" in new_dict:
new_dict["receiver"] = new_dict["receiver"].encode()
elif col == "receiver" and "receiver" not in new_dict:
new_dict["receiver"] = b""
elif col == "block_hash":
new_dict["block_hash"] = query.block_id
elif col == "signature" and val.signature is not None:
new_dict["signature"] = val.signature.encode_rsv()
elif col not in new_dict:
new_dict[col] = None
new_result.append(new_dict)
return new_result
@_get_cache_data.register
def _get_cache_events_data(
self, query: ContractEventQuery, result: Iterator[BaseInterfaceModel]
) -> Optional[list[dict[str, Any]]]:
return [m.model_dump(mode="json", by_alias=False) for m in result]
def update_cache(self, query: QueryType, result: Iterator[BaseInterfaceModel]):
try:
clause = self._cache_update_clause(query)
except QueryEngineError:
# Cannot handle query type
return
# NOTE: Because of Python shortcircuiting, the first time `database_connection` is missing
# this will lock the class var `database_bypass` in place for the rest of the session
if not self.database_bypass and self.database_connection is not None:
logger.debug(f"Caching query: {query}")
with self.database_connection as conn:
try:
conn.execute(
clause.values( # type: ignore
self._get_cache_data(query, result)
).prefix_with("OR IGNORE")
)
except QueryEngineError as err:
logger.warning(f"Database corruption: {err}")
| CacheQueryProvider |
python | has2k1__plotnine | plotnine/scales/scale_shape.py | {
"start": 2031,
"end": 2081
} | class ____(scale_shape):
pass
| scale_shape_discrete |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 33716,
"end": 34144
} | class ____(FieldValues):
"""
Valid and invalid values for `IPAddressField`
"""
valid_inputs = {
'127.0.0.1': '127.0.0.1',
'192.168.33.255': '192.168.33.255',
}
invalid_inputs = {
'127001': ['Enter a valid IPv4 address.'],
'127.122.111.2231': ['Enter a valid IPv4 address.'],
}
outputs = {}
field = serializers.IPAddressField(protocol='IPv4')
| TestIPv4AddressField |
python | pypa__warehouse | warehouse/macaroons/caveats/_core.py | {
"start": 596,
"end": 693
} | class ____:
def __bool__(self):
return True
@dataclass(frozen=True, slots=True)
| Success |
python | pytorch__pytorch | torch/nn/parallel/distributed.py | {
"start": 7546,
"end": 7647
} | class ____(Enum):
PRE_FORWARD = auto()
POST_FORWARD = auto()
@dataclass
| _BufferCommHookLocation |
python | aimacode__aima-python | games.py | {
"start": 14316,
"end": 14466
} | class ____(TicTacToe):
"""Also known as Five in a row."""
def __init__(self, h=15, v=16, k=5):
TicTacToe.__init__(self, h, v, k)
| Gomoku |
python | pytorch__pytorch | test/distributed/pipelining/test_schedule.py | {
"start": 1569,
"end": 2140
} | class ____(_PipelineStageBase):
def __init__(self, *args, **kwargs):
# Mock the necessary attributes
self.submod = None
self.num_stages = kwargs.get("num_stages", 1)
self.group_size = kwargs.get("group_size", 1)
self.group_rank = kwargs.get("group_rank", 0)
self.group = kwargs.get("group")
def _create_grad_recv_info(self, *args, **kwargs):
return None
def _prepare_forward_infra(self, n_microbatches):
pass
def _prepare_backward_infra(self, n_microbatches):
pass
| MockPipelineStage |
python | astropy__astropy | astropy/io/ascii/core.py | {
"start": 11686,
"end": 13513
} | class ____:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
"""
delimiter: str | None = None
""" one-character string used to separate fields """
def process_line(self, line: str) -> str:
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
"""
return line.strip()
def process_val(self, val: str) -> str:
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals: list[str]) -> str:
if self.delimiter is None:
delimiter = " "
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
| BaseSplitter |
python | pytorch__pytorch | torch/distributed/fsdp/fully_sharded_data_parallel.py | {
"start": 2854,
"end": 2999
} | class ____(Enum):
"""Represents the type of key in an optimizer state-dict."""
PARAM_NAME = auto()
PARAM_ID = auto()
| OptimStateKeyType |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/complete_orm_no_plugin.py | {
"start": 577,
"end": 701
} | class ____(metaclass=DeclarativeMeta):
__abstract__ = True
registry = registry()
metadata = registry.metadata
| Base |
python | pyqtgraph__pyqtgraph | pyqtgraph/multiprocess/remoteproxy.py | {
"start": 32691,
"end": 46863
} | class ____(object):
"""
Proxy to an object stored by the remote process. Proxies are created
by calling Process._import(), Process.transfer(), or by requesting/calling
attributes on existing proxy objects.
For the most part, this object can be used exactly as if it
were a local object::
rsys = proc._import('sys') # returns proxy to sys module on remote process
rsys.stdout # proxy to remote sys.stdout
rsys.stdout.write # proxy to remote sys.stdout.write
rsys.stdout.write('hello') # calls sys.stdout.write('hello') on remote machine
# and returns the result (None)
When calling a proxy to a remote function, the call can be made synchronous
(result of call is returned immediately), asynchronous (result is returned later),
or return can be disabled entirely::
ros = proc._import('os')
## synchronous call; result is returned immediately
pid = ros.getpid()
## asynchronous call
request = ros.getpid(_callSync='async')
while not request.hasResult():
time.sleep(0.01)
pid = request.result()
## disable return when we know it isn't needed
rsys.stdout.write('hello', _callSync='off')
Additionally, values returned from a remote function call are automatically
returned either by value (must be picklable) or by proxy.
This behavior can be forced::
rnp = proc._import('numpy')
arrProxy = rnp.array([1,2,3,4], _returnType='proxy')
arrValue = rnp.array([1,2,3,4], _returnType='value')
The default callSync and returnType behaviors (as well as others) can be set
for each proxy individually using ObjectProxy._setProxyOptions() or globally using
proc.setProxyOptions().
"""
def __init__(self, processId, proxyId, typeStr='', parent=None):
object.__init__(self)
## can't set attributes directly because setattr is overridden.
self.__dict__['_processId'] = processId
self.__dict__['_typeStr'] = typeStr
self.__dict__['_proxyId'] = proxyId
self.__dict__['_attributes'] = ()
## attributes that affect the behavior of the proxy.
## in all cases, a value of None causes the proxy to ask
## its parent event handler to make the decision
self.__dict__['_proxyOptions'] = {
'callSync': None, ## 'sync', 'async', None
'timeout': None, ## float, None
'returnType': None, ## 'proxy', 'value', 'auto', None
'deferGetattr': None, ## True, False, None
'noProxyTypes': None, ## list of types to send by value instead of by proxy
'autoProxy': None,
}
self.__dict__['_handler'] = RemoteEventHandler.getHandler(processId)
self.__dict__['_handler'].registerProxy(self) ## handler will watch proxy; inform remote process when the proxy is deleted.
def _setProxyOptions(self, **kwds):
"""
Change the behavior of this proxy. For all options, a value of None
will cause the proxy to instead use the default behavior defined
by its parent Process.
Options are:
============= =============================================================
callSync 'sync', 'async', 'off', or None.
If 'async', then calling methods will return a Request object
which can be used to inquire later about the result of the
method call.
If 'sync', then calling a method
will block until the remote process has returned its result
or the timeout has elapsed (in this case, a Request object
is returned instead).
If 'off', then the remote process is instructed _not_ to
reply and the method call will return None immediately.
returnType 'auto', 'proxy', 'value', or None.
If 'proxy', then the value returned when calling a method
will be a proxy to the object on the remote process.
If 'value', then attempt to pickle the returned object and
send it back.
If 'auto', then the decision is made by consulting the
'noProxyTypes' option.
autoProxy bool or None. If True, arguments to __call__ are
automatically converted to proxy unless their type is
listed in noProxyTypes (see below). If False, arguments
are left untouched. Use proxy(obj) to manually convert
arguments before sending.
timeout float or None. Length of time to wait during synchronous
requests before returning a Request object instead.
deferGetattr True, False, or None.
If False, all attribute requests will be sent to the remote
process immediately and will block until a response is
received (or timeout has elapsed).
If True, requesting an attribute from the proxy returns a
new proxy immediately. The remote process is _not_ contacted
to make this request. This is faster, but it is possible to
request an attribute that does not exist on the proxied
object. In this case, AttributeError will not be raised
until an attempt is made to look up the attribute on the
remote process.
noProxyTypes List of object types that should _not_ be proxied when
sent to the remote process.
============= =============================================================
"""
for k in kwds:
if k not in self._proxyOptions:
raise KeyError("Unrecognized proxy option '%s'" % k)
self._proxyOptions.update(kwds)
def _getValue(self):
"""
Return the value of the proxied object
(the remote object must be picklable)
"""
return self._handler.getObjValue(self)
def _getProxyOption(self, opt):
val = self._proxyOptions[opt]
if val is None:
return self._handler.getProxyOption(opt)
return val
def _getProxyOptions(self):
return dict([(k, self._getProxyOption(k)) for k in self._proxyOptions])
def __reduce__(self):
return (unpickleObjectProxy, (self._processId, self._proxyId, self._typeStr, self._attributes))
def __repr__(self):
#objRepr = self.__getattr__('__repr__')(callSync='value')
return "<ObjectProxy for process %d, object 0x%x: %s >" % (self._processId, self._proxyId, self._typeStr)
def __getattr__(self, attr, **kwds):
"""
Calls __getattr__ on the remote object and returns the attribute
by value or by proxy depending on the options set (see
ObjectProxy._setProxyOptions and RemoteEventHandler.setProxyOptions)
If the option 'deferGetattr' is True for this proxy, then a new proxy object
is returned _without_ asking the remote object whether the named attribute exists.
This can save time when making multiple chained attribute requests,
but may also defer a possible AttributeError until later, making
them more difficult to debug.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
if opts['deferGetattr'] is True:
return self._deferredAttr(attr)
else:
#opts = self._getProxyOptions()
return self._handler.getObjAttr(self, attr, **opts)
def _deferredAttr(self, attr):
return DeferredObjectProxy(self, attr)
def __call__(self, *args, **kwds):
"""
Attempts to call the proxied object from the remote process.
Accepts extra keyword arguments:
_callSync 'off', 'sync', or 'async'
_returnType 'value', 'proxy', or 'auto'
If the remote call raises an exception on the remote process,
it will be re-raised on the local process.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
return self._handler.callObj(obj=self, args=args, kwds=kwds, **opts)
## Explicitly proxy special methods. Is there a better way to do this??
def _getSpecialAttr(self, attr):
## this just gives us an easy way to change the behavior of the special methods
return self._deferredAttr(attr)
def __getitem__(self, *args):
return self._getSpecialAttr('__getitem__')(*args)
def __setitem__(self, *args):
return self._getSpecialAttr('__setitem__')(*args, _callSync='off')
def __setattr__(self, *args):
return self._getSpecialAttr('__setattr__')(*args, _callSync='off')
def __str__(self, *args):
return self._getSpecialAttr('__str__')(*args, _returnType='value')
def __len__(self, *args):
return self._getSpecialAttr('__len__')(*args)
def __add__(self, *args):
return self._getSpecialAttr('__add__')(*args)
def __sub__(self, *args):
return self._getSpecialAttr('__sub__')(*args)
def __div__(self, *args):
return self._getSpecialAttr('__div__')(*args)
def __truediv__(self, *args):
return self._getSpecialAttr('__truediv__')(*args)
def __floordiv__(self, *args):
return self._getSpecialAttr('__floordiv__')(*args)
def __mul__(self, *args):
return self._getSpecialAttr('__mul__')(*args)
def __pow__(self, *args):
return self._getSpecialAttr('__pow__')(*args)
def __iadd__(self, *args):
return self._getSpecialAttr('__iadd__')(*args, _callSync='off')
def __isub__(self, *args):
return self._getSpecialAttr('__isub__')(*args, _callSync='off')
def __idiv__(self, *args):
return self._getSpecialAttr('__idiv__')(*args, _callSync='off')
def __itruediv__(self, *args):
return self._getSpecialAttr('__itruediv__')(*args, _callSync='off')
def __ifloordiv__(self, *args):
return self._getSpecialAttr('__ifloordiv__')(*args, _callSync='off')
def __imul__(self, *args):
return self._getSpecialAttr('__imul__')(*args, _callSync='off')
def __ipow__(self, *args):
return self._getSpecialAttr('__ipow__')(*args, _callSync='off')
def __rshift__(self, *args):
return self._getSpecialAttr('__rshift__')(*args)
def __lshift__(self, *args):
return self._getSpecialAttr('__lshift__')(*args)
def __irshift__(self, *args):
return self._getSpecialAttr('__irshift__')(*args, _callSync='off')
def __ilshift__(self, *args):
return self._getSpecialAttr('__ilshift__')(*args, _callSync='off')
def __eq__(self, *args):
return self._getSpecialAttr('__eq__')(*args)
def __ne__(self, *args):
return self._getSpecialAttr('__ne__')(*args)
def __lt__(self, *args):
return self._getSpecialAttr('__lt__')(*args)
def __gt__(self, *args):
return self._getSpecialAttr('__gt__')(*args)
def __le__(self, *args):
return self._getSpecialAttr('__le__')(*args)
def __ge__(self, *args):
return self._getSpecialAttr('__ge__')(*args)
def __and__(self, *args):
return self._getSpecialAttr('__and__')(*args)
def __or__(self, *args):
return self._getSpecialAttr('__or__')(*args)
def __xor__(self, *args):
return self._getSpecialAttr('__xor__')(*args)
def __iand__(self, *args):
return self._getSpecialAttr('__iand__')(*args, _callSync='off')
def __ior__(self, *args):
return self._getSpecialAttr('__ior__')(*args, _callSync='off')
def __ixor__(self, *args):
return self._getSpecialAttr('__ixor__')(*args, _callSync='off')
def __mod__(self, *args):
return self._getSpecialAttr('__mod__')(*args)
def __radd__(self, *args):
return self._getSpecialAttr('__radd__')(*args)
def __rsub__(self, *args):
return self._getSpecialAttr('__rsub__')(*args)
def __rdiv__(self, *args):
return self._getSpecialAttr('__rdiv__')(*args)
def __rfloordiv__(self, *args):
return self._getSpecialAttr('__rfloordiv__')(*args)
def __rtruediv__(self, *args):
return self._getSpecialAttr('__rtruediv__')(*args)
def __rmul__(self, *args):
return self._getSpecialAttr('__rmul__')(*args)
def __rpow__(self, *args):
return self._getSpecialAttr('__rpow__')(*args)
def __rrshift__(self, *args):
return self._getSpecialAttr('__rrshift__')(*args)
def __rlshift__(self, *args):
return self._getSpecialAttr('__rlshift__')(*args)
def __rand__(self, *args):
return self._getSpecialAttr('__rand__')(*args)
def __ror__(self, *args):
return self._getSpecialAttr('__ror__')(*args)
def __rxor__(self, *args):
return self._getSpecialAttr('__ror__')(*args)
def __rmod__(self, *args):
return self._getSpecialAttr('__rmod__')(*args)
def __hash__(self):
## Required for python3 since __eq__ is defined.
return id(self)
| ObjectProxy |
python | ray-project__ray | rllib/connectors/module_to_env/remove_single_ts_time_rank_from_batch.py | {
"start": 429,
"end": 2295
} | class ____(ConnectorV2):
"""
Note: This is one of the default module-to-env ConnectorV2 pieces that
are added automatically by RLlib into every module-to-env connector pipeline,
unless `config.add_default_connectors_to_module_to_env_pipeline` is set to
False.
The default module-to-env connector pipeline is:
[
GetActions,
TensorToNumpy,
UnBatchToIndividualItems,
ModuleToAgentUnmapping, # only in multi-agent setups!
RemoveSingleTsTimeRankFromBatch,
[0 or more user defined ConnectorV2 pieces],
NormalizeAndClipActions,
ListifyDataForVectorEnv,
]
"""
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Optional[Dict[str, Any]],
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
# If single ts time-rank had not been added, early out.
if shared_data is None or not shared_data.get("_added_single_ts_time_rank"):
return batch
def _remove_single_ts(item, eps_id, aid, mid):
# Only remove time-rank for modules that are statefule (only for those, a
# timerank has been added).
if mid is None or rl_module[mid].is_stateful():
return tree.map_structure(lambda s: np.squeeze(s, axis=0), item)
return item
for column, column_data in batch.copy().items():
# Skip state_out (doesn't have a time rank).
if column == Columns.STATE_OUT:
continue
self.foreach_batch_item_change_in_place(
batch,
column=column,
func=_remove_single_ts,
)
return batch
| RemoveSingleTsTimeRankFromBatch |
python | gevent__gevent | src/gevent/tests/test__iwait.py | {
"start": 85,
"end": 1205
} | class ____(greentest.TestCase):
def test_noiter(self):
# Test that gevent.iwait returns objects which can be iterated upon
# without additional calls to iter()
sem1 = Semaphore()
sem2 = Semaphore()
gevent.spawn(sem1.release)
ready = next(gevent.iwait((sem1, sem2)))
self.assertEqual(sem1, ready)
def test_iwait_partial(self):
# Test that the iwait context manager allows the iterator to be
# consumed partially without a memory leak.
sem = Semaphore()
let = gevent.spawn(sem.release)
with gevent.iwait((sem,), timeout=0.01) as iterator:
self.assertEqual(sem, next(iterator))
let.get()
def test_iwait_nogarbage(self):
sem1 = Semaphore()
sem2 = Semaphore()
let = gevent.spawn(sem1.release)
with gevent.iwait((sem1, sem2)) as iterator:
self.assertEqual(sem1, next(iterator))
self.assertEqual(sem2.linkcount(), 1)
self.assertEqual(sem2.linkcount(), 0)
let.get()
if __name__ == '__main__':
greentest.main()
| Testiwait |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E731.py | {
"start": 633,
"end": 1919
} | class ____:
from typing import Callable
# OK
f: Callable[[int], int] = lambda x: 2 * x
def scope():
# E731
from typing import Callable
x: Callable[[int], int]
if True:
x = lambda: 1
else:
x = lambda: 2
return x
def scope():
# E731
from typing import Callable, ParamSpec
# ParamSpec cannot be used in this context, so do not preserve the annotation.
P = ParamSpec("P")
f: Callable[P, int] = lambda *args: len(args)
def scope():
# E731
from typing import Callable
f: Callable[[], None] = lambda: None
def scope():
# E731
from typing import Callable
f: Callable[..., None] = lambda a, b: None
def scope():
# E731
from typing import Callable
f: Callable[[int], int] = lambda x: 2 * x
# Let's use the `Callable` type from `collections.abc` instead.
def scope():
# E731
from collections.abc import Callable
f: Callable[[str, int], str] = lambda a, b: a * b
def scope():
# E731
from collections.abc import Callable
f: Callable[[str, int], tuple[str, int]] = lambda a, b: (a, b)
def scope():
# E731
from collections.abc import Callable
f: Callable[[str, int, list[str]], list[str]] = lambda a, b, /, c: [*c, a * b]
| Scope |
python | pandas-dev__pandas | asv_bench/benchmarks/index_object.py | {
"start": 6756,
"end": 7138
} | class ____:
params = [1, 2, 5]
def create_use_drop(self):
idx = Index(list(range(1_000_000)))
idx._engine
def peakmem_gc_instances(self, N):
try:
gc.disable()
for _ in range(N):
self.create_use_drop()
finally:
gc.enable()
from .pandas_vb_common import setup # noqa: F401 isort:skip
| GC |
python | etianen__django-reversion | tests/test_app/tests/test_admin.py | {
"start": 1049,
"end": 1452
} | class ____(LoginMixin, AdminMixin, TestBase):
def testAddView(self):
self.client.post(resolve_url("admin:test_app_testmodelparent_add"), {
"name": "v1",
"parent_name": "parent_v1",
})
obj = TestModelParent.objects.get()
self.assertSingleRevision(
(obj, obj.testmodel_ptr), user=self.user, comment="Added."
)
| AdminAddViewTest |
python | python-pillow__Pillow | src/PIL/PSDraw.py | {
"start": 597,
"end": 6918
} | class ____:
"""
Sets up printing to the given file. If ``fp`` is omitted,
``sys.stdout.buffer`` is assumed.
"""
def __init__(self, fp: IO[bytes] | None = None) -> None:
if not fp:
fp = sys.stdout.buffer
self.fp = fp
def begin_document(self, id: str | None = None) -> None:
"""Set up printing of a document. (Write PostScript DSC header.)"""
# FIXME: incomplete
self.fp.write(
b"%!PS-Adobe-3.0\n"
b"save\n"
b"/showpage { } def\n"
b"%%EndComments\n"
b"%%BeginDocument\n"
)
# self.fp.write(ERROR_PS) # debugging!
self.fp.write(EDROFF_PS)
self.fp.write(VDI_PS)
self.fp.write(b"%%EndProlog\n")
self.isofont: dict[bytes, int] = {}
def end_document(self) -> None:
"""Ends printing. (Write PostScript DSC footer.)"""
self.fp.write(b"%%EndDocument\nrestore showpage\n%%End\n")
if hasattr(self.fp, "flush"):
self.fp.flush()
def setfont(self, font: str, size: int) -> None:
"""
Selects which font to use.
:param font: A PostScript font name
:param size: Size in points.
"""
font_bytes = bytes(font, "UTF-8")
if font_bytes not in self.isofont:
# reencode font
self.fp.write(
b"/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font_bytes, font_bytes)
)
self.isofont[font_bytes] = 1
# rough
self.fp.write(b"/F0 %d /PSDraw-%s F\n" % (size, font_bytes))
def line(self, xy0: tuple[int, int], xy1: tuple[int, int]) -> None:
"""
Draws a line between the two points. Coordinates are given in
PostScript point coordinates (72 points per inch, (0, 0) is the lower
left corner of the page).
"""
self.fp.write(b"%d %d %d %d Vl\n" % (*xy0, *xy1))
def rectangle(self, box: tuple[int, int, int, int]) -> None:
"""
Draws a rectangle.
:param box: A tuple of four integers, specifying left, bottom, width and
height.
"""
self.fp.write(b"%d %d M 0 %d %d Vr\n" % box)
def text(self, xy: tuple[int, int], text: str) -> None:
"""
Draws text at the given position. You must use
:py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method.
"""
text_bytes = bytes(text, "UTF-8")
text_bytes = b"\\(".join(text_bytes.split(b"("))
text_bytes = b"\\)".join(text_bytes.split(b")"))
self.fp.write(b"%d %d M (%s) S\n" % (xy + (text_bytes,)))
if TYPE_CHECKING:
from . import Image
def image(
self, box: tuple[int, int, int, int], im: Image.Image, dpi: int | None = None
) -> None:
"""Draw a PIL image, centered in the given box."""
# default resolution depends on mode
if not dpi:
if im.mode == "1":
dpi = 200 # fax
else:
dpi = 100 # grayscale
# image size (on paper)
x = im.size[0] * 72 / dpi
y = im.size[1] * 72 / dpi
# max allowed size
xmax = float(box[2] - box[0])
ymax = float(box[3] - box[1])
if x > xmax:
y = y * xmax / x
x = xmax
if y > ymax:
x = x * ymax / y
y = ymax
dx = (xmax - x) / 2 + box[0]
dy = (ymax - y) / 2 + box[1]
self.fp.write(b"gsave\n%f %f translate\n" % (dx, dy))
if (x, y) != im.size:
# EpsImagePlugin._save prints the image at (0,0,xsize,ysize)
sx = x / im.size[0]
sy = y / im.size[1]
self.fp.write(b"%f %f scale\n" % (sx, sy))
EpsImagePlugin._save(im, self.fp, "", 0)
self.fp.write(b"\ngrestore\n")
# --------------------------------------------------------------------
# PostScript driver
#
# EDROFF.PS -- PostScript driver for Edroff 2
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#
EDROFF_PS = b"""\
/S { show } bind def
/P { moveto show } bind def
/M { moveto } bind def
/X { 0 rmoveto } bind def
/Y { 0 exch rmoveto } bind def
/E { findfont
dup maxlength dict begin
{
1 index /FID ne { def } { pop pop } ifelse
} forall
/Encoding exch def
dup /FontName exch def
currentdict end definefont pop
} bind def
/F { findfont exch scalefont dup setfont
[ exch /setfont cvx ] cvx bind def
} bind def
"""
#
# VDI.PS -- PostScript driver for VDI meta commands
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#
VDI_PS = b"""\
/Vm { moveto } bind def
/Va { newpath arcn stroke } bind def
/Vl { moveto lineto stroke } bind def
/Vc { newpath 0 360 arc closepath } bind def
/Vr { exch dup 0 rlineto
exch dup 0 exch rlineto
exch neg 0 rlineto
0 exch neg rlineto
setgray fill } bind def
/Tm matrix def
/Ve { Tm currentmatrix pop
translate scale newpath 0 0 .5 0 360 arc closepath
Tm setmatrix
} bind def
/Vf { currentgray exch setgray fill setgray } bind def
"""
#
# ERROR.PS -- Error handler
#
# History:
# 89-11-21 fl: created (pslist 1.10)
#
ERROR_PS = b"""\
/landscape false def
/errorBUF 200 string def
/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
errordict begin /handleerror {
initmatrix /Courier findfont 10 scalefont setfont
newpath 72 720 moveto $error begin /newerror false def
(PostScript Error) show errorNL errorNL
(Error: ) show
/errorname load errorBUF cvs show errorNL errorNL
(Command: ) show
/command load dup type /stringtype ne { errorBUF cvs } if show
errorNL errorNL
(VMstatus: ) show
vmstatus errorBUF cvs show ( bytes available, ) show
errorBUF cvs show ( bytes used at level ) show
errorBUF cvs show errorNL errorNL
(Operand stack: ) show errorNL /ostack load {
 dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall errorNL
(Execution stack: ) show errorNL /estack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall
end showpage
} def end
"""
| PSDraw |
python | ApeWorX__ape | src/ape/managers/networks.py | {
"start": 979,
"end": 2231
} | class ____(BaseModel):
"""
Cached data for node subprocesses managed by Ape.
"""
network_choice: str
"""
The network triple ``ecosystem:network:node``.
"""
ipc_path: Optional[Path] = None
"""
The IPC path this node process communicates on.
"""
http_uri: Optional[str] = None
"""
The HTTP URI this node process exposes.
"""
ws_uri: Optional[str] = None
"""
The websockets URI this node process exposes.
"""
@log_instead_of_fail(default="<NodeProcessData>")
def __repr__(self) -> str:
if ipc := self.ipc_path:
return f"{self.network_choice} - {clean_path(ipc)}"
elif uri := (self.http_uri or self.ws_uri):
return f"{self.network_choice} - {uri}"
return self.network_choice
def matches_provider(self, provider: "SubprocessProvider") -> bool:
if self.network_choice != f"{provider.network.choice}:{provider.name}":
return False
# Skip if any of the connection paths (IPC, HTTP, WS) differ
for attr in ("ipc_path", "http_uri", "ws_uri"):
if getattr(provider, attr, None) != getattr(self, attr, None):
return False
return True
| NodeProcessData |
python | ray-project__ray | python/ray/tests/test_runtime_env_container.py | {
"start": 4909,
"end": 8457
} | class ____:
def test_container_with_config(self, api_version):
"""`config` should be allowed with `container`"""
runtime_env = {"config": {"setup_timeout_seconds": 10}}
if api_version == "container":
runtime_env["container"] = {"image": NESTED_IMAGE_NAME}
else:
runtime_env["image_uri"] = NESTED_IMAGE_NAME
@ray.remote(runtime_env=runtime_env)
def f():
return ray.put((1, 10))
def test_container_with_env_vars(self, api_version):
"""`env_vars` should be allowed with `container`"""
runtime_env = {"env_vars": {"HELLO": "WORLD"}}
if api_version == "container":
runtime_env["container"] = {"image": NESTED_IMAGE_NAME}
else:
runtime_env["image_uri"] = NESTED_IMAGE_NAME
@ray.remote(runtime_env=runtime_env)
def f():
return ray.put((1, 10))
def test_container_with_pip(self, api_version):
with pytest.raises(ValueError, match=EXPECTED_ERROR.format(api_version)):
runtime_env = {"pip": ["requests"]}
if api_version == "container":
runtime_env["container"] = {"image": NESTED_IMAGE_NAME}
else:
runtime_env["image_uri"] = NESTED_IMAGE_NAME
@ray.remote(runtime_env=runtime_env)
def f():
return ray.put((1, 10))
def test_container_with_conda(self, api_version):
with pytest.raises(ValueError, match=EXPECTED_ERROR.format(api_version)):
runtime_env = {"conda": ["requests"]}
if api_version == "container":
runtime_env["container"] = {"image": NESTED_IMAGE_NAME}
else:
runtime_env["image_uri"] = NESTED_IMAGE_NAME
@ray.remote(runtime_env=runtime_env)
def f():
return ray.put((1, 10))
def test_container_with_py_modules(self, api_version):
with pytest.raises(ValueError, match=EXPECTED_ERROR.format(api_version)):
runtime_env = {"py_modules": ["requests"]}
if api_version == "container":
runtime_env["container"] = {"image": NESTED_IMAGE_NAME}
else:
runtime_env["image_uri"] = NESTED_IMAGE_NAME
@ray.remote(runtime_env=runtime_env)
def f():
return ray.put((1, 10))
def test_container_with_working_dir(self, api_version):
with pytest.raises(ValueError, match=EXPECTED_ERROR.format(api_version)):
runtime_env = {"working_dir": "."}
if api_version == "container":
runtime_env["container"] = {"image": NESTED_IMAGE_NAME}
else:
runtime_env["image_uri"] = NESTED_IMAGE_NAME
@ray.remote(runtime_env=runtime_env)
def f():
return ray.put((1, 10))
def test_container_with_pip_and_working_dir(self, api_version):
with pytest.raises(ValueError, match=EXPECTED_ERROR.format(api_version)):
runtime_env = {"pip": ["requests"], "working_dir": "."}
if api_version == "container":
runtime_env["container"] = {"image": NESTED_IMAGE_NAME}
else:
runtime_env["image_uri"] = NESTED_IMAGE_NAME
@ray.remote(runtime_env=runtime_env)
def f():
return ray.put((1, 10))
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestContainerRuntimeEnvWithOtherRuntimeEnv |
python | tensorflow__tensorflow | tensorflow/python/ops/math_grad_test.py | {
"start": 3470,
"end": 4981
} | class ____(test.TestCase):
def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
"""Returns samples from a normal distribution shifted `bias` away from 0."""
value = np.random.randn(*shape) * sigma
return value + np.sign(value) * bias
def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
np.random.seed(7)
if dtype in (dtypes.complex64, dtypes.complex128):
value = math_ops.complex(
self._biasedRandN(
shape, bias=bias, sigma=sigma),
self._biasedRandN(
shape, bias=bias, sigma=sigma))
else:
value = ops.convert_to_tensor(
self._biasedRandN(
shape, bias=bias), dtype=dtype)
with self.cached_session():
output = math_ops.abs(value)
error = gradient_checker.compute_gradient_error(
value, shape, output, output.get_shape().as_list())
self.assertLess(error, max_error)
@test_util.run_deprecated_v1
def testComplexAbs(self):
# Bias random test values away from zero to avoid numeric instabilities.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)
# Ensure stability near the pole at zero.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
| AbsOpTest |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 207936,
"end": 208794
} | class ____(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
| UDPTimeoutTest |
python | astropy__astropy | astropy/extern/ply/lex.py | {
"start": 3158,
"end": 3883
} | class ____(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
| NullLogger |
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 5652,
"end": 5820
} | class ____(Node):
def __init__(self, s):
super().__init__()
self.s = str(s)
def to_string(self, with_parens=None):
return self.s
| BaseNode |
python | facebook__pyre-check | client/tests/dataclasses_merge_test.py | {
"start": 1302,
"end": 4620
} | class ____(testslide.TestCase):
def test_basic(self) -> None:
self.assertEqual(
Basic.merge(Basic(x=1, y="base"), Basic(x=2, y="override")),
Basic(x=2, y="override"),
)
self.assertEqual(
Basic.merge(Basic(x=1, y="base"), Basic(x=2, y=None)),
Basic(x=2, y="base"),
)
self.assertEqual(
Basic.merge(Basic(x=1, y="base"), Basic(x=None, y="override")),
Basic(x=1, y="override"),
)
self.assertEqual(
Basic.merge(Basic(x=1, y="base"), Basic(x=None, y=None)),
Basic(x=1, y="base"),
)
def test_nesting(self) -> None:
self.assertEqual(
Nesting.merge(
Nesting(a=True, b=Basic(x=1, y="base")),
Nesting(a=False, b=Basic(x=2, y="override")),
),
Nesting(a=False, b=Basic(x=2, y="override")),
)
self.assertEqual(
Nesting.merge(
Nesting(a=True, b=Basic(x=1, y="base")),
Nesting(a=False, b=Basic(x=2, y=None)),
),
Nesting(a=False, b=Basic(x=2, y="base")),
)
self.assertEqual(
Nesting.merge(
Nesting(a=True, b=Basic(x=1, y="base")),
Nesting(a=None, b=Basic(x=None, y="override")),
),
Nesting(a=True, b=Basic(x=1, y="override")),
)
self.assertEqual(
Nesting.merge(
Nesting(a=True, b=Basic(x=1, y="base")),
Nesting(a=None, b=Basic(x=None, y=None)),
),
Nesting(a=True, b=Basic(x=1, y="base")),
)
def test_prepend(self) -> None:
self.assertEqual(
Prepend.merge(Prepend(x=[]), Prepend(x=[2])),
Prepend(x=[2]),
)
self.assertEqual(
Prepend.merge(Prepend(x=[1]), Prepend(x=[])),
Prepend(x=[1]),
)
self.assertEqual(
Prepend.merge(Prepend(x=[1, 2]), Prepend(x=[3, 4])),
Prepend(x=[3, 4, 1, 2]),
)
def test_raise_when_overwritten(self) -> None:
self.assertEqual(
RaiseWhenOverwritten.merge(
RaiseWhenOverwritten(x=1), RaiseWhenOverwritten(x=None)
),
RaiseWhenOverwritten(x=1),
)
self.assertEqual(
RaiseWhenOverwritten.merge(
RaiseWhenOverwritten(x=None), RaiseWhenOverwritten(x=2)
),
RaiseWhenOverwritten(x=2),
)
self.assertEqual(
RaiseWhenOverwritten.merge(
RaiseWhenOverwritten(x=None), RaiseWhenOverwritten(x=None)
),
RaiseWhenOverwritten(x=None),
)
with self.assertRaises(DataclassMergeError):
RaiseWhenOverwritten.merge(
RaiseWhenOverwritten(x=1), RaiseWhenOverwritten(x=2)
)
def test_custom(self) -> None:
self.assertEqual(Custom.merge(Custom(x=1), Custom(x=2)), Custom(x=1))
self.assertEqual(Custom.merge(Custom(x=1), Custom(x=None)), Custom(x=1))
self.assertEqual(Custom.merge(Custom(x=None), Custom(x=2)), Custom(x=None))
self.assertEqual(Custom.merge(Custom(x=None), Custom(x=None)), Custom(x=None))
| DataclassMergeTest |
python | pytorch__pytorch | torch/distributed/_composable/contract.py | {
"start": 880,
"end": 1021
} | class ____:
pass
_TState = TypeVar("_TState", bound="_State", covariant=True)
_M = TypeVar("_M", nn.Module, list[nn.Module])
| RegistryItem |
python | getsentry__sentry | src/sentry/audit_log/events.py | {
"start": 4221,
"end": 4588
} | class ____(AuditLogEvent):
def __init__(self) -> None:
super().__init__(event_id=11, name="ORG_EDIT", api_name="org.edit")
def render(self, audit_log_entry: AuditLogEntry) -> str:
items_string = ", ".join(f"{k} {v}" for k, v in audit_log_entry.data.items())
return "edited the organization setting: " + items_string
| OrgEditAuditLogEvent |
python | scrapy__scrapy | tests/mockserver/http_resources.py | {
"start": 6093,
"end": 6216
} | class ____(LeafResource):
def render(self, request):
return request.content.read()
| ArbitraryLengthPayloadResource |
python | sphinx-doc__sphinx | tests/test_util/test_util_typing.py | {
"start": 1615,
"end": 1675
} | class ____(MyClass1):
__qualname__ = '<MyClass2>'
| MyClass2 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP039.py | {
"start": 60,
"end": 106
} | class ____ \
():
pass
@decorator()
| A |
python | google__jax | tests/state_test.py | {
"start": 43499,
"end": 49967
} | class ____(jtu.JaxTestCase):
@hp.given(get_vmap_params())
@hp.settings(deadline=None, print_blob=True,
max_examples=jtu.NUM_GENERATED_CASES.value)
def test_get_vmap(self, get_vmap_param: GetVmapParams):
indexed_dims = get_vmap_param.vmap_index_param.index_param.indexed_dims
def f(ref, *non_slice_idx):
idx = _pack_idx(non_slice_idx, indexed_dims)
return [ref_get(ref, idx)]
ref_aval = get_vmap_param.vmap_index_param.index_param.ref_aval
bat_ref_aval = get_vmap_param.vmap_index_param.bat_ref_aval
bat_non_slice_idx_avals = get_vmap_param.vmap_index_param.bat_non_slice_idx_avals
ref_bdim = get_vmap_param.vmap_index_param.ref_bdim
idx_bdims = get_vmap_param.vmap_index_param.non_slice_idx_bdims
out_bdim = get_vmap_param.vmap_index_param.slice_bdim
non_slice_idx = get_vmap_param.bat_idxs
idx_avals = get_vmap_param.vmap_index_param.index_param.idx_avals
ref = get_vmap_param.bat_ref
f_batched = jax.vmap(f, in_axes=(ref_bdim, *idx_bdims), out_axes=[out_bdim])
stateful_jaxpr, _, stateful_consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f_batched, 1 + len(bat_non_slice_idx_avals)),
[bat_ref_aval, *bat_non_slice_idx_avals])
jaxpr, consts = discharge_state(stateful_jaxpr, stateful_consts)
discharge_of_vmap_ans = core.eval_jaxpr(jaxpr, consts, ref, *non_slice_idx)
# vmap-of-discharge
stateful_jaxpr, _, stateful_consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 1 + len(idx_avals)), [ref_aval, *idx_avals])
jaxpr_, consts_ = discharge_state(stateful_jaxpr, stateful_consts)
f_batched = jax.vmap(partial(core.eval_jaxpr, jaxpr_, consts_),
in_axes=(ref_bdim, *idx_bdims),
out_axes=[out_bdim, ref_bdim])
vmap_of_discharge_ans = f_batched(ref, *non_slice_idx)
self.assertAllClose(discharge_of_vmap_ans, vmap_of_discharge_ans,
check_dtypes=False)
@hp.given(set_vmap_params())
@hp.settings(deadline=None, print_blob=True,
max_examples=jtu.NUM_GENERATED_CASES.value)
def test_set_vmap(self, set_vmap_param: SetVmapParams):
if jtu.test_device_matches(["gpu"]):
self.skipTest("Scatter is nondeterministic on GPU")
indexed_dims = set_vmap_param.vmap_index_param.index_param.indexed_dims
def f(ref, val, *non_slice_idx):
idx = _pack_idx(non_slice_idx, indexed_dims)
ref_set(ref, idx, val)
return []
ref_aval = set_vmap_param.vmap_index_param.index_param.ref_aval
bat_ref_aval = set_vmap_param.vmap_index_param.bat_ref_aval
bat_non_slice_idx_avals = set_vmap_param.vmap_index_param.bat_non_slice_idx_avals
ref_bdim = set_vmap_param.vmap_index_param.ref_bdim
idx_bdims = set_vmap_param.vmap_index_param.non_slice_idx_bdims
non_slice_idx = set_vmap_param.bat_idxs
idx_avals = set_vmap_param.vmap_index_param.index_param.idx_avals
ref = set_vmap_param.bat_ref
val = set_vmap_param.bat_val
bat_val_aval = set_vmap_param.vmap_index_param.bat_slice_aval
val_aval = set_vmap_param.vmap_index_param.index_param.slice_aval
val_bdim = set_vmap_param.vmap_index_param.slice_bdim
f_batched = jax.vmap(f, in_axes=(ref_bdim, val_bdim, *idx_bdims),
out_axes=[])
stateful_jaxpr, _, stateful_consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f_batched, 2 + len(bat_non_slice_idx_avals)),
[bat_ref_aval, bat_val_aval, *bat_non_slice_idx_avals])
jaxpr, consts = discharge_state(stateful_jaxpr, stateful_consts)
discharge_of_vmap_ans = core.eval_jaxpr(jaxpr, consts, ref, val, *non_slice_idx)
# vmap-of-discharge
stateful_jaxpr, _, stateful_consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 2 + len(idx_avals)), [ref_aval, val_aval, *idx_avals])
jaxpr_, consts_ = discharge_state(stateful_jaxpr, stateful_consts)
f_batched = jax.vmap(partial(core.eval_jaxpr, jaxpr_, consts_),
in_axes=(ref_bdim, val_bdim, *idx_bdims),
out_axes=[ref_bdim])
vmap_of_discharge_ans = f_batched(ref, val, *non_slice_idx)
self.assertAllClose(discharge_of_vmap_ans, vmap_of_discharge_ans,
check_dtypes=False)
@hp.given(set_vmap_params())
@hp.settings(deadline=None, print_blob=True,
max_examples=jtu.NUM_GENERATED_CASES.value)
def test_addupdate_vmap(self, set_vmap_param: SetVmapParams):
indexed_dims = set_vmap_param.vmap_index_param.index_param.indexed_dims
def f(ref, val, *non_slice_idx):
idx = _pack_idx(non_slice_idx, indexed_dims)
ref_addupdate(ref, idx, val)
return []
ref_aval = set_vmap_param.vmap_index_param.index_param.ref_aval
bat_ref_aval = set_vmap_param.vmap_index_param.bat_ref_aval
bat_non_slice_idx_avals = set_vmap_param.vmap_index_param.bat_non_slice_idx_avals
ref_bdim = set_vmap_param.vmap_index_param.ref_bdim
idx_bdims = set_vmap_param.vmap_index_param.non_slice_idx_bdims
non_slice_idx = set_vmap_param.bat_idxs
idx_avals = set_vmap_param.vmap_index_param.index_param.idx_avals
ref = set_vmap_param.bat_ref
val = set_vmap_param.bat_val
bat_val_aval = set_vmap_param.vmap_index_param.bat_slice_aval
val_aval = set_vmap_param.vmap_index_param.index_param.slice_aval
val_bdim = set_vmap_param.vmap_index_param.slice_bdim
f_batched = jax.vmap(f, in_axes=(ref_bdim, val_bdim, *idx_bdims),
out_axes=[])
stateful_jaxpr, _, stateful_consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f_batched, 2 + len(bat_non_slice_idx_avals)),
[bat_ref_aval, bat_val_aval, *bat_non_slice_idx_avals])
jaxpr, consts = discharge_state(stateful_jaxpr, stateful_consts)
discharge_of_vmap_ans = core.eval_jaxpr(jaxpr, consts, ref, val, *non_slice_idx)
# vmap-of-discharge
stateful_jaxpr, _, stateful_consts = pe.trace_to_jaxpr_dynamic(
wrap_init(f, 2 + len(idx_avals)), [ref_aval, val_aval, *idx_avals])
jaxpr_, consts_ = discharge_state(stateful_jaxpr, stateful_consts)
f_batched = jax.vmap(partial(core.eval_jaxpr, jaxpr_, consts_),
in_axes=(ref_bdim, val_bdim, *idx_bdims),
out_axes=[ref_bdim])
vmap_of_discharge_ans = f_batched(ref, val, *non_slice_idx)
self.assertAllClose(discharge_of_vmap_ans, vmap_of_discharge_ans,
check_dtypes=False)
| StateHypothesisTest |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 13192,
"end": 13517
} | class ____(TypedDict):
root_module: Required[str]
defs_module: NotRequired[str]
code_location_target_module: NotRequired[str]
code_location_name: NotRequired[str]
registry_modules: NotRequired[list[str]]
# ########################
# ##### WORKSPACE
# ########################
@dataclass
| DgRawProjectConfig |
python | keras-team__keras | keras/src/layers/pooling/global_average_pooling2d.py | {
"start": 265,
"end": 2469
} | class ____(BaseGlobalPooling):
"""Global average pooling operation for 2D data.
Args:
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, height, weight)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
keepdims: A boolean, whether to keep the temporal dimension or not.
If `keepdims` is `False` (default), the rank of the tensor is
reduced for spatial dimensions. If `keepdims` is `True`, the
spatial dimension are retained with length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape:
`(batch_size, height, width, channels)`
- If `data_format='channels_first'`:
4D tensor with shape:
`(batch_size, channels, height, width)`
Output shape:
- If `keepdims=False`:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims=True`:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, 1, 1, channels)`
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, 1, 1)`
Example:
>>> x = np.random.rand(2, 4, 5, 3)
>>> y = keras.layers.GlobalAveragePooling2D()(x)
>>> y.shape
(2, 3)
"""
def __init__(self, data_format=None, keepdims=False, **kwargs):
super().__init__(
pool_dimensions=2,
data_format=data_format,
keepdims=keepdims,
**kwargs,
)
def call(self, inputs):
if self.data_format == "channels_last":
return ops.mean(inputs, axis=[1, 2], keepdims=self.keepdims)
return ops.mean(inputs, axis=[2, 3], keepdims=self.keepdims)
| GlobalAveragePooling2D |
python | wireservice__csvkit | csvkit/utilities/csvjoin.py | {
"start": 121,
"end": 5917
} | class ____(CSVKitUtility):
description = 'Execute a SQL-like join to merge CSV files on a specified column or columns.'
epilog = "Note that the join operation requires reading all files into memory. Don't try this on very large files."
# Override 'f' because the utility accepts multiple files.
override_flags = ['f']
def add_arguments(self):
self.argparser.add_argument(
metavar='FILE', nargs='*', dest='input_paths', default=['-'],
help='The CSV files to operate on. If only one is specified, it will be copied to STDOUT.')
self.argparser.add_argument(
'-c', '--columns', dest='columns',
help='The column name(s) on which to join. Should be either one name (or index) or a comma-separated list '
'with one name (or index) per file, in the same order in which the files were specified. If not '
'specified, the two files will be joined sequentially without matching.')
self.argparser.add_argument(
'--outer', dest='outer_join', action='store_true',
help='Perform a full outer join, rather than the default inner join.')
self.argparser.add_argument(
'--left', dest='left_join', action='store_true',
help='Perform a left outer join, rather than the default inner join. If more than two files are provided '
'this will be executed as a sequence of left outer joins, starting at the left.')
self.argparser.add_argument(
'--right', dest='right_join', action='store_true',
help='Perform a right outer join, rather than the default inner join. If more than two files are provided '
'this will be executed as a sequence of right outer joins, starting at the right.')
self.argparser.add_argument(
'-y', '--snifflimit', dest='sniff_limit', type=int, default=1024,
help='Limit CSV dialect sniffing to the specified number of bytes. '
'Specify "0" to disable sniffing entirely, or "-1" to sniff the entire file.')
self.argparser.add_argument(
'-I', '--no-inference', dest='no_inference', action='store_true',
help='Disable type inference (and --locale, --date-format, --datetime-format, --no-leading-zeroes) '
'when parsing the input.')
def main(self):
if isatty(sys.stdin) and self.args.input_paths == ['-']:
self.argparser.error('You must provide an input file or piped data.')
self.input_files = []
for path in self.args.input_paths:
self.input_files.append(self._open_input_file(path))
if self.args.columns:
join_column_names = self._parse_join_column_names(self.args.columns)
if len(join_column_names) == 1:
join_column_names = join_column_names * len(self.input_files)
if len(join_column_names) != len(self.input_files):
self.argparser.error('The number of join column names must match the number of files, or be a single '
'column name that exists in all files.')
if (self.args.left_join or self.args.right_join or self.args.outer_join) and not self.args.columns:
self.argparser.error('You must provide join column names when performing an outer join.')
if self.args.left_join and self.args.right_join:
self.argparser.error('It is not valid to specify both a left and a right join.')
tables = []
sniff_limit = self.args.sniff_limit if self.args.sniff_limit != -1 else None
column_types = self.get_column_types()
for f in self.input_files:
tables.append(agate.Table.from_csv(
f,
skip_lines=self.args.skip_lines,
sniff_limit=sniff_limit,
column_types=column_types,
**self.reader_kwargs,
))
f.close()
join_column_ids = []
if self.args.columns:
for i, table in enumerate(tables):
join_column_ids.append(match_column_identifier(table.column_names, join_column_names[i]))
jointab = tables[0]
if self.args.left_join:
# Left outer join
for i, table in enumerate(tables[1:]):
jointab = agate.Table.join(jointab, table, join_column_ids[0], join_column_ids[i + 1])
elif self.args.right_join:
# Right outer join
jointab = tables[-1]
remaining_tables = tables[:-1]
remaining_tables.reverse()
for i, table in enumerate(remaining_tables):
jointab = agate.Table.join(jointab, table, join_column_ids[-1], join_column_ids[-(i + 2)])
elif self.args.outer_join:
# Full outer join
for i, table in enumerate(tables[1:]):
jointab = agate.Table.join(jointab, table, join_column_ids[0], join_column_ids[i + 1], full_outer=True)
elif self.args.columns:
# Inner join
for i, table in enumerate(tables[1:]):
jointab = agate.Table.join(jointab, table, join_column_ids[0], join_column_ids[i + 1], inner=True)
else:
# Sequential join
for table in tables[1:]:
jointab = agate.Table.join(jointab, table, full_outer=True)
jointab.to_csv(self.output_file, **self.writer_kwargs)
def _parse_join_column_names(self, join_string):
"""
Parse a list of join columns.
"""
return list(map(str.strip, join_string.split(',')))
def launch_new_instance():
utility = CSVJoin()
utility.run()
if __name__ == '__main__':
launch_new_instance()
| CSVJoin |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_bool.py | {
"start": 373,
"end": 517
} | class ____:
def __bool__(self):
return return_int() # [invalid-bool-return]
# These testcases should NOT raise errors
| ComplexReturn |
python | readthedocs__readthedocs.org | readthedocs/doc_builder/backends/sphinx.py | {
"start": 6874,
"end": 7034
} | class ____(HtmlBuilder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sphinx_builder = "dirhtml"
| HtmlDirBuilder |
python | tensorflow__tensorflow | tensorflow/python/saved_model/save_test.py | {
"start": 57796,
"end": 59548
} | class ____(test.TestCase):
def test_export_meta_graph(self):
root = autotrackable.AutoTrackable()
root.variable = resource_variable_ops.UninitializedVariable(
name="some_variable", dtype=dtypes.float32)
@def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
def multiply_var(x):
return root.variable * x
@def_function.function(input_signature=[tensor_spec.TensorSpec([])])
def update(y):
root.variable.assign_add(y)
# TODO(b/150393409): All functions exported as signatures must have at
# least one output.
return 0
@def_function.function(input_signature=[])
def initialize():
root.variable.assign(1.0)
# TODO(b/150393409): All functions exported as signatures must have at
# least one output.
return 0
save_path = os.path.join(self.get_temp_dir(), "meta_graph.pb")
save.export_meta_graph(
root,
save_path,
signatures={
"multiply_var": multiply_var,
"initialize": initialize,
"update": update
})
with ops.Graph().as_default(), session_lib.Session() as session:
saver.import_meta_graph(save_path)
meta_graph_def = meta_graph.read_meta_graph_file(save_path)
# Initialize variable to 1
_run_signature(session, meta_graph_def, {}, "initialize")
out = _run_signature(session, meta_graph_def, {"x": 3}, "multiply_var")
self.assertAllEqual(out, {"output_0": 3})
# Adds 2 to the variable. Variable is now 3
_run_signature(session, meta_graph_def, {"y": 2}, "update")
out = _run_signature(session, meta_graph_def, {"x": 4}, "multiply_var")
self.assertAllEqual(out, {"output_0": 12})
| ExportMetaGraphTests |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 212216,
"end": 212884
} | class ____(TestCase):
def test_basic(self):
# Example calculation from:
# https://en.wikipedia.org/wiki/Discrete_Fourier_transform#Example
xarr = [1, 2 - 1j, -1j, -1 + 2j]
Xarr = [2, -2 - 2j, -2j, 4 + 4j]
self.assertTrue(all(map(cmath.isclose, mi.dft(xarr), Xarr)))
self.assertTrue(all(map(cmath.isclose, mi.idft(Xarr), xarr)))
def test_roundtrip(self):
for _ in range(1_000):
N = randrange(35)
xarr = [complex(random(), random()) for i in range(N)]
Xarr = list(mi.dft(xarr))
assert all(map(cmath.isclose, mi.idft(Xarr), xarr))
| DiscreteFourierTransformTests |
python | huggingface__transformers | tests/models/wav2vec2/test_processing_wav2vec2.py | {
"start": 916,
"end": 3427
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Wav2Vec2Processor
audio_input_name = "input_values"
text_input_name = "labels"
@classmethod
def _setup_feature_extractor(cls):
feature_extractor_class = cls._get_component_class_from_processor("feature_extractor")
feature_extractor_map = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 16000,
"return_attention_mask": False,
"do_normalize": True,
}
return feature_extractor_class(**feature_extractor_map)
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
vocab = "<pad> <s> </s> <unk> | E T A O N I H S R D L U M W C F G Y P B V K ' X J Q Z".split(" ")
vocab_tokens = dict(zip(vocab, range(len(vocab))))
vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
add_kwargs_tokens_map = {
"pad_token": "<pad>",
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
return tokenizer_class.from_pretrained(cls.tmpdirname, **add_kwargs_tokens_map)
# todo: check why this test is failing
@unittest.skip("Failing for unknown reason")
def test_overlapping_text_audio_kwargs_handling(self):
pass
@unittest.skip("Wav2Vec2BertProcessor changes input_features")
def test_processor_with_multiple_inputs(self):
pass
def test_feature_extractor(self):
feature_extractor = self.get_component("feature_extractor")
processor = self.get_processor()
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_model_input_names(self):
processor = self.get_processor()
text = "lower newer"
audio_inputs = self.prepare_audio_inputs()
inputs = processor(text=text, audio=audio_inputs, return_attention_mask=True, return_tensors="pt")
self.assertSetEqual(set(inputs.keys()), set(processor.model_input_names))
| Wav2Vec2ProcessorTest |
python | langchain-ai__langchain | libs/core/langchain_core/output_parsers/xml.py | {
"start": 5579,
"end": 10974
} | class ____(BaseTransformOutputParser):
"""Parse an output using xml format.
Returns a dictionary of tags.
"""
tags: list[str] | None = None
"""Tags to tell the LLM to expect in the XML output.
Note this may not be perfect depending on the LLM implementation.
For example, with `tags=["foo", "bar", "baz"]`:
1. A well-formatted XML instance:
`"<foo>\n <bar>\n <baz></baz>\n </bar>\n</foo>"`
2. A badly-formatted XML instance (missing closing tag for 'bar'):
`"<foo>\n <bar>\n </foo>"`
3. A badly-formatted XML instance (unexpected 'tag' element):
`"<foo>\n <tag>\n </tag>\n</foo>"`
"""
encoding_matcher: re.Pattern = re.compile(
r"<([^>]*encoding[^>]*)>\n(.*)", re.MULTILINE | re.DOTALL
)
parser: Literal["defusedxml", "xml"] = "defusedxml"
"""Parser to use for XML parsing. Can be either `'defusedxml'` or `'xml'`.
* `'defusedxml'` is the default parser and is used to prevent XML vulnerabilities
present in some distributions of Python's standard library xml.
`defusedxml` is a wrapper around the standard library parser that
sets up the parser with secure defaults.
* `'xml'` is the standard library parser.
Use `xml` only if you are sure that your distribution of the standard library is not
vulnerable to XML vulnerabilities.
Please review the following resources for more information:
* https://docs.python.org/3/library/xml.html#xml-vulnerabilities
* https://github.com/tiran/defusedxml
The standard library relies on [`libexpat`](https://github.com/libexpat/libexpat)
for parsing XML.
"""
def get_format_instructions(self) -> str:
"""Return the format instructions for the XML output."""
return XML_FORMAT_INSTRUCTIONS.format(tags=self.tags)
def parse(self, text: str) -> dict[str, str | list[Any]]:
"""Parse the output of an LLM call.
Args:
text: The output of an LLM call.
Returns:
A `dict` representing the parsed XML.
Raises:
OutputParserException: If the XML is not well-formed.
ImportError: If defus`edxml is not installed and the `defusedxml` parser is
requested.
"""
# Try to find XML string within triple backticks
# Imports are temporarily placed here to avoid issue with caching on CI
# likely if you're reading this you can move them to the top of the file
if self.parser == "defusedxml":
if not _HAS_DEFUSEDXML:
msg = (
"defusedxml is not installed. "
"Please install it to use the defusedxml parser."
"You can install it with `pip install defusedxml`"
"See https://github.com/tiran/defusedxml for more details"
)
raise ImportError(msg)
et = ElementTree # Use the defusedxml parser
else:
et = ET # Use the standard library parser
match = re.search(r"```(xml)?(.*)```", text, re.DOTALL)
if match is not None:
# If match found, use the content within the backticks
text = match.group(2)
encoding_match = self.encoding_matcher.search(text)
if encoding_match:
text = encoding_match.group(2)
text = text.strip()
try:
root = et.fromstring(text)
return self._root_to_dict(root)
except et.ParseError as e:
msg = f"Failed to parse XML format from completion {text}. Got: {e}"
raise OutputParserException(msg, llm_output=text) from e
@override
def _transform(self, input: Iterator[str | BaseMessage]) -> Iterator[AddableDict]:
streaming_parser = _StreamingParser(self.parser)
for chunk in input:
yield from streaming_parser.parse(chunk)
streaming_parser.close()
@override
async def _atransform(
self, input: AsyncIterator[str | BaseMessage]
) -> AsyncIterator[AddableDict]:
streaming_parser = _StreamingParser(self.parser)
async for chunk in input:
for output in streaming_parser.parse(chunk):
yield output
streaming_parser.close()
def _root_to_dict(self, root: ET.Element) -> dict[str, str | list[Any]]:
"""Converts xml tree to python dictionary."""
if root.text and bool(re.search(r"\S", root.text)):
# If root text contains any non-whitespace character it
# returns {root.tag: root.text}
return {root.tag: root.text}
result: dict = {root.tag: []}
for child in root:
if len(child) == 0:
result[root.tag].append({child.tag: child.text})
else:
result[root.tag].append(self._root_to_dict(child))
return result
@property
def _type(self) -> str:
return "xml"
def nested_element(path: list[str], elem: ET.Element) -> Any:
"""Get nested element from path.
Args:
path: The path to the element.
elem: The element to extract.
Returns:
The nested element.
"""
if len(path) == 0:
return AddableDict({elem.tag: elem.text})
return AddableDict({path[0]: [nested_element(path[1:], elem)]})
| XMLOutputParser |
python | pypa__pip | src/pip/_internal/cli/parser.py | {
"start": 3417,
"end": 4589
} | class ____(PrettyHelpFormatter):
"""Custom help formatter for use in ConfigOptionParser.
This is updates the defaults before expanding them, allowing
them to show up correctly in the help listing.
Also redact auth from url type options
"""
def expand_default(self, option: optparse.Option) -> str:
default_values = None
if self.parser is not None:
assert isinstance(self.parser, ConfigOptionParser)
self.parser._update_defaults(self.parser.defaults)
assert option.dest is not None
default_values = self.parser.defaults.get(option.dest)
help_text = super().expand_default(option)
if default_values and option.metavar == "URL":
if isinstance(default_values, str):
default_values = [default_values]
# If its not a list, we should abort and just return the help text
if not isinstance(default_values, list):
default_values = []
for val in default_values:
help_text = help_text.replace(val, redact_auth_from_url(val))
return help_text
| UpdatingDefaultsHelpFormatter |
python | conda__conda | conda/__init__.py | {
"start": 4515,
"end": 6009
} | class ____(CondaError):
return_code = 0
ACTIVE_SUBPROCESSES: Iterable[Popen] = set()
def conda_signal_handler(signum: int, frame: Any):
# This function is in the base __init__.py so that it can be monkey-patched by other code
# if downstream conda users so choose. The biggest danger of monkey-patching is that
# unlink/link transactions don't get rolled back if interrupted mid-transaction.
for p in ACTIVE_SUBPROCESSES:
if p.poll() is None:
p.send_signal(signum)
from .exceptions import CondaSignalInterrupt
raise CondaSignalInterrupt(signum)
def _default(self, obj):
from frozendict import frozendict
from .deprecations import deprecated
if isinstance(obj, frozendict):
deprecated.topic(
"26.3",
"26.9",
topic="Monkey-patching `json.JSONEncoder` to support `frozendict`",
addendum="Use `conda.common.serialize.json.CondaJSONEncoder` instead.",
)
return dict(obj)
elif hasattr(obj, "to_json"):
deprecated.topic(
"26.3",
"26.9",
topic="Monkey-patching `json.JSONEncoder` to support `obj.to_json()`",
addendum="Use `conda.common.serialize.json.CondaJSONEncoder` instead.",
)
return obj.to_json()
return _default.default(obj)
# FUTURE: conda 26.3, remove the following monkey patching
_default.default = JSONEncoder().default
JSONEncoder.default = _default
| CondaExitZero |
python | weaviate__weaviate-python-client | weaviate/backup/backup.py | {
"start": 284,
"end": 675
} | class ____(str, Enum):
"""Which compression level should be used to compress the backup."""
DEFAULT = "DefaultCompression"
BEST_SPEED = "BestSpeed"
BEST_COMPRESSION = "BestCompression"
ZSTD_BEST_SPEED = "ZstdBestSpeed"
ZSTD_DEFAULT = "ZstdDefaultCompression"
ZSTD_BEST_COMPRESSION = "ZstdBestCompression"
NO_COMPRESSION = "NoCompression"
| BackupCompressionLevel |
python | bokeh__bokeh | src/bokeh/plotting/contour.py | {
"start": 2562,
"end": 2769
} | class ____:
''' Combined filled and line contours over a whole sequence of contour levels.
'''
fill_coords: FillCoords | None
line_coords: LineCoords | None
@dataclass(frozen=True)
| ContourCoords |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 186428,
"end": 187374
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of CreateLinkedBranch"""
__schema__ = github_schema
__field_names__ = ("issue_id", "oid", "name", "repository_id", "client_mutation_id")
issue_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="issueId")
"""ID of the issue to link to."""
oid = sgqlc.types.Field(sgqlc.types.non_null(GitObjectID), graphql_name="oid")
"""The commit SHA to base the new branch on."""
name = sgqlc.types.Field(String, graphql_name="name")
"""The name of the new branch. Defaults to issue number and title."""
repository_id = sgqlc.types.Field(ID, graphql_name="repositoryId")
"""ID of the repository to create the branch in. Defaults to the
issue repository.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| CreateLinkedBranchInput |
python | django__django | tests/forms_tests/field_tests/test_multivaluefield.py | {
"start": 1338,
"end": 1754
} | class ____(MultiValueField):
def __init__(self, **kwargs):
fields = (
CharField(),
MultipleChoiceField(choices=beatles),
SplitDateTimeField(),
)
super().__init__(fields, **kwargs)
def compress(self, data_list):
if data_list:
return "%s,%s,%s" % (data_list[0], "".join(data_list[1]), data_list[2])
return None
| ComplexField |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 9731,
"end": 10004
} | class ____(ModelEvent):
''' Announce a button click event on a Bokeh menu item.
'''
event_name = 'menu_item_click'
def __init__(self, model: Model, item: str | None = None) -> None:
self.item = item
super().__init__(model=model)
| MenuItemClick |
python | spack__spack | lib/spack/spack/platforms/freebsd.py | {
"start": 225,
"end": 529
} | class ____(Platform):
priority = 102
def __init__(self):
super().__init__("freebsd")
os = FreeBSDOs()
self.default_os = str(os)
self.add_operating_system(str(os), os)
@classmethod
def detect(cls):
return platform.system().lower() == "freebsd"
| FreeBSD |
python | numba__numba | numba/typed/typedlist.py | {
"start": 2908,
"end": 18791
} | class ____(MutableSequence, pt.Generic[T]):
"""A typed-list usable in Numba compiled functions.
Implements the MutableSequence interface.
"""
_legal_kwargs = ["lsttype", "meminfo", "allocated"]
def __new__(cls,
*args,
lsttype=None,
meminfo=None,
allocated=DEFAULT_ALLOCATED,
**kwargs):
if config.DISABLE_JIT:
return list(*args, **kwargs)
else:
return object.__new__(cls)
@classmethod
def empty_list(cls, item_type, allocated=DEFAULT_ALLOCATED):
"""Create a new empty List.
Parameters
----------
item_type: Numba type
type of the list item.
allocated: int
number of items to pre-allocate
"""
if config.DISABLE_JIT:
return list()
else:
return cls(lsttype=ListType(item_type), allocated=allocated)
def __init__(self, *args, **kwargs):
"""
For users, the constructor does not take any parameters.
The keyword arguments are for internal use only.
Parameters
----------
args: iterable
The iterable to initialize the list from
lsttype : numba.core.types.ListType; keyword-only
Used internally for the list type.
meminfo : MemInfo; keyword-only
Used internally to pass the MemInfo object when boxing.
allocated: int; keyword-only
Used internally to pre-allocate space for items
"""
illegal_kwargs = any((kw not in self._legal_kwargs for kw in kwargs))
if illegal_kwargs or args and kwargs:
raise TypeError("List() takes no keyword arguments")
if kwargs:
self._list_type, self._opaque = self._parse_arg(**kwargs)
else:
self._list_type = None
if args:
if not 0 <= len(args) <= 1:
raise TypeError(
"List() expected at most 1 argument, got {}"
.format(len(args))
)
iterable = args[0]
# Special case Numpy scalars or anything that quacks like a
# NumPy Array.
if hasattr(iterable, "ndim") and iterable.ndim == 0:
self.append(iterable.item())
else:
try:
iter(iterable)
except TypeError:
raise TypeError("List() argument must be iterable")
for i in args[0]:
self.append(i)
def _parse_arg(self, lsttype, meminfo=None, allocated=DEFAULT_ALLOCATED):
if not isinstance(lsttype, ListType):
raise TypeError('*lsttype* must be a ListType')
if meminfo is not None:
opaque = meminfo
else:
opaque = _make_list(lsttype.item_type, allocated=allocated)
return lsttype, opaque
@property
def _numba_type_(self):
if self._list_type is None:
raise TypeError("invalid operation on untyped list")
return self._list_type
@property
def _typed(self):
"""Returns True if the list is typed.
"""
return self._list_type is not None
@property
def _dtype(self):
if not self._typed:
raise RuntimeError("invalid operation on untyped list")
return self._list_type.dtype
def _initialise_list(self, item):
lsttype = types.ListType(typeof(item))
self._list_type, self._opaque = self._parse_arg(lsttype)
def __len__(self) -> int:
if not self._typed:
return 0
else:
return _length(self)
def _allocated(self):
if not self._typed:
return DEFAULT_ALLOCATED
else:
return _allocated(self)
def _is_mutable(self):
return _is_mutable(self)
def _make_mutable(self):
return _make_mutable(self)
def _make_immutable(self):
return _make_immutable(self)
def __eq__(self, other):
return _eq(self, other)
def __ne__(self, other):
return _ne(self, other)
def __lt__(self, other):
return _lt(self, other)
def __le__(self, other):
return _le(self, other)
def __gt__(self, other):
return _gt(self, other)
def __ge__(self, other):
return _ge(self, other)
def append(self, item: T) -> None:
if not self._typed:
self._initialise_list(item)
_append(self, item)
# noqa F811 comments required due to github.com/PyCQA/pyflakes/issues/592
# noqa E704 required to follow overload style of using ... in the same line
@pt.overload # type: ignore[override]
def __setitem__(self, i: int, o: T) -> None: ... # noqa: F811, E704
@pt.overload
def __setitem__(self, s: slice, o: 'List[T]') -> None: ... # noqa: F811, E704, E501
def __setitem__(self, i: Int_or_Slice, item: T_or_ListT) -> None: # noqa: F811, E501
if not self._typed:
self._initialise_list(item)
_setitem(self, i, item)
# noqa F811 comments required due to github.com/PyCQA/pyflakes/issues/592
# noqa E704 required to follow overload style of using ... in the same line
@pt.overload
def __getitem__(self, i: int) -> T: ... # noqa: F811, E704
@pt.overload
def __getitem__(self, i: slice) -> 'List[T]': ... # noqa: F811, E704
def __getitem__(self, i: Int_or_Slice) -> T_or_ListT: # noqa: F811
if not self._typed:
raise IndexError
else:
return _getitem(self, i)
def __iter__(self) -> pt.Iterator[T]:
for i in range(len(self)):
yield self[i]
def __contains__(self, item: T) -> bool: # type: ignore[override]
return _contains(self, item)
def __delitem__(self, i: Int_or_Slice) -> None:
_delitem(self, i)
def insert(self, i: int, item: T) -> None:
if not self._typed:
self._initialise_list(item)
_insert(self, i, item)
def count(self, item: T) -> int:
return _count(self, item)
def pop(self, i: "pt.SupportsIndex" = -1) -> T:
return _pop(self, i)
def extend(self, iterable: "_Sequence[T]") -> None: #type: ignore[override]
# Empty iterable, do nothing
if len(iterable) == 0:
return None
if not self._typed:
# Need to get the first element of the iterable to initialise the
# type of the list. FIXME: this may be a problem if the iterable
# can not be sliced.
self._initialise_list(iterable[0])
return _extend(self, iterable)
def remove(self, item: T) -> None:
return _remove(self, item)
def clear(self):
return _clear(self)
def reverse(self):
return _reverse(self)
def copy(self):
return _copy(self)
def index(self, item: T, start: pt.Optional[int] = None,
stop: pt.Optional[int] = None) -> int:
return _index(self, item, start, stop)
def sort(self, key=None, reverse=False):
"""Sort the list inplace.
See also ``list.sort()``
"""
# If key is not already a dispatcher object, make it so
if callable(key) and not isinstance(key, Dispatcher):
key = njit(key)
return _sort(self, key, reverse)
def __str__(self):
buf = []
for x in self:
buf.append("{}".format(x))
# Check whether the code was invoked from IPython shell
try:
get_ipython
preview = ', '.join(buf[:1000])
suffix = ', ...' if len(buf) > 1000 else ''
return '[{0}{1}]'.format(preview, suffix)
except (NameError, IndexError):
return '[{0}]'.format(', '.join(buf))
def __repr__(self):
body = str(self)
prefix = str(self._list_type) if self._typed else "ListType[Undefined]"
return "{prefix}({body})".format(prefix=prefix, body=body)
@overload_classmethod(ListType, 'empty_list')
def typedlist_empty(cls, item_type, allocated=DEFAULT_ALLOCATED):
if cls.instance_type is not ListType:
return
def impl(cls, item_type, allocated=DEFAULT_ALLOCATED):
return listobject.new_list(item_type, allocated=allocated)
return impl
@box(types.ListType)
def box_lsttype(typ, val, c):
context = c.context
builder = c.builder
# XXX deduplicate
ctor = cgutils.create_struct_proxy(typ)
lstruct = ctor(context, builder, value=val)
# Returns the plain MemInfo
boxed_meminfo = c.box(
types.MemInfoPointer(types.voidptr),
lstruct.meminfo,
)
modname = c.context.insert_const_string(
c.builder.module, 'numba.typed.typedlist',
)
typedlist_mod = c.pyapi.import_module(modname)
fmp_fn = c.pyapi.object_getattr_string(typedlist_mod, '_from_meminfo_ptr')
lsttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
result_var = builder.alloca(c.pyapi.pyobj)
builder.store(cgutils.get_null_value(c.pyapi.pyobj), result_var)
with builder.if_then(cgutils.is_not_null(builder, lsttype_obj)):
res = c.pyapi.call_function_objargs(
fmp_fn, (boxed_meminfo, lsttype_obj),
)
c.pyapi.decref(fmp_fn)
c.pyapi.decref(typedlist_mod)
c.pyapi.decref(boxed_meminfo)
builder.store(res, result_var)
return builder.load(result_var)
@unbox(types.ListType)
def unbox_listtype(typ, val, c):
context = c.context
builder = c.builder
# Check that `type(val) is Dict`
list_type = c.pyapi.unserialize(c.pyapi.serialize_object(List))
valtype = c.pyapi.object_type(val)
same_type = builder.icmp_unsigned("==", valtype, list_type)
with c.builder.if_else(same_type) as (then, orelse):
with then:
miptr = c.pyapi.object_getattr_string(val, '_opaque')
native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)
mi = native.value
ctor = cgutils.create_struct_proxy(typ)
lstruct = ctor(context, builder)
data_pointer = context.nrt.meminfo_data(builder, mi)
data_pointer = builder.bitcast(
data_pointer,
listobject.ll_list_type.as_pointer(),
)
lstruct.data = builder.load(data_pointer)
lstruct.meminfo = mi
lstobj = lstruct._getvalue()
c.pyapi.decref(miptr)
bb_unboxed = c.builder.basic_block
with orelse:
# Raise error on incorrect type
c.pyapi.err_format(
"PyExc_TypeError",
"can't unbox a %S as a %S",
valtype, list_type,
)
bb_else = c.builder.basic_block
# Phi nodes to gather the output
lstobj_res = c.builder.phi(lstobj.type)
is_error_res = c.builder.phi(cgutils.bool_t)
lstobj_res.add_incoming(lstobj, bb_unboxed)
lstobj_res.add_incoming(lstobj.type(None), bb_else)
is_error_res.add_incoming(cgutils.false_bit, bb_unboxed)
is_error_res.add_incoming(cgutils.true_bit, bb_else)
# cleanup
c.pyapi.decref(list_type)
c.pyapi.decref(valtype)
return NativeValue(lstobj_res, is_error=is_error_res)
#
# The following contains the logic for the type-inferred constructor
#
def _guess_dtype(iterable):
"""Guess the correct dtype of the iterable type. """
if not isinstance(iterable, types.IterableType):
raise TypingError(
"List() argument must be iterable")
# Special case for nested NumPy arrays.
elif isinstance(iterable, types.Array) and iterable.ndim > 1:
return iterable.copy(ndim=iterable.ndim - 1, layout='A')
elif hasattr(iterable, "dtype"):
return iterable.dtype
elif hasattr(iterable, "yield_type"):
return iterable.yield_type
elif isinstance(iterable, types.UnicodeType):
return iterable
elif isinstance(iterable, types.DictType):
return iterable.key_type
else:
# This should never happen, since the 'dtype' of any iterable
# should have determined above.
raise TypingError(
"List() argument does not have a suitable dtype")
@type_callable(ListType)
def typedlist_call(context):
"""Defines typing logic for ``List()`` and ``List(iterable)``.
If no argument is given, the returned typer types a new typed-list with an
undefined item type. If a single argument is given it must be iterable with
a guessable 'dtype'. In this case, the typer types a new typed-list with
the type set to the 'dtype' of the iterable arg.
Parameters
----------
arg : single iterable (optional)
The single optional argument.
Returns
-------
typer : function
A typer suitable to type constructor calls.
Raises
------
The returned typer raises a TypingError in case of unsuitable arguments.
"""
class Typer(object):
def attach_sig(self):
from inspect import signature as mypysig
def mytyper(iterable):
pass
self.pysig = mypysig(mytyper)
def __call__(self, *args, **kwargs):
if kwargs:
raise TypingError(
"List() takes no keyword arguments"
)
elif args:
if not 0 <= len(args) <= 1:
raise TypingError(
"List() expected at most 1 argument, got {}"
.format(len(args))
)
rt = types.ListType(_guess_dtype(args[0]))
self.attach_sig()
return Signature(rt, args, None, pysig=self.pysig)
else:
item_type = types.undefined
return types.ListType(item_type)
return Typer()
@overload(numba_typeref_ctor)
def impl_numba_typeref_ctor(cls, *args):
"""Defines lowering for ``List()`` and ``List(iterable)``.
This defines the lowering logic to instantiate either an empty typed-list
or a typed-list initialised with values from a single iterable argument.
Parameters
----------
cls : TypeRef
Expecting a TypeRef of a precise ListType.
args: tuple
A tuple that contains a single iterable (optional)
Returns
-------
impl : function
An implementation suitable for lowering the constructor call.
See also: `redirect_type_ctor` in numba/cpython/bulitins.py
"""
list_ty = cls.instance_type
if not isinstance(list_ty, types.ListType):
return # reject
# Ensure the list is precisely typed.
if not list_ty.is_precise():
msg = "expecting a precise ListType but got {}".format(list_ty)
raise LoweringError(msg)
item_type = types.TypeRef(list_ty.item_type)
if args:
# special case 0d Numpy arrays
if isinstance(args[0], types.Array) and args[0].ndim == 0:
def impl(cls, *args):
# Instantiate an empty list and populate it with the single
# value from the array.
r = List.empty_list(item_type)
r.append(args[0].item())
return r
else:
def impl(cls, *args):
# Instantiate an empty list and populate it with values from
# the iterable.
r = List.empty_list(item_type)
for i in args[0]:
r.append(i)
return r
else:
def impl(cls, *args):
# Simply call .empty_list with the item type from *cls*
return List.empty_list(item_type)
return impl
| List |
python | walkccc__LeetCode | solutions/1011. Capacity To Ship Packages Within D Days/1011.py | {
"start": 0,
"end": 516
} | class ____:
def shipWithinDays(self, weights: list[int], days: int) -> int:
def shipDays(shipCapacity: int) -> int:
shipDays = 1
capacity = 0
for weight in weights:
if capacity + weight > shipCapacity:
shipDays += 1
capacity = weight
else:
capacity += weight
return shipDays
l = max(weights)
r = sum(weights)
return bisect.bisect_left(range(l, r), True,
key=lambda m: shipDays(m) <= days) + l
| Solution |
python | pytorch__pytorch | torch/_inductor/pattern_matcher.py | {
"start": 16632,
"end": 16996
} | class ____(PatternExpr):
"""
Match an arg, but don't pass it to handler
"""
def _match(self, node: NodeOrConstant, ctx: MatchContext) -> MatchResult:
return Match(ctx, self) # matches anything
def __repr__(self) -> str:
return "*"
def pretty_print(self, pp: PatternPrettyPrinter) -> str:
return "Ignored()"
| Ignored |
python | explosion__spaCy | spacy/language.py | {
"start": 101727,
"end": 102349
} | class ____:
"""Dataclass containing information about a component and its defaults
provided by the @Language.component or @Language.factory decorator. It's
created whenever a component is defined and stored on the Language class for
each component instance and factory instance.
"""
factory: str
default_config: Optional[Dict[str, Any]] = None # noqa: E704
assigns: Iterable[str] = tuple()
requires: Iterable[str] = tuple()
retokenizes: bool = False
scores: Iterable[str] = tuple()
default_score_weights: Optional[Dict[str, Optional[float]]] = None # noqa: E704
| FactoryMeta |
python | getsentry__sentry | src/sentry/monitors/migrations/0013_delete_monitor_is_muted_field.py | {
"start": 239,
"end": 1530
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("monitors", "0012_remove_monitor_is_muted_field"),
]
operations = [
SafeRemoveField(
model_name="monitor",
name="is_muted",
deletion_action=DeletionAction.DELETE,
),
]
| Migration |
python | joke2k__faker | tests/providers/test_company.py | {
"start": 14339,
"end": 17843
} | class ____:
"""Test ru_RU company provider methods"""
def test_calculate_checksum_nine_digits(self):
assert calculate_checksum("164027304") == "7"
assert calculate_checksum("629082979") == "0"
assert calculate_checksum("0203184580") == "5"
assert calculate_checksum("1113145630") == "0"
assert calculate_checksum("70517081385") == "1"
assert calculate_checksum("60307390550") == "0"
def test_businesses_inn(self, faker, num_samples):
for _ in range(num_samples):
inn = faker.businesses_inn()
assert len(inn) == 10
assert calculate_checksum(inn[:9]) == inn[9]
def test_individuals_inn(self, faker, num_samples):
for _ in range(num_samples):
inn = faker.individuals_inn()
assert len(inn) == 12
assert calculate_checksum(inn[:10]) == inn[10]
assert calculate_checksum(inn[:11]) == inn[11]
def test_businesses_ogrn(self, faker, num_samples):
max_year = datetime.now().year - 2000
for _ in range(num_samples):
ogrn = faker.businesses_ogrn()
assert len(ogrn) == 13
assert ogrn[0] in ("1", "5")
assert 1 <= int(ogrn[1:3]) <= max_year
assert 1 <= int(ogrn[3:5]) <= 92
assert int(ogrn[:-1]) % 11 % 10 == int(ogrn[-1])
def test_individuals_ogrn(self, faker, num_samples):
max_year = datetime.now().year - 2000
for _ in range(num_samples):
ogrn = faker.individuals_ogrn()
assert len(ogrn) == 15
assert ogrn[0] == "3"
assert 1 <= int(ogrn[1:3]) <= max_year
assert 1 <= int(ogrn[3:5]) <= 92
assert int(ogrn[:-1]) % 13 % 10 == int(ogrn[-1])
def test_kpp(self, faker, num_samples):
for _ in range(num_samples):
kpp = faker.kpp()
assert len(kpp) == 9
assert 1 <= int(kpp[0:2]) <= 92
assert int(kpp[2:4]) > 0
assert kpp[4:6] in ("01", "43", "44", "45")
def test_company_prefix(self, faker, num_samples):
for _ in range(num_samples):
prefix = faker.company_prefix()
assert isinstance(prefix, str)
assert prefix in RuRuCompanyProvider.company_prefixes
def test_company_suffix(self, faker, num_samples):
for _ in range(num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in RuRuCompanyProvider.company_suffixes
def test_large_companies(self, faker, num_samples):
for _ in range(num_samples):
company = faker.large_company()
assert isinstance(company, str)
assert company in RuRuCompanyProvider.large_companies
def test_catchphrase(self, faker, num_samples):
for _ in range(num_samples):
catchphrase = faker.catch_phrase()
assert isinstance(catchphrase, str)
assert " и " in catchphrase
def test_bs(self, faker, num_samples):
for _ in range(num_samples):
bs = faker.bs()
bs_words = bs.split()
assert isinstance(bs, str)
assert bs_words[0] in RuRuCompanyProvider.bsWords[0]
def test_snils(self, faker, num_samples):
for _ in range(num_samples):
snils = faker.snils()
assert len(snils) == 11
assert snils[-2:] == calculate_snils_checksum(snils[:10])
| TestRuRu |
python | keras-team__keras | keras/src/backend/jax/core.py | {
"start": 18274,
"end": 19861
} | class ____(base_name_scope):
def __init__(self, name, **kwargs):
super().__init__(name, **kwargs)
self._jax_name_scope = jax.named_scope(name)
def __enter__(self):
name_scope_stack = global_state.get_global_attribute(
"name_scope_stack", default=[], set_to_default=True
)
if self.deduplicate and name_scope_stack:
parent_caller = name_scope_stack[-1].caller
parent_name = name_scope_stack[-1].name
if (
self.caller is not None
and self.caller is parent_caller
and self.name == parent_name
):
return self
name_scope_stack.append(self)
self._pop_on_exit = True
self._jax_name_scope.__enter__()
return self
def __exit__(self, *args, **kwargs):
super().__exit__(*args, **kwargs)
if self._pop_on_exit:
self._jax_name_scope.__exit__(*args, **kwargs)
def device_scope(device_name):
if isinstance(device_name, str):
# We support string value like "cpu:0", "gpu:1", etc.
device_name = device_name.lower()
jax_device = distribution_lib._to_backend_device(device_name)
elif not isinstance(device_name, jax.Device):
raise ValueError(
"Invalid value for argument `device_name`. "
"Expected a string like 'gpu:0' or a `jax.Device` instance. "
f"Received: device_name='{device_name}'"
)
else:
jax_device = device_name
return jax.default_device(jax_device)
| name_scope |
python | huggingface__transformers | src/transformers/models/instructblipvideo/modular_instructblipvideo.py | {
"start": 1466,
"end": 1540
} | class ____(InstructBlipVisionConfig):
pass
| InstructBlipVideoVisionConfig |
python | huggingface__transformers | src/transformers/models/clip/modeling_clip.py | {
"start": 3894,
"end": 5726
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`CLIPTextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`CLIPVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
| CLIPOutput |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 10940,
"end": 11208
} | class ____(TypedDict, total=False):
verbose: bool
use_component_modules: Sequence[str]
suppress_warnings: Sequence[DgWarningIdentifier]
telemetry: RawDgTelemetryConfig
# ########################
# ##### PROJECT
# ########################
| DgRawCliConfig |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/_poly_fixtures.py | {
"start": 12597,
"end": 13236
} | class ____(_PolymorphicFixtureBase):
select_type = "Joins"
@classmethod
def _get_polymorphics(cls):
people, engineers, managers, boss = (
cls.tables.people,
cls.tables.engineers,
cls.tables.managers,
cls.tables.boss,
)
person_join = people.outerjoin(engineers).outerjoin(managers)
manager_join = people.join(managers).outerjoin(boss)
person_with_polymorphic = ([Person, Manager, Engineer], person_join)
manager_with_polymorphic = ("*", manager_join)
return person_with_polymorphic, manager_with_polymorphic
| _PolymorphicJoins |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 4385,
"end": 4580
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("REQUIRED_REVIEWERS", "WAIT_TIMER")
| DeploymentProtectionRuleType |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.