| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | pydantic__pydantic | tests/mypy/modules/dataclass_no_any.py | {
"start": 56,
"end": 124
} | class ____:
foo: int
@dataclass(config={'title': 'Bar Title'})
| Foo |
python | docker__docker-py | tests/unit/sshadapter_test.py | {
"start": 80,
"end": 1144
} | class ____(unittest.TestCase):
@staticmethod
def test_ssh_hostname_prefix_trim():
conn = docker.transport.SSHHTTPAdapter(
base_url="ssh://user@hostname:1234", shell_out=True)
assert conn.ssh_host == "user@hostname:1234"
@staticmethod
def test_ssh_parse_url():
c = SSHSocket(host="user@hostname:1234")
assert c.host == "hostname"
assert c.port == "1234"
assert c.user == "user"
@staticmethod
def test_ssh_parse_hostname_only():
c = SSHSocket(host="hostname")
assert c.host == "hostname"
assert c.port is None
assert c.user is None
@staticmethod
def test_ssh_parse_user_and_hostname():
c = SSHSocket(host="user@hostname")
assert c.host == "hostname"
assert c.port is None
assert c.user == "user"
@staticmethod
def test_ssh_parse_hostname_and_port():
c = SSHSocket(host="hostname:22")
assert c.host == "hostname"
assert c.port == "22"
assert c.user is None
| SSHAdapterTest |
python | sphinx-doc__sphinx | doc/development/tutorials/examples/recipe.py | {
"start": 3696,
"end": 5439
} | class ____(Domain):
name = 'recipe'
label = 'Recipe Sample'
roles = {
'ref': XRefRole(),
}
directives = {
'recipe': RecipeDirective,
}
indices = {
RecipeIndex,
IngredientIndex,
}
initial_data = {
'recipes': [], # object list
'recipe_ingredients': {}, # name -> object
}
data_version = 0
def get_full_qualified_name(self, node):
return f'recipe.{node.arguments[0]}'
def get_objects(self):
yield from self.data['recipes']
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
match = [
(docname, anchor)
for name, sig, typ, docname, anchor, prio in self.get_objects()
if sig == target
]
if len(match) > 0:
todocname = match[0][0]
targ = match[0][1]
return make_refnode(builder, fromdocname, todocname, targ, contnode, targ)
else:
print('Awww, found nothing')
return None
def add_recipe(self, signature, ingredients):
"""Add a new recipe to the domain."""
name = f'recipe.{signature}'
anchor = f'recipe-{signature}'
self.data['recipe_ingredients'][name] = ingredients
# name, dispname, type, docname, anchor, priority
self.data['recipes'].append((
name,
signature,
'Recipe',
self.env.current_document.docname,
anchor,
0,
))
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_domain(RecipeDomain)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| RecipeDomain |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 7426,
"end": 7600
} | class ____(ConcreteTemplate):
cases = list(integer_binop_cases)
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
@infer_global(divmod)
| BinOpFloorDiv |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_redshift_cluster.py | {
"start": 1008,
"end": 3502
} | class ____:
@staticmethod
def _create_clusters():
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(
ClusterIdentifier="test_cluster",
NodeType="dc1.large",
MasterUsername="admin",
MasterUserPassword="mock_password",
)
client.create_cluster(
ClusterIdentifier="test_cluster_2",
NodeType="dc1.large",
MasterUsername="admin",
MasterUserPassword="mock_password",
)
if not client.describe_clusters()["Clusters"]:
raise ValueError("AWS not properly mocked")
@mock_aws
def test_get_client_type_returns_a_boto3_client_of_the_requested_type(self):
self._create_clusters()
hook = AwsBaseHook(aws_conn_id="aws_default", client_type="redshift")
client_from_hook = hook.conn
clusters = client_from_hook.describe_clusters()["Clusters"]
assert len(clusters) == 2
@mock_aws
def test_restore_from_cluster_snapshot_returns_dict_with_cluster_data(self):
self._create_clusters()
hook = RedshiftHook(aws_conn_id="aws_default")
hook.create_cluster_snapshot("test_snapshot", "test_cluster")
assert (
hook.restore_from_cluster_snapshot("test_cluster_3", "test_snapshot")["ClusterIdentifier"]
== "test_cluster_3"
)
@mock_aws
def test_delete_cluster_returns_a_dict_with_cluster_data(self):
self._create_clusters()
hook = RedshiftHook(aws_conn_id="aws_default")
cluster = hook.delete_cluster("test_cluster_2")
assert cluster is not None
@mock_aws
def test_create_cluster_snapshot_returns_snapshot_data(self):
self._create_clusters()
hook = RedshiftHook(aws_conn_id="aws_default")
snapshot = hook.create_cluster_snapshot("test_snapshot_2", "test_cluster")
assert snapshot is not None
@mock_aws
def test_cluster_status_returns_cluster_not_found(self):
self._create_clusters()
hook = RedshiftHook(aws_conn_id="aws_default")
status = hook.cluster_status("test_cluster_not_here")
assert status == "cluster_not_found"
@mock_aws
def test_cluster_status_returns_available_cluster(self):
self._create_clusters()
hook = RedshiftHook(aws_conn_id="aws_default")
status = hook.cluster_status("test_cluster")
assert status == "available"
| TestRedshiftHook |
python | Netflix__metaflow | test/unit/spin/flows/merge_artifacts_flow.py | {
"start": 38,
"end": 1273
} | class ____(FlowSpec):
@step
def start(self):
self.pass_down = "a"
self.next(self.a, self.b)
@step
def a(self):
self.common = 5
self.x = 1
self.y = 3
self.from_a = 6
self.next(self.join)
@step
def b(self):
self.common = 5
self.x = 2
self.y = 4
self.next(self.join)
@step
def join(self, inputs):
self.x = inputs.a.x
self.merge_artifacts(inputs, exclude=["y"])
print("x is %s" % self.x)
print("pass_down is %s" % self.pass_down)
print("common is %d" % self.common)
print("from_a is %d" % self.from_a)
self.next(self.c)
@step
def c(self):
self.next(self.d, self.e)
@step
def d(self):
self.conflicting = 7
self.next(self.join2)
@step
def e(self):
self.conflicting = 8
self.next(self.join2)
@step
def join2(self, inputs):
self.merge_artifacts(inputs, include=["pass_down", "common"])
print("Only pass_down and common exist here")
self.next(self.end)
@step
def end(self):
pass
if __name__ == "__main__":
MergeArtifactsFlow()
| MergeArtifactsFlow |
python | matplotlib__matplotlib | lib/matplotlib/font_manager.py | {
"start": 35056,
"end": 58925
} | class ____:
"""
On import, the `FontManager` singleton instance creates a list of ttf and
afm fonts and caches their `FontProperties`. The `FontManager.findfont`
method does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, the default
font is returned.
Fonts added with the `FontManager.addfont` method will not persist in the
cache; therefore, `addfont` will need to be called every time Matplotlib is
imported. This method should only be used if and when a font cannot be
installed on your operating system by other means.
Notes
-----
The `FontManager.addfont` method must be called on the global `FontManager`
instance.
Example usage::
import matplotlib.pyplot as plt
from matplotlib import font_manager
font_dirs = ["/resources/fonts"] # The path to the custom font file.
font_files = font_manager.findSystemFonts(fontpaths=font_dirs)
for font_file in font_files:
font_manager.fontManager.addfont(font_file)
"""
# Increment this version number whenever the font cache data
    # format or behavior has changed and requires existing font
# cache files to be rebuilt.
__version__ = '3.11.0a1'
def __init__(self, size=None, weight='normal'):
self._version = self.__version__
self.__default_weight = weight
self.default_size = size
# Create list of font paths.
paths = [cbook._get_data_path('fonts', subdir)
for subdir in ['ttf', 'afm', 'pdfcorefonts']]
_log.debug('font search path %s', paths)
self.defaultFamily = {
'ttf': 'DejaVu Sans',
'afm': 'Helvetica'}
self.afmlist = []
self.ttflist = []
# Delay the warning by 5s.
timer = threading.Timer(5, lambda: _log.warning(
'Matplotlib is building the font cache; this may take a moment.'))
timer.start()
try:
for fontext in ["afm", "ttf"]:
for path in [*findSystemFonts(paths, fontext=fontext),
*findSystemFonts(fontext=fontext)]:
try:
self.addfont(path)
except OSError as exc:
_log.info("Failed to open font file %s: %s", path, exc)
except Exception as exc:
_log.info("Failed to extract font properties from %s: "
"%s", path, exc)
finally:
timer.cancel()
def addfont(self, path):
"""
Cache the properties of the font at *path* to make it available to the
`FontManager`. The type of font is inferred from the path suffix.
Parameters
----------
path : str or path-like
Notes
-----
This method is useful for adding a custom font without installing it in
your operating system. See the `FontManager` singleton instance for
usage and caveats about this function.
"""
# Convert to string in case of a path as
# afmFontProperty and FT2Font expect this
path = os.fsdecode(path)
if Path(path).suffix.lower() == ".afm":
with open(path, "rb") as fh:
font = _afm.AFM(fh)
prop = afmFontProperty(path, font)
self.afmlist.append(prop)
else:
font = ft2font.FT2Font(path)
prop = ttfFontProperty(font)
self.ttflist.append(prop)
self._findfont_cached.cache_clear()
@property
def defaultFont(self):
# Lazily evaluated (findfont then caches the result) to avoid including
# the venv path in the json serialization.
return {ext: self.findfont(family, fontext=ext)
for ext, family in self.defaultFamily.items()}
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
@staticmethod
def get_default_size():
"""
Return the default font size.
"""
return mpl.rcParams['font.size']
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
@staticmethod
def _expand_aliases(family):
if family in ('sans', 'sans serif'):
family = 'sans-serif'
return mpl.rcParams['font.' + family]
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Return a match score between the list of font families in
*families* and the font family name *family2*.
An exact match at the head of the list returns 0.0.
A match further down the list will return between 0 and 1.
No match will return 1.0.
"""
if not isinstance(families, (list, tuple)):
families = [families]
elif len(families) == 0:
return 1.0
family2 = family2.lower()
step = 1 / len(families)
for i, family1 in enumerate(families):
family1 = family1.lower()
if family1 in font_family_aliases:
options = [*map(str.lower, self._expand_aliases(family1))]
if family2 in options:
idx = options.index(family2)
return (i + (idx / len(options))) * step
elif family1 == family2:
# The score should be weighted by where in the
# list the font was found.
return i * step
return 1.0
def score_style(self, style1, style2):
"""
Return a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif (style1 in ('italic', 'oblique')
and style2 in ('italic', 'oblique')):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Return a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Return a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Return a match score between *weight1* and *weight2*.
        The result is 0.0 if both weight1 and weight2 are given as strings
and have the same value.
Otherwise, the result is the absolute value of the difference between
the CSS numeric values of *weight1* and *weight2*, normalized between
0.05 and 1.0.
"""
# exact match of the weight names, e.g. weight1 == weight2 == "regular"
if cbook._str_equal(weight1, weight2):
return 0.0
w1 = _normalize_weight(weight1)
w2 = _normalize_weight(weight2)
return 0.95 * (abs(w1 - w2) / 1000) + 0.05
def score_size(self, size1, size2):
"""
Return a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
# Size value should have already been
try:
sizeval1 = float(size1)
except ValueError:
sizeval1 = self.default_size * font_scalings[size1]
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72
def findfont(self, prop, fontext='ttf', directory=None,
fallback_to_default=True, rebuild_if_missing=True):
"""
Find the path to the font file most closely matching the given font properties.
Parameters
----------
prop : str or `~matplotlib.font_manager.FontProperties`
The font properties to search for. This can be either a
`.FontProperties` object or a string defining a
`fontconfig patterns`_.
fontext : {'ttf', 'afm'}, default: 'ttf'
The extension of the font file:
- 'ttf': TrueType and OpenType fonts (.ttf, .ttc, .otf)
- 'afm': Adobe Font Metrics (.afm)
directory : str, optional
If given, only search this directory and its subdirectories.
fallback_to_default : bool
If True, will fall back to the default font family (usually
"DejaVu Sans" or "Helvetica") if the first lookup hard-fails.
rebuild_if_missing : bool
Whether to rebuild the font cache and search again if the first
match appears to point to a nonexisting font (i.e., the font cache
contains outdated entries).
Returns
-------
str
The filename of the best matching font.
Notes
-----
This performs a nearest neighbor search. Each font is given a
similarity score to the target font properties. The first font with
the highest score is returned. If no matches below a certain
threshold are found, the default font (usually DejaVu Sans) is
returned.
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
.. _fontconfig patterns:
https://www.freedesktop.org/software/fontconfig/fontconfig-user.html
"""
# Pass the relevant rcParams (and the font manager, as `self`) to
# _findfont_cached so to prevent using a stale cache entry after an
# rcParam was changed.
rc_params = tuple(tuple(mpl.rcParams[key]) for key in [
"font.serif", "font.sans-serif", "font.cursive", "font.fantasy",
"font.monospace"])
ret = self._findfont_cached(
prop, fontext, directory, fallback_to_default, rebuild_if_missing,
rc_params)
if isinstance(ret, cbook._ExceptionInfo):
raise ret.to_exception()
return ret
def get_font_names(self):
"""Return the list of available fonts."""
return list({font.name for font in self.ttflist})
def _find_fonts_by_props(self, prop, fontext='ttf', directory=None,
fallback_to_default=True, rebuild_if_missing=True):
"""
Find the paths to the font files most closely matching the given properties.
Parameters
----------
prop : str or `~matplotlib.font_manager.FontProperties`
The font properties to search for. This can be either a
`.FontProperties` object or a string defining a
`fontconfig patterns`_.
fontext : {'ttf', 'afm'}, default: 'ttf'
The extension of the font file:
- 'ttf': TrueType and OpenType fonts (.ttf, .ttc, .otf)
- 'afm': Adobe Font Metrics (.afm)
directory : str, optional
If given, only search this directory and its subdirectories.
fallback_to_default : bool
If True, will fall back to the default font family (usually
"DejaVu Sans" or "Helvetica") if none of the families were found.
rebuild_if_missing : bool
Whether to rebuild the font cache and search again if the first
match appears to point to a nonexisting font (i.e., the font cache
contains outdated entries).
Returns
-------
list[str]
The paths of the fonts found.
Notes
-----
This is an extension/wrapper of the original findfont API, which only
returns a single font for given font properties. Instead, this API
returns a list of filepaths of multiple fonts which closely match the
given font properties. Since this internally uses the original API,
there's no change to the logic of performing the nearest neighbor
search. See `findfont` for more details.
"""
prop = FontProperties._from_any(prop)
fpaths = []
for family in prop.get_family():
cprop = prop.copy()
cprop.set_family(family) # set current prop's family
try:
fpaths.append(
self.findfont(
cprop, fontext, directory,
fallback_to_default=False, # don't fallback to default
rebuild_if_missing=rebuild_if_missing,
)
)
except ValueError:
if family in font_family_aliases:
_log.warning(
"findfont: Generic family %r not found because "
"none of the following families were found: %s",
family, ", ".join(self._expand_aliases(family))
)
else:
_log.warning("findfont: Font family %r not found.", family)
# only add default family if no other font was found and
# fallback_to_default is enabled
if not fpaths:
if fallback_to_default:
dfamily = self.defaultFamily[fontext]
cprop = prop.copy()
cprop.set_family(dfamily)
fpaths.append(
self.findfont(
cprop, fontext, directory,
fallback_to_default=True,
rebuild_if_missing=rebuild_if_missing,
)
)
else:
raise ValueError("Failed to find any font, and fallback "
"to the default font was disabled")
return fpaths
@lru_cache(1024)
def _findfont_cached(self, prop, fontext, directory, fallback_to_default,
rebuild_if_missing, rc_params):
prop = FontProperties._from_any(prop)
fname = prop.get_file()
if fname is not None:
return fname
if fontext == 'afm':
fontlist = self.afmlist
else:
fontlist = self.ttflist
best_score = 1e64
best_font = None
_log.debug('findfont: Matching %s.', prop)
for font in fontlist:
if (directory is not None and
Path(directory) not in Path(font.fname).parents):
continue
# Matching family should have top priority, so multiply it by 10.
score = (self.score_family(prop.get_family(), font.name) * 10
+ self.score_style(prop.get_style(), font.style)
+ self.score_variant(prop.get_variant(), font.variant)
+ self.score_weight(prop.get_weight(), font.weight)
+ self.score_stretch(prop.get_stretch(), font.stretch)
+ self.score_size(prop.get_size(), font.size))
_log.debug('findfont: score(%s) = %s', font, score)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is not None and (_normalize_weight(prop.get_weight()) !=
_normalize_weight(best_font.weight)):
_log.warning('findfont: Failed to find font weight %s, now using %s.',
prop.get_weight(), best_font.weight)
if best_font is None or best_score >= 10.0:
if fallback_to_default:
_log.warning(
'findfont: Font family %s not found. Falling back to %s.',
prop.get_family(), self.defaultFamily[fontext])
for family in map(str.lower, prop.get_family()):
if family in font_family_aliases:
_log.warning(
"findfont: Generic family %r not found because "
"none of the following families were found: %s",
family, ", ".join(self._expand_aliases(family)))
default_prop = prop.copy()
default_prop.set_family(self.defaultFamily[fontext])
return self.findfont(default_prop, fontext, directory,
fallback_to_default=False)
else:
# This return instead of raise is intentional, as we wish to
# cache that it was not found, which will not occur if it was
# actually raised.
return cbook._ExceptionInfo(
ValueError,
f"Failed to find font {prop}, and fallback to the default font was "
f"disabled"
)
else:
_log.debug('findfont: Matching %s to %s (%r) with score of %f.',
prop, best_font.name, best_font.fname, best_score)
result = best_font.fname
if not os.path.isfile(result):
if rebuild_if_missing:
_log.info(
'findfont: Found a missing font file. Rebuilding cache.')
new_fm = _load_fontmanager(try_read_cache=False)
# Replace self by the new fontmanager, because users may have
# a reference to this specific instance.
# TODO: _load_fontmanager should really be (used by) a method
# modifying the instance in place.
vars(self).update(vars(new_fm))
return self.findfont(
prop, fontext, directory, rebuild_if_missing=False)
else:
# This return instead of raise is intentional, as we wish to
# cache that it was not found, which will not occur if it was
# actually raised.
return cbook._ExceptionInfo(ValueError, "No valid font could be found")
return _cached_realpath(result)
@lru_cache
def is_opentype_cff_font(filename):
"""
Return whether the given font is a Postscript Compact Font Format Font
embedded in an OpenType wrapper. Used by the PostScript and PDF backends
that cannot subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
with open(filename, 'rb') as fd:
return fd.read(4) == b"OTTO"
else:
return False
@lru_cache(64)
def _get_font(font_filepaths, hinting_factor, *, _kerning_factor, thread_id,
enable_last_resort):
first_fontpath, *rest = font_filepaths
fallback_list = [
ft2font.FT2Font(fpath, hinting_factor, _kerning_factor=_kerning_factor)
for fpath in rest
]
last_resort_path = _cached_realpath(
cbook._get_data_path('fonts', 'ttf', 'LastResortHE-Regular.ttf'))
try:
last_resort_index = font_filepaths.index(last_resort_path)
except ValueError:
last_resort_index = -1
# Add Last Resort font so we always have glyphs regardless of font, unless we're
# already in the list.
if enable_last_resort:
fallback_list.append(
ft2font.FT2Font(last_resort_path, hinting_factor,
_kerning_factor=_kerning_factor,
_warn_if_used=True))
last_resort_index = len(fallback_list)
font = ft2font.FT2Font(
first_fontpath, hinting_factor,
_fallback_list=fallback_list,
_kerning_factor=_kerning_factor
)
# Ensure we are using the right charmap for the Last Resort font; FreeType picks the
# Unicode one by default, but this exists only for Windows, and is empty.
if last_resort_index == 0:
font.set_charmap(0)
elif last_resort_index > 0:
fallback_list[last_resort_index - 1].set_charmap(0)
return font
# FT2Font objects cannot be used across fork()s because they reference the same
# FT_Library object. While invalidating *all* existing FT2Fonts after a fork
# would be too complicated to be worth it, the main way FT2Fonts get reused is
# via the cache of _get_font, which we can empty upon forking (not on Windows,
# which has no fork() or register_at_fork()).
if hasattr(os, "register_at_fork"):
os.register_at_fork(after_in_child=_get_font.cache_clear)
@lru_cache(64)
def _cached_realpath(path):
# Resolving the path avoids embedding the font twice in pdf/ps output if a
# single font is selected using two different relative paths.
return os.path.realpath(path)
def get_font(font_filepaths, hinting_factor=None):
"""
Get an `.ft2font.FT2Font` object given a list of file paths.
Parameters
----------
font_filepaths : Iterable[str, Path, bytes], str, Path, bytes
Relative or absolute paths to the font files to be used.
If a single string, bytes, or `pathlib.Path`, then it will be treated
as a list with that entry only.
If more than one filepath is passed, then the returned FT2Font object
will fall back through the fonts, in the order given, to find a needed
glyph.
Returns
-------
`.ft2font.FT2Font`
"""
if isinstance(font_filepaths, (str, Path, bytes)):
paths = (_cached_realpath(font_filepaths),)
else:
paths = tuple(_cached_realpath(fname) for fname in font_filepaths)
hinting_factor = mpl._val_or_rc(hinting_factor, 'text.hinting_factor')
return _get_font(
# must be a tuple to be cached
paths,
hinting_factor,
_kerning_factor=mpl.rcParams['text.kerning_factor'],
# also key on the thread ID to prevent segfaults with multi-threading
thread_id=threading.get_ident(),
enable_last_resort=mpl.rcParams['font.enable_last_resort'],
)
def _load_fontmanager(*, try_read_cache=True):
fm_path = Path(
mpl.get_cachedir(), f"fontlist-v{FontManager.__version__}.json")
if try_read_cache:
try:
fm = json_load(fm_path)
except Exception:
pass
else:
if getattr(fm, "_version", object()) == FontManager.__version__:
_log.debug("Using fontManager instance from %s", fm_path)
return fm
fm = FontManager()
json_dump(fm, fm_path)
_log.info("generated new fontManager")
return fm
fontManager = _load_fontmanager()
findfont = fontManager.findfont
get_font_names = fontManager.get_font_names
| FontManager |
python | pydantic__pydantic | docs/plugins/algolia.py | {
"start": 508,
"end": 6561
} | class ____(TypedDict):
content: str
pageID: str
abs_url: str
title: str
objectID: str
rank: int
records: list[AlgoliaRecord] = []
records_ta = TypeAdapter(list[AlgoliaRecord])
# these values should match docs/javascripts/search-worker.js.
ALGOLIA_APP_ID = 'KPPUDTIAVX'
ALGOLIA_INDEX_NAME = 'pydantic-docs'
# Algolia has a limit of 100kb per record in the paid plan,
# leave some space for the other fields as well.
MAX_CONTENT_LENGTH = 90_000
def get_heading_text(heading: Tag):
return heading.get_text().replace('¶', '').strip().replace('\n', ' ')
def on_page_content(html: str, page: Page, config: Config, files: Files) -> str:
if not os.getenv('CI'):
return html
from bs4 import BeautifulSoup
assert page.title is not None, 'Page title must not be None'
title = cast(str, page.title)
soup = BeautifulSoup(html, 'html.parser')
# If the page does not start with a heading, add the h1 with the title
    # Some examples don't have a heading, or start with h2.
first_element = soup.find()
if (
not first_element
or not first_element.name # type: ignore[reportAttributeAccessIssue]
or first_element.name not in ['h1', 'h2', 'h3'] # type: ignore[reportAttributeAccessIssue]
):
soup.insert(0, BeautifulSoup(f'<h1 id="{title}">{title}</h1>', 'html.parser'))
# Clean up presentational and UI elements
for element in soup.find_all(['autoref']):
element.decompose()
# this removes the large source code embeds from Github
for element in soup.find_all('details'):
element.decompose()
# Cleanup code examples
for extra in soup.find_all('div', attrs={'class': 'language-python highlight'}):
extra.replace_with(BeautifulSoup(f'<pre>{extra.find("code").get_text()}</pre>', 'html.parser'))
# Cleanup code examples, part 2
for extra in soup.find_all('div', attrs={'class': 'language-python doc-signature highlight'}):
extra.replace_with(BeautifulSoup(f'<pre>{extra.find("code").get_text()}</pre>', 'html.parser'))
# The API reference generates HTML tables with line numbers, this strips the line numbers cell and goes back to a code block
for extra in soup.find_all('table', attrs={'class': 'highlighttable'}):
extra.replace_with(BeautifulSoup(f'<pre>{extra.find("code").get_text()}</pre>', 'html.parser'))
headings = soup.find_all(['h1', 'h2', 'h3'])
# Use the rank to put the sections in the beginning higher in the search results
rank = 100
# Process each section
for current_heading in headings:
heading_id = current_heading.get('id', '')
section_title = get_heading_text(current_heading) # type: ignore[reportArgumentType]
# Get content until next heading
content: list[str] = []
sibling = current_heading.find_next_sibling()
while sibling and sibling.name not in {'h1', 'h2', 'h3'}:
content.append(str(sibling))
sibling = sibling.find_next_sibling()
section_html = ''.join(content)
section_soup = BeautifulSoup(section_html, 'html.parser')
section_plain_text = section_soup.get_text(' ', strip=True)
# Create anchor URL
anchor_url: str = f'{page.abs_url}#{heading_id}' if heading_id else page.abs_url or ''
record_title = title
if current_heading.name == 'h2':
record_title = f'{title} - {section_title}'
elif current_heading.name == 'h3':
previous_heading: Tag = current_heading.find_previous(['h1', 'h2']) # type: ignore[reportAssignmentType]
record_title = f'{title} - {get_heading_text(previous_heading)} - {section_title}'
# print(f'Adding record {record_title}')
# Create record for this section
records.append(
AlgoliaRecord(
content=section_plain_text,
pageID=title,
abs_url=anchor_url,
title=record_title,
objectID=anchor_url,
rank=rank,
)
)
rank -= 5
return html
ALGOLIA_RECORDS_FILE = 'algolia_records.json'
def on_post_build(config: Config) -> None:
if records:
algolia_records_path = Path(config['site_dir']) / ALGOLIA_RECORDS_FILE
with algolia_records_path.open('wb') as f:
f.write(records_ta.dump_json(records))
def algolia_upload() -> None:
from algoliasearch.search.client import SearchClientSync
algolia_write_api_key = os.environ['ALGOLIA_WRITE_API_KEY']
client = SearchClientSync(ALGOLIA_APP_ID, algolia_write_api_key)
filtered_records: list[AlgoliaRecord] = []
algolia_records_path = Path.cwd() / 'site' / ALGOLIA_RECORDS_FILE
with algolia_records_path.open('rb') as f:
all_records = records_ta.validate_json(f.read())
for record in all_records:
content = record['content']
if len(content) > MAX_CONTENT_LENGTH:
print(
f"Record with title '{record['title']}' has more than {MAX_CONTENT_LENGTH} characters, {len(content)}."
)
print(content)
else:
filtered_records.append(record)
print(f'Uploading {len(filtered_records)} out of {len(all_records)} records to Algolia...')
client.clear_objects(index_name=ALGOLIA_INDEX_NAME)
client.set_settings(
index_name=ALGOLIA_INDEX_NAME,
index_settings={
'searchableAttributes': ['title', 'content'],
'attributesToSnippet': ['content:40'],
'customRanking': [
'desc(rank)',
],
},
)
client.batch(
index_name=ALGOLIA_INDEX_NAME,
batch_write_params={'requests': [{'action': 'addObject', 'body': record} for record in filtered_records]},
)
if __name__ == '__main__':
if sys.argv[-1] == 'upload':
algolia_upload()
else:
print('Run with "upload" argument to upload records to Algolia.')
sys.exit(1)
| AlgoliaRecord |
python | PyCQA__pylint | pylint/utils/pragma_parser.py | {
"start": 1327,
"end": 2588
} | class ____(NamedTuple):
action: str
messages: list[str]
ATOMIC_KEYWORDS = frozenset(("disable-all", "skip-file"))
MESSAGE_KEYWORDS = frozenset(
("disable-next", "disable-msg", "enable-msg", "disable", "enable")
)
# sorted is necessary because sets are unordered collections and ALL_KEYWORDS
# string should not vary between executions
# reverse is necessary in order to have the longest keywords first, so that, for example,
# 'disable' string should not be matched instead of 'disable-all'
ALL_KEYWORDS = "|".join(
sorted(ATOMIC_KEYWORDS | MESSAGE_KEYWORDS, key=len, reverse=True)
)
TOKEN_SPECIFICATION = [
("KEYWORD", rf"\b({ALL_KEYWORDS:s})\b"),
("MESSAGE_STRING", r"[0-9A-Za-z\-\_]{2,}"), # Identifiers
("ASSIGN", r"="), # Assignment operator
("MESSAGE_NUMBER", r"[CREIWF]{1}\d*"),
]
TOK_REGEX = "|".join(
f"(?P<{token_name:s}>{token_rgx:s})"
for token_name, token_rgx in TOKEN_SPECIFICATION
)
def emit_pragma_representer(action: str, messages: list[str]) -> PragmaRepresenter:
if not messages and action in MESSAGE_KEYWORDS:
raise InvalidPragmaError(
"The keyword is not followed by message identifier", action
)
return PragmaRepresenter(action, messages)
| PragmaRepresenter |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-awsdocdb/llama_index/vector_stores/awsdocdb/base.py | {
"start": 3369,
"end": 10794
} | class ____(BasePydanticVectorStore):
"""
AWS DocumentDB Vector Store.
To use, you should have both:
- the ``pymongo`` python package installed
- a connection string associated with a DocumentDB Instance
Please refer to the official Vector Search documentation for more details:
https://docs.aws.amazon.com/documentdb/latest/developerguide/vector-search.html
"""
stores_text: bool = True
flat_metadata: bool = True
_docdb_client: MongoClient = PrivateAttr()
_similarity_score: AWSDocDbVectorStoreSimilarityType = PrivateAttr()
_collection: Any = PrivateAttr()
_embedding_key: str = PrivateAttr()
_id_key: str = PrivateAttr()
_text_key: str = PrivateAttr()
_metadata_key: str = PrivateAttr()
_insert_kwargs: Dict = PrivateAttr()
_index_crud: DocDbIndex = PrivateAttr()
def __init__(
self,
docdb_client: Optional[Any] = None,
db_name: str = "default_db",
index_name: str = "default_index",
collection_name: str = "default_collection",
id_key: str = "id",
embedding_key: str = "embedding",
text_key: str = "text",
metadata_key: str = "metadata",
insert_kwargs: Optional[Dict] = None,
similarity_score="cosine",
**kwargs: Any,
) -> None:
"""
Initialize the vector store.
Args:
docdb_client: A DocumentDB client.
db_name: A DocumentDB database name.
collection_name: A DocumentDB collection name.
id_key: The data field to use as the id.
embedding_key: A DocumentDB field that will contain
the embedding for each document.
text_key: A DocumentDB field that will contain the text for each document.
metadata_key: A DocumentDB field that will contain
the metadata for each document.
insert_kwargs: The kwargs used during `insert`.
"""
super().__init__()
if docdb_client is not None:
self._docdb_client = cast(MongoClient, docdb_client)
else:
raise ValueError("Must specify connection string to DocumentDB instance ")
self._similarity_score = similarity_score
self._collection = self._docdb_client[db_name][collection_name]
self._embedding_key = embedding_key
self._id_key = id_key
self._text_key = text_key
self._metadata_key = metadata_key
self._insert_kwargs = insert_kwargs or {}
self._index_crud = DocDbIndex(index_name, self._embedding_key, self._collection)
@classmethod
def class_name(cls) -> str:
return "AWSDocDbVectorStore"
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
Returns:
A List of ids for successfully added nodes.
"""
ids = []
data_to_insert = []
for node in nodes:
metadata = node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
)
entry = {
self._id_key: node.node_id,
self._embedding_key: node.get_embedding(),
self._text_key: node.get_content(metadata_mode=MetadataMode.NONE) or "",
self._metadata_key: metadata,
}
data_to_insert.append(entry)
ids.append(node.node_id)
logger.debug("Inserting data into DocumentDB: %s", data_to_insert)
insert_result = self._collection.insert_many(
data_to_insert, **self._insert_kwargs
)
logger.debug("Result of insert: %s", insert_result)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using by id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
if ref_doc_id is None:
raise ValueError("No document id provided to delete.")
self._collection.delete_one({self._metadata_key + ".ref_doc_id": ref_doc_id})
@property
def client(self) -> Any:
"""Return DocDB client."""
return self._docdb_client
def _query(
self, query: VectorStoreQuery, projection: Optional[Dict[str, int]] = None
) -> VectorStoreQueryResult:
params: Dict[str, Any] = {
"vector": query.query_embedding,
"path": self._embedding_key,
"similarity": self._similarity_score,
"k": query.similarity_top_k,
}
if query.filters:
params["filter"] = _to_mongodb_filter(query.filters)
if projection is None:
pipeline = [{"$search": {"vectorSearch": params}}]
else:
pipeline = [{"$search": {"vectorSearch": params}}, {"$project": projection}]
logger.debug("Running query pipeline: %s", pipeline)
cursor = self._collection.aggregate(pipeline) # type: ignore
top_k_nodes = []
top_k_ids = []
top_k_scores = []
for res in cursor:
text = res.pop(self._text_key)
vector = res.pop(self._embedding_key)
id = res.pop(self._id_key)
metadata_dict = res.pop(self._metadata_key)
score = similarity(query.query_embedding, vector, self._similarity_score)
try:
node = metadata_dict_to_node(metadata_dict)
node.set_content(text)
except Exception:
# NOTE: deprecated legacy logic for backward compatibility
metadata, node_info, relationships = legacy_metadata_dict_to_node(
metadata_dict
)
node = TextNode(
text=text,
id_=id,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships=relationships,
)
top_k_ids.append(id)
top_k_nodes.append(node)
top_k_scores.append(score)
result = VectorStoreQueryResult(
nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids
)
logger.debug("Result of query: %s", result)
return result
def query(
self,
query: VectorStoreQuery,
projection: Optional[Dict[str, int]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query: a VectorStoreQuery object.
projection: a dictionary specifying which fields to return after the search
Returns:
A VectorStoreQueryResult containing the results of the query.
"""
return self._query(query, projection=projection)
def create_index(self, dimensions, similarity_score=None):
score = self._similarity_score
if similarity_score is not None:
            score = similarity_score
return self._index_crud.create_index(dimensions, score)
def delete_index(self):
return self._index_crud.delete_index()
def __del__(self) -> None:
self._docdb_client.close()
| AWSDocDbVectorStore |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 38571,
"end": 41138
} | class ____(Node):
"""
A single character.
Unlike TeX, the font information and metrics are stored with each `Char`
to make it easier to lookup the font metrics when needed. Note that TeX
boxes have a width, height, and depth, unlike Type1 and TrueType which use
a full bounding box and an advance in the x-direction. The metrics must
be converted to the TeX model, and the advance (if different from width)
must be converted into a `Kern` node when the `Char` is added to its parent
`Hlist`.
"""
def __init__(self, c: str, state: ParserState):
super().__init__()
self.c = c
self.fontset = state.fontset
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __repr__(self) -> str:
return '`%s`' % self.c
def _update_metrics(self) -> None:
metrics = self._metrics = self.fontset.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self) -> bool:
return self._metrics.slanted
def get_kerning(self, next: Node | None) -> float:
"""
Return the amount of kerning between this and the given character.
This method is called when characters are strung together into `Hlist`
to create `Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.fontset.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, output: Output, x: float, y: float) -> None:
self.fontset.render_glyph(
output, x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self) -> None:
super().shrink()
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
| Char |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 3920,
"end": 4200
} | class ____(Message):
"""An undefined __future__ feature name was imported."""
message = 'future feature %s is not defined'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
| FutureFeatureNotDefined |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/asserts_test.py | {
"start": 1124,
"end": 1508
} | class ____(converter_testing.TestCase):
def test_basic(self):
def f(a):
assert a, 'testmsg'
return a
tr = self.transform(f, (functions, asserts, return_statements))
op = tr(constant_op.constant(False))
with self.assertRaisesRegex(errors_impl.InvalidArgumentError, 'testmsg'):
self.evaluate(op)
if __name__ == '__main__':
test.main()
| AssertsTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP010_1.py | {
"start": 302,
"end": 408
} | class ____():
def boo(self):
print(division)
__all__ = ["print_function", "generator_stop"]
| Foo |
python | doocs__leetcode | solution/1700-1799/1755.Closest Subsequence Sum/Solution.py | {
"start": 0,
"end": 919
} | class ____:
def minAbsDifference(self, nums: List[int], goal: int) -> int:
n = len(nums)
left = set()
right = set()
self.getSubSeqSum(0, 0, nums[: n // 2], left)
self.getSubSeqSum(0, 0, nums[n // 2 :], right)
result = inf
right = sorted(right)
rl = len(right)
for l in left:
remaining = goal - l
idx = bisect_left(right, remaining)
if idx < rl:
result = min(result, abs(remaining - right[idx]))
if idx > 0:
result = min(result, abs(remaining - right[idx - 1]))
return result
def getSubSeqSum(self, i: int, curr: int, arr: List[int], result: Set[int]):
if i == len(arr):
result.add(curr)
return
self.getSubSeqSum(i + 1, curr, arr, result)
self.getSubSeqSum(i + 1, curr + arr[i], arr, result)
| Solution |
python | django__django | tests/inspectdb/models.py | {
"start": 468,
"end": 620
} | class ____(models.Model):
people_pk = models.ForeignKey(People, models.CASCADE, primary_key=True)
ssn = models.CharField(max_length=11)
| PeopleData |
python | kamyu104__LeetCode-Solutions | Python/number-of-unequal-triplets-in-array.py | {
"start": 93,
"end": 551
} | class ____(object):
def unequalTriplets(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
K = 3
cnt = collections.Counter()
dp = [0]*K # dp[i]: number of unequal (i+1)-plets
for x in nums:
cnt[x] += 1
other_cnt = 1
for i in xrange(K):
dp[i] += other_cnt
other_cnt = dp[i]-cnt[x]*other_cnt
return dp[K-1]
| Solution |
python | marshmallow-code__marshmallow | examples/flask_example.py | {
"start": 1212,
"end": 1596
} | class ____(Schema):
id = fields.Int(dump_only=True)
first = fields.Str()
last = fields.Str()
formatted_name = fields.Method("format_name", dump_only=True)
def format_name(self, author):
return f"{author.last}, {author.first}"
# Custom validator
def must_not_be_blank(data):
if not data:
raise ValidationError("Data not provided.")
| AuthorSchema |
python | pallets__werkzeug | tests/test_debug.py | {
"start": 6783,
"end": 6863
} | class ____:
x = 42
y = 23
def __init__(self):
self.z = 15
| Foo |
python | django__django | tests/admin_filters/tests.py | {
"start": 2071,
"end": 2169
} | class ____(DecadeListFilter):
parameter_name = "publication-decade"
| DecadeListFilterWithoutTitle |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 7230,
"end": 7498
} | class ____(MPTTModel):
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
published = models.BooleanField()
objects = TreeManager()
foo_objects = MultipleManager()
| MultipleManagerModel |
python | vyperlang__vyper | vyper/semantics/types/module.py | {
"start": 1000,
"end": 9526
} | class ____(_UserType):
typeclass = "interface"
_type_members = {"address": AddressT()}
_is_prim_word = True
is_valid_element_type = True
_as_hashmap_key = True
_supports_external_calls = True
_attribute_in_annotation = True
def __init__(
self,
_id: str,
decl_node: Optional[vy_ast.VyperNode],
functions: dict,
events: dict,
structs: dict,
flags: dict,
) -> None:
validate_unique_method_ids(list(functions.values()))
members = functions | events | structs | flags
# sanity check: by construction, there should be no duplicates.
assert len(members) == len(functions) + len(events) + len(structs) + len(flags)
super().__init__(functions)
self._helper = VyperType(events | structs | flags)
self._id = _id
self._helper._id = _id
self.functions = functions
self.events = events
self.structs = structs
self.flags = flags
self.decl_node = decl_node
def get_type_member(self, attr, node):
# get an event, struct or flag from this interface
return TYPE_T(self._helper.get_member(attr, node))
@property
def getter_signature(self):
return (), AddressT()
@property
def abi_type(self) -> ABIType:
return ABI_Address()
def __str__(self):
return self._id
def __repr__(self):
return f"interface {self._id}"
def _try_fold(self, node):
if len(node.args) != 1:
raise UnfoldableNode("wrong number of args", node.args)
arg = node.args[0].get_folded_value()
if not isinstance(arg, vy_ast.Hex):
raise UnfoldableNode("not an address", arg)
return node
# when using the type itself (not an instance) in the call position
def _ctor_call_return(self, node: vy_ast.Call) -> "InterfaceT":
self._ctor_arg_types(node)
return self
def _ctor_arg_types(self, node):
validate_call_args(node, 1)
validate_expected_type(node.args[0], AddressT())
return [AddressT()]
def _ctor_kwarg_types(self, node):
return {}
def _ctor_modifiability_for_call(self, node: vy_ast.Call, modifiability: Modifiability) -> bool:
return check_modifiability(node.args[0], modifiability)
def validate_implements(
self, node: vy_ast.ImplementsDecl, functions: dict[ContractFunctionT, vy_ast.VyperNode]
) -> None:
# only external functions can implement interfaces
fns_by_name = {fn_t.name: fn_t for fn_t in functions.keys()}
unimplemented = []
def _is_function_implemented(fn_name, fn_type):
if fn_name not in fns_by_name:
return False
to_compare = fns_by_name[fn_name]
assert to_compare.is_external
assert isinstance(to_compare, ContractFunctionT)
assert isinstance(fn_type, ContractFunctionT)
return to_compare.implements(fn_type)
# check for missing functions
for name, type_ in self.functions.items():
if not isinstance(type_, ContractFunctionT):
# ex. address
continue
if not _is_function_implemented(name, type_):
unimplemented.append(type_._pp_signature)
if len(unimplemented) > 0:
# TODO: improve the error message for cases where the
# mismatch is small (like mutability, or just one argument
# is off, etc).
missing_str = ", ".join(sorted(unimplemented))
raise InterfaceViolation(
f"Contract does not implement all interface functions: {missing_str}", node
)
def to_toplevel_abi_dict(self) -> list[dict]:
abi = []
for event in self.events.values():
abi += event.to_toplevel_abi_dict()
for func in self.functions.values():
abi += func.to_toplevel_abi_dict()
return abi
# helper function which performs namespace collision checking
@classmethod
def _from_lists(
cls,
interface_name: str,
decl_node: Optional[vy_ast.VyperNode],
function_list: list[tuple[str, ContractFunctionT]],
event_list: Optional[list[tuple[str, EventT]]] = None,
struct_list: Optional[list[tuple[str, StructT]]] = None,
flag_list: Optional[list[tuple[str, FlagT]]] = None,
) -> "InterfaceT":
functions: dict[str, ContractFunctionT] = {}
events: dict[str, EventT] = {}
structs: dict[str, StructT] = {}
flags: dict[str, FlagT] = {}
seen_items: dict = {}
def _mark_seen(name, item):
if name in seen_items:
msg = f"multiple functions or events named '{name}'!"
prev_decl = seen_items[name].decl_node
raise NamespaceCollision(msg, item.decl_node, prev_decl=prev_decl)
seen_items[name] = item
def _process(dst_dict, items):
if items is None:
return
for name, item in items:
_mark_seen(name, item)
dst_dict[name] = item
_process(functions, function_list)
_process(events, event_list)
_process(structs, struct_list)
_process(flags, flag_list)
return cls(interface_name, decl_node, functions, events, structs, flags)
@classmethod
def from_json_abi(cls, name: str, abi: dict) -> "InterfaceT":
"""
Generate an `InterfaceT` object from an ABI.
Arguments
---------
name : str
The name of the interface
abi : dict
Contract ABI
Returns
-------
InterfaceT
primitive interface type
"""
functions: list = []
events: list = []
for item in [i for i in abi if i.get("type") == "function"]:
functions.append((item["name"], ContractFunctionT.from_abi(item)))
for item in [i for i in abi if i.get("type") == "event"]:
events.append((item["name"], EventT.from_abi(item)))
return cls._from_lists(name, None, functions, events)
@classmethod
def from_ModuleT(cls, module_t: "ModuleT") -> "InterfaceT":
"""
Generate an `InterfaceT` object from a Vyper ast node.
Arguments
---------
module_t: ModuleT
Vyper module type
Returns
-------
InterfaceT
primitive interface type
"""
funcs = []
for fn_t in module_t.exposed_functions:
funcs.append((fn_t.name, fn_t))
event_set: OrderedSet[EventT] = OrderedSet()
event_set.update([node._metadata["event_type"] for node in module_t.event_defs])
event_set.update(module_t.used_events)
events = [(event_t.name, event_t) for event_t in event_set]
# these are accessible via import, but they do not show up
# in the ABI json
structs = [(node.name, node._metadata["struct_type"]) for node in module_t.struct_defs]
flags = [(node.name, node._metadata["flag_type"]) for node in module_t.flag_defs]
return cls._from_lists(module_t._id, module_t.decl_node, funcs, events, structs, flags)
@classmethod
def from_InterfaceDef(cls, node: vy_ast.InterfaceDef) -> "InterfaceT":
functions = []
for func_ast in node.body:
if not isinstance(func_ast, vy_ast.FunctionDef):
raise StructureException(
"Interfaces can only contain function definitions", func_ast
)
if len(func_ast.decorator_list) > 0:
raise StructureException(
"Function definition in interface cannot be decorated",
func_ast.decorator_list[0],
)
functions.append((func_ast.name, ContractFunctionT.from_InterfaceDef(func_ast)))
return cls._from_lists(node.name, node, functions)
def _module_at(module_t):
return MemberFunctionT(
# set underlying_type to a TYPE_T as a bit of a kludge, since it's
# kind of like a class method (but we don't have classmethod
# abstraction)
underlying_type=TYPE_T(module_t),
name="__at__",
arg_types=[AddressT()],
return_type=module_t.interface,
is_modifying=False,
)
# Datatype to store all module information.
| InterfaceT |
python | ray-project__ray | rllib/utils/replay_buffers/fifo_replay_buffer.py | {
"start": 363,
"end": 3494
} | class ____(ReplayBuffer):
"""This replay buffer implements a FIFO queue.
Sometimes, e.g. for offline use cases, it may be desirable to use
off-policy algorithms without a Replay Buffer.
This FifoReplayBuffer can be used in-place to achieve the same effect
without having to introduce separate algorithm execution branches.
For simplicity and efficiency reasons, this replay buffer stores incoming
sample batches as-is, and returns them one at time.
This is to avoid any additional load when this replay buffer is used.
"""
def __init__(self, *args, **kwargs):
"""Initializes a FifoReplayBuffer.
Args:
``*args`` : Forward compatibility args.
``**kwargs``: Forward compatibility kwargs.
"""
# Completely by-passing underlying ReplayBuffer by setting its
# capacity to 1 (lowest allowed capacity).
ReplayBuffer.__init__(self, 1, StorageUnit.FRAGMENTS, **kwargs)
self._queue = []
@DeveloperAPI
@override(ReplayBuffer)
def add(self, batch: SampleBatchType, **kwargs) -> None:
return self._queue.append(batch)
@DeveloperAPI
@override(ReplayBuffer)
def sample(self, *args, **kwargs) -> Optional[SampleBatchType]:
"""Sample a saved training batch from this buffer.
Args:
``*args`` : Forward compatibility args.
``**kwargs``: Forward compatibility kwargs.
Returns:
A single training batch from the queue.
"""
if len(self._queue) <= 0:
# Return empty SampleBatch if queue is empty.
return MultiAgentBatch({}, 0)
batch = self._queue.pop(0)
# Equal weights of 1.0.
batch["weights"] = np.ones(len(batch))
return batch
@DeveloperAPI
def update_priorities(self, *args, **kwargs) -> None:
"""Update priorities of items at given indices.
No-op for this replay buffer.
Args:
``*args`` : Forward compatibility args.
``**kwargs``: Forward compatibility kwargs.
"""
pass
@DeveloperAPI
@override(ReplayBuffer)
def stats(self, debug: bool = False) -> Dict:
"""Returns the stats of this buffer.
Args:
debug: If true, adds sample eviction statistics to the returned stats dict.
Returns:
A dictionary of stats about this buffer.
"""
# As if this replay buffer has never existed.
return {}
@DeveloperAPI
@override(ReplayBuffer)
def get_state(self) -> Dict[str, Any]:
"""Returns all local state.
Returns:
The serializable local state.
"""
# Pass through replay buffer does not save states.
return {}
@DeveloperAPI
@override(ReplayBuffer)
def set_state(self, state: Dict[str, Any]) -> None:
"""Restores all local state to the provided `state`.
Args:
state: The new state to set this buffer. Can be obtained by calling
`self.get_state()`.
"""
pass
| FifoReplayBuffer |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 32870,
"end": 34041
} | class ____(ASTExpression):
def __init__(self, op: str, expr: ASTExpression) -> None:
self.op = op
self.expr = expr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTUnaryOpExpr):
return NotImplemented
return self.op == other.op and self.expr == other.expr
def __hash__(self) -> int:
return hash((self.op, self.expr))
def _stringify(self, transform: StringifyTransform) -> str:
if self.op[0] in 'cn':
return self.op + ' ' + transform(self.expr)
else:
return self.op + transform(self.expr)
def get_id(self, version: int) -> str:
return _id_operator_unary_v2[self.op] + self.expr.get_id(version)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
if self.op[0] in 'cn':
signode += addnodes.desc_sig_keyword(self.op, self.op)
signode += addnodes.desc_sig_space()
else:
signode += addnodes.desc_sig_operator(self.op, self.op)
self.expr.describe_signature(signode, mode, env, symbol)
| ASTUnaryOpExpr |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass1.py | {
"start": 5529,
"end": 7737
} | class ____(Generic[T]):
__match_args__ = ("x", "y")
x: T
y: T
def func1(points: list[Point[float] | Point[complex]]):
match points:
case [] as a1:
reveal_type(a1, expected_text="list[Point[float] | Point[complex]]")
reveal_type(points, expected_text="list[Point[float] | Point[complex]]")
case [Point(0, 0) as b1]:
reveal_type(b1, expected_text="Point[float] | Point[complex]")
reveal_type(points, expected_text="list[Point[float] | Point[complex]]")
case [Point(c1, c2)]:
reveal_type(c1, expected_text="float | complex")
reveal_type(c2, expected_text="float | complex")
reveal_type(points, expected_text="list[Point[float] | Point[complex]]")
case [Point(0, d1), Point(0, d2)]:
reveal_type(d1, expected_text="float | complex")
reveal_type(d2, expected_text="float | complex")
reveal_type(points, expected_text="list[Point[float] | Point[complex]]")
case _ as e1:
reveal_type(e1, expected_text="list[Point[float] | Point[complex]]")
reveal_type(points, expected_text="list[Point[float] | Point[complex]]")
def func2(subj: object):
match subj:
case list() as a1:
reveal_type(a1, expected_text="list[Unknown]")
reveal_type(subj, expected_text="list[Unknown]")
def func3(subj: int | str | dict[str, str]):
match subj:
case int(x):
reveal_type(x, expected_text="int")
reveal_type(subj, expected_text="int")
case str(x):
reveal_type(x, expected_text="str")
reveal_type(subj, expected_text="str")
case dict(x):
reveal_type(x, expected_text="dict[str, str]")
reveal_type(subj, expected_text="dict[str, str]")
def func4(subj: object):
match subj:
case int(x):
reveal_type(x, expected_text="int")
reveal_type(subj, expected_text="int")
case str(x):
reveal_type(x, expected_text="str")
reveal_type(subj, expected_text="str")
# Test the auto-generation of __match_args__ for dataclass.
@dataclass
| Point |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bandit/S307.py | {
"start": 75,
"end": 258
} | class ____(object):
def eval(self):
print("hi")
def foo(self):
self.eval() # OK
# https://github.com/astral-sh/ruff/issues/15522
map(eval, [])
foo = eval
| Class |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/input/win32.py | {
"start": 1696,
"end": 1956
} | class ____(Input):
"""
Base class for `Win32Input` and `Win32PipeInput`.
"""
def __init__(self) -> None:
self.win32_handles = _Win32Handles()
@property
@abstractmethod
def handle(self) -> HANDLE:
pass
| _Win32InputBase |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 12634,
"end": 12722
} | class ____(PydanticValueError):
msg_template = 'invalid datetime format'
| DateTimeError |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 585823,
"end": 590838
} | class ____(Node, CmpNode):
# A CascadedCmpNode is not a complete expression node. It
# hangs off the side of another comparison node, shares
# its left operand with that node, and shares its result
# with the PrimaryCmpNode at the head of the chain.
#
# operator string
# operand2 ExprNode
# cascade CascadedCmpNode
child_attrs = ['operand2', 'coerced_operand2', 'cascade',
'special_bool_extra_args']
cascade = None
coerced_operand2 = None
constant_result = constant_value_not_set # FIXME: where to calculate this?
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def analyse_types(self, env):
self.operand2 = self.operand2.analyse_types(env)
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
return self
def has_python_operands(self):
return self.operand2.type.is_pyobject
def is_cpp_comparison(self):
# cascaded comparisons aren't currently implemented for c++ classes.
return False
def optimise_comparison(self, operand1, env, result_is_bool=False):
if self.find_special_bool_compare_function(env, operand1, result_is_bool):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
if not operand1.type.is_pyobject:
operand1 = operand1.coerce_to_pyobject(env)
if self.cascade:
operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return operand1
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
if self.cascade:
self.cascade.coerce_operands_to_pyobjects(env)
def coerce_cascaded_operands_to_temp(self, env):
if self.cascade:
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
if self.type.is_pyobject:
code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
code.put_decref(result, self.type)
else:
code.putln("if (%s) {" % result)
if needs_evaluation:
operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
for extra_arg in self.special_bool_extra_args:
extra_arg.generate_evaluation_code(code)
self.generate_operation_code(code, result,
operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, result, self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
if needs_evaluation:
operand1.generate_disposal_code(code)
operand1.free_temps(code)
# Cascaded cmp result is always temp
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
code.putln("}")
def annotate(self, code):
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
binop_node_classes = {
"or": BoolBinopNode,
"and": BoolBinopNode,
"|": BitwiseOrNode,
"^": IntBinopNode,
"&": IntBinopNode,
"<<": IntBinopNode,
">>": IntBinopNode,
"+": AddNode,
"-": SubNode,
"*": MulNode,
"@": MatMultNode,
"/": DivNode,
"//": DivNode,
"%": ModNode,
"**": PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
# Construct binop node of appropriate class for
# given operator.
return binop_node_classes[operator](
pos,
operator=operator,
operand1=operand1,
operand2=operand2,
inplace=inplace,
**kwargs)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
| CascadedCmpNode |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 842058,
"end": 842436
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("Project", graphql_name="node")
"""The item at the end of the edge."""
| ProjectEdge |
python | getsentry__sentry | tests/sentry_plugins/test_client.py | {
"start": 456,
"end": 1841
} | class ____(TestCase):
@responses.activate
def test_get(self) -> None:
responses.add(responses.GET, "http://example.com", json={})
resp = ApiClient().get("http://example.com")
assert isinstance(resp, BaseApiResponse)
assert resp.status_code == 200
@responses.activate
def test_post(self) -> None:
responses.add(responses.POST, "http://example.com", json={})
resp = ApiClient().post("http://example.com")
assert isinstance(resp, BaseApiResponse)
assert resp.status_code == 200
@responses.activate
def test_delete(self) -> None:
responses.add(responses.DELETE, "http://example.com", json={})
resp = ApiClient().delete("http://example.com")
assert isinstance(resp, BaseApiResponse)
assert resp.status_code == 200
@responses.activate
def test_put(self) -> None:
responses.add(responses.PUT, "http://example.com", json={})
resp = ApiClient().put("http://example.com")
assert isinstance(resp, BaseApiResponse)
assert resp.status_code == 200
@responses.activate
def test_patch(self) -> None:
responses.add(responses.PATCH, "http://example.com", json={})
resp = ApiClient().patch("http://example.com")
assert isinstance(resp, BaseApiResponse)
assert resp.status_code == 200
| ApiClientTest |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_ecs.py | {
"start": 40318,
"end": 41816
} | class ____(EcsBaseTestCase):
def test_execute_immediate_create(self):
"""Test if task definition created during initial request."""
mock_ti = mock.MagicMock(name="MockedTaskInstance")
expected_task_definition_config = {
"family": "family_name",
"containerDefinitions": [
{
"name": CONTAINER_NAME,
"image": "ubuntu",
"workingDirectory": "/usr/bin",
"entryPoint": ["sh", "-c"],
"command": ["ls"],
}
],
"cpu": "256",
"memory": "512",
"networkMode": "awsvpc",
}
op = EcsRegisterTaskDefinitionOperator(task_id="task", **TASK_DEFINITION_CONFIG)
with mock.patch.object(self.client, "register_task_definition") as mock_client_method:
mock_client_method.return_value = {
"taskDefinition": {"status": "ACTIVE", "taskDefinitionArn": "foo-bar"}
}
result = op.execute({"ti": mock_ti})
mock_client_method.assert_called_once_with(**expected_task_definition_config)
mock_ti.xcom_push.assert_called_once_with(key="task_definition_arn", value="foo-bar")
assert result == "foo-bar"
def test_template_fields(self):
op = EcsRegisterTaskDefinitionOperator(task_id="task", **TASK_DEFINITION_CONFIG)
validate_template_fields(op)
| TestEcsRegisterTaskDefinitionOperator |
python | django__django | tests/bulk_create/models.py | {
"start": 234,
"end": 600
} | class ____(models.Model):
name = models.CharField(max_length=255)
iso_two_letter = models.CharField(max_length=2)
description = models.TextField()
class Meta:
constraints = [
models.UniqueConstraint(
fields=["iso_two_letter", "name"],
name="country_name_iso_unique",
),
]
| Country |
python | has2k1__plotnine | plotnine/facets/labelling.py | {
"start": 7008,
"end": 7194
} | class ____(metaclass=ABCMeta):
"""
Per item
"""
@abstractmethod
def __call__(self, label_info: strip_label_details) -> strip_label_details:
pass
| _core_labeller |
python | doocs__leetcode | lcof2/剑指 Offer II 013. 二维子矩阵的和/Solution.py | {
"start": 0,
"end": 755
} | class ____:
def __init__(self, matrix: List[List[int]]):
self.s = [[0] * (len(matrix[0]) + 1) for _ in range(len(matrix) + 1)]
for i, row in enumerate(matrix, 1):
for j, x in enumerate(row, 1):
self.s[i][j] = (
self.s[i - 1][j] + self.s[i][j - 1] - self.s[i - 1][j - 1] + x
)
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
return (
self.s[row2 + 1][col2 + 1]
- self.s[row2 + 1][col1]
- self.s[row1][col2 + 1]
+ self.s[row1][col1]
)
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
| NumMatrix |
python | openai__openai-python | src/openai/types/graders/score_model_grader.py | {
"start": 2940,
"end": 3463
} | class ____(BaseModel):
input: List[Input]
"""The input text. This may include template strings."""
model: str
"""The model to use for the evaluation."""
name: str
"""The name of the grader."""
type: Literal["score_model"]
"""The object type, which is always `score_model`."""
range: Optional[List[float]] = None
"""The range of the score. Defaults to `[0, 1]`."""
sampling_params: Optional[SamplingParams] = None
"""The sampling parameters for the model."""
| ScoreModelGrader |
python | great-expectations__great_expectations | great_expectations/execution_engine/partition_and_sample/data_partitioner.py | {
"start": 2463,
"end": 6869
} | class ____(abc.ABC): # noqa: B024 # FIXME CoP
"""Abstract base class containing methods for partitioning data accessible via Execution Engines.
Note, for convenience, you can also access DatePart via the instance variable
date_part e.g. DataPartitioner.date_part.MONTH
""" # noqa: E501 # FIXME CoP
date_part: ClassVar[Type[DatePart]] = DatePart
def get_partitioner_method(self, partitioner_method_name: str) -> Callable:
"""Get the appropriate partitioner method from the method name.
Args:
partitioner_method_name: name of the partitioner to retrieve.
Returns:
partitioner method.
"""
partitioner_method_name = self._get_partitioner_method_name(partitioner_method_name)
return getattr(self, partitioner_method_name)
@staticmethod
def _get_partitioner_method_name(partitioner_method_name: str) -> str:
"""Accept partitioner methods with or without starting with `_`.
Args:
partitioner_method_name: partitioner name starting with or without preceding `_`.
Returns:
partitioner method name stripped of preceding underscore.
"""
if partitioner_method_name.startswith("_"):
return partitioner_method_name[1:]
else:
return partitioner_method_name
@staticmethod
def _convert_date_parts(date_parts: List[DatePart] | List[str]) -> List[DatePart]:
"""Convert a list of date parts to DatePart objects.
Args:
date_parts: List of DatePart or string representations of DatePart.
Returns:
List of DatePart objects
"""
return [
DatePart(date_part.lower()) if isinstance(date_part, str) else date_part
for date_part in date_parts
]
@staticmethod
def _validate_date_parts(date_parts: List[DatePart] | List[str]) -> None:
"""Validate that date parts exist and are of the correct type.
Args:
date_parts: DatePart instances or str.
Returns:
None, this method raises exceptions if the config is invalid.
"""
if len(date_parts) == 0:
raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP
"date_parts are required when using partition_on_date_parts."
)
if not all(isinstance(dp, (DatePart, str)) for dp in date_parts):
raise gx_exceptions.InvalidConfigError("date_parts should be of type DatePart or str.") # noqa: TRY003 # FIXME CoP
@staticmethod
def _verify_all_strings_are_valid_date_parts(date_part_strings: List[str]) -> None:
"""Verify date part strings by trying to load as DatePart instances.
Args:
date_part_strings: A list of strings that should correspond to DatePart.
Returns:
None, raises an exception if unable to convert.
"""
try:
[DatePart(date_part_string) for date_part_string in date_part_strings]
except ValueError as e:
raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP
f"{e} please only specify strings that are supported in DatePart: {[dp.value for dp in DatePart]}" # noqa: E501 # FIXME CoP
)
def _convert_datetime_batch_identifiers_to_date_parts_dict(
self,
column_batch_identifiers: datetime.datetime | str | dict,
date_parts: List[DatePart],
) -> dict:
"""Convert batch identifiers to a dict of {date_part as str: date_part value}.
Args:
column_batch_identifiers: Batch identifiers related to the column of interest.
date_parts: List of DatePart to include in the return value.
Returns:
A dict of {date_part as str: date_part value} eg. {"day": 3}.
"""
if isinstance(column_batch_identifiers, str):
column_batch_identifiers = parse(column_batch_identifiers)
if isinstance(column_batch_identifiers, datetime.datetime):
return {
date_part.value: getattr(column_batch_identifiers, date_part.value)
for date_part in date_parts
}
else:
self._verify_all_strings_are_valid_date_parts(list(column_batch_identifiers.keys()))
return column_batch_identifiers
| DataPartitioner |
python | huggingface__transformers | src/transformers/integrations/tensor_parallel.py | {
"start": 45147,
"end": 49264
} | class ____(TensorParallelLayer):
"""
Allows to reshape the router scores to support running expert parallel.
"""
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
self.args = args
self.use_dtensor = False
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):
input_tensor = inputs[0]
if isinstance(input_tensor, DTensor):
raise NotImplementedError("RouterParallel does not support DTensor input for now")
return input_tensor
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
"""
        Imagine if you had 4 tokens, top_k = 4, and 128 experts.
        With EP = 8, num_local_expert should be 128/8 = 16.
Imagine router_indices being:
[ 52, 42, 119, 67],
[102, 89, 61, 40],
[ 82, 103, 4, 34],
[ 93, 23, 109, 11],
then you can map which rank should be getting which values
[3, 2, 7, 4],
[6, 5, 3, 2],
[5, 6, 0, 2],
[5, 1, 6, 0],
        Thus for, say, rank 0, you fill the index tensor with 16 (num_local_expert):
[ 16, 16, 16, 16],
[ 16, 16, 16, 16],
[ 16, 16, 4, 16],
[ 16, 16, 16, 11],
This works well. For another rank you need to make sure you round to num_local_expert
because the next operation will one hot encode the router index vector.
This allows us to know directly which local expert is hit.
        Similarly, the scores are indexed with something created from
router_indices.
        The kinda naive training loop that we use for device_map "auto" uses similar logic.
        Here we are just making each rank believe that it is alone, so it computes its part of the hidden states.
        Mask invalid indices with num_local_expert for one-hot encoding, so the computation will skip the masking index.
"""
ep_rank, ep_size = device_mesh.get_local_rank(), device_mesh.size()
if mod.num_experts % ep_size != 0:
raise ValueError(
f"The number of experts must be divisible by number of ep_size: {mod.num_experts} % {ep_size} != 0"
)
num_local_experts = mod.num_experts // ep_size
router_scores, router_indices = outputs
router_scores = router_scores[:, ep_rank * num_local_experts : (ep_rank + 1) * num_local_experts]
router_indices = router_indices.masked_fill((router_indices // num_local_experts) != ep_rank, -1)
# As -1 % 1 is 0, we can only use mask fill when num_local_experts is 1
if num_local_experts > 1:
router_indices = torch.fmod(router_indices, num_local_experts)
else:
router_indices = router_indices.masked_fill(router_indices > 0, 0).masked_fill(router_indices < 0, -1)
router_indices = router_indices.masked_fill(
router_indices == -1, num_local_experts
) # masking class for one hot
return router_scores, router_indices
def shard_tensor(
self,
param,
param_type=None,
param_casting_dtype=None,
to_contiguous=None,
rank=None,
device_mesh=None,
tensor_idx=None,
):
parameter = param[...].to(param_casting_dtype)
self.shard = None
return parameter, None
def partition_tensor(self, param, empty_param, param_type, param_casting_dtype, to_contiguous, rank, device_mesh):
# TODO: i'd like for this to be the default
param = param[...].to(param_casting_dtype)
if to_contiguous:
param = param.contiguous()
return param
def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module:
# TODO: need an abstract Parallel class that is different from TensorParallelLayer
distribute_module(
module,
device_mesh,
partial(self._prepare_input_fn, None, None),
partial(self._prepare_output_fn, None, None),
)
| RouterParallel |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/data_adapter.py | {
"start": 20101,
"end": 22992
} | class ____(DataAdapter):
"""Adapter that handles composite tensor."""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_composite(v):
# Dataset/iterator/DistributedDataset inherits from CompositeTensor but
# should be handled by DatasetAdapter and GeneratorAdapter.
if (tf_utils.is_extension_type(v) and
not isinstance(v,
(dataset_ops.DatasetV2, iterator_ops.IteratorBase)) and
not _is_distributed_dataset(v)):
return True
# Support Scipy sparse tensors if scipy is installed
return _is_scipy_sparse(v)
def _is_tensor_or_composite(v):
if isinstance(v, (tensor.Tensor, np.ndarray)):
return True
return _is_composite(v)
return (any(_is_composite(v) for v in flat_inputs) and
all(_is_tensor_or_composite(v) for v in flat_inputs))
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
steps=None,
shuffle=False,
**kwargs):
super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
dataset = dataset_ops.DatasetV2.from_tensor_slices(inputs)
num_samples = int(nest.flatten(x)[0].shape[0])
if shuffle:
dataset = dataset.shuffle(num_samples)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
dataset = dataset.batch(batch_size)
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
self._has_partial_batch = (self._size != (num_samples // batch_size))
self._partial_batch_size = None
if self._has_partial_batch:
self._partial_batch_size = (
num_samples - (self._size - 1) * self._batch_size)
self._dataset = dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._has_partial_batch
def partial_batch_size(self):
return self._partial_batch_size
def should_recreate_iterator(self):
return True
| CompositeTensorDataAdapter |
python | encode__starlette | starlette/exceptions.py | {
"start": 671,
"end": 1066
} | class ____(Exception):
def __init__(self, code: int, reason: str | None = None) -> None:
self.code = code
self.reason = reason or ""
def __str__(self) -> str:
return f"{self.code}: {self.reason}"
def __repr__(self) -> str:
class_name = self.__class__.__name__
return f"{class_name}(code={self.code!r}, reason={self.reason!r})"
| WebSocketException |
python | huggingface__transformers | src/transformers/models/smolvlm/configuration_smolvlm.py | {
"start": 1350,
"end": 5401
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a
SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
[google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM
[HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1152):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer
>>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig
>>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration
>>> configuration = SmolVLMVisionConfig()
>>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration
>>> model = SmolVLMVisionTransformer(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "smolvlm_vision"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=1152,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=16,
num_channels=3,
image_size=224,
patch_size=32,
hidden_act="gelu_pytorch_tanh",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
| SmolVLMVisionConfig |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/builtin_config_schemas.py | {
"start": 2439,
"end": 2833
} | class ____:
ANY_INPUT = define_any_input_schema()
BOOL_INPUT = define_builtin_scalar_input_schema("Bool", ConfigBoolInstance)
FLOAT_INPUT = define_builtin_scalar_input_schema("Float", ConfigFloatInstance)
INT_INPUT = define_builtin_scalar_input_schema("Int", ConfigIntInstance)
STRING_INPUT = define_builtin_scalar_input_schema("String", ConfigStringInstance)
| BuiltinSchemas |
python | pypa__pip | src/pip/_vendor/rich/text.py | {
"start": 2976,
"end": 47546
} | class ____(JupyterMixin):
"""Text with color / style.
Args:
text (str, optional): Default unstyled text. Defaults to "".
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
        spans (List[Span], optional): A list of predefined style spans. Defaults to None.
"""
__slots__ = [
"_text",
"style",
"justify",
"overflow",
"no_wrap",
"end",
"tab_size",
"_spans",
"_length",
]
def __init__(
self,
text: str = "",
style: Union[str, Style] = "",
*,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = None,
end: str = "\n",
tab_size: Optional[int] = None,
spans: Optional[List[Span]] = None,
) -> None:
sanitized_text = strip_control_codes(text)
self._text = [sanitized_text]
self.style = style
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.end = end
self.tab_size = tab_size
self._spans: List[Span] = spans or []
self._length: int = len(sanitized_text)
def __len__(self) -> int:
return self._length
def __bool__(self) -> bool:
return bool(self._length)
def __str__(self) -> str:
return self.plain
def __repr__(self) -> str:
return f"<text {self.plain!r} {self._spans!r} {self.style!r}>"
def __add__(self, other: Any) -> "Text":
if isinstance(other, (str, Text)):
result = self.copy()
result.append(other)
return result
return NotImplemented
def __eq__(self, other: object) -> bool:
if not isinstance(other, Text):
return NotImplemented
return self.plain == other.plain and self._spans == other._spans
def __contains__(self, other: object) -> bool:
if isinstance(other, str):
return other in self.plain
elif isinstance(other, Text):
return other.plain in self.plain
return False
def __getitem__(self, slice: Union[int, slice]) -> "Text":
def get_text_at(offset: int) -> "Text":
_Span = Span
text = Text(
self.plain[offset],
spans=[
_Span(0, 1, style)
for start, end, style in self._spans
if end > offset >= start
],
end="",
)
return text
if isinstance(slice, int):
return get_text_at(slice)
else:
start, stop, step = slice.indices(len(self.plain))
if step == 1:
lines = self.divide([start, stop])
return lines[1]
else:
# This would be a bit of work to implement efficiently
                # For now, it's not required
raise TypeError("slices with step!=1 are not supported")
@property
def cell_len(self) -> int:
"""Get the number of cells required to render this text."""
return cell_len(self.plain)
@property
def markup(self) -> str:
"""Get console markup to render this Text.
Returns:
str: A string potentially creating markup tags.
"""
from .markup import escape
output: List[str] = []
plain = self.plain
markup_spans = [
(0, False, self.style),
*((span.start, False, span.style) for span in self._spans),
*((span.end, True, span.style) for span in self._spans),
(len(plain), True, self.style),
]
markup_spans.sort(key=itemgetter(0, 1))
position = 0
append = output.append
for offset, closing, style in markup_spans:
if offset > position:
append(escape(plain[position:offset]))
position = offset
if style:
append(f"[/{style}]" if closing else f"[{style}]")
markup = "".join(output)
return markup
@classmethod
def from_markup(
cls,
text: str,
*,
style: Union[str, Style] = "",
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
end: str = "\n",
) -> "Text":
"""Create Text instance from markup.
Args:
text (str): A string containing console markup.
style (Union[str, Style], optional): Base style for text. Defaults to "".
emoji (bool, optional): Also render emoji code. Defaults to True.
emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
Returns:
Text: A Text instance with markup rendered.
"""
from .markup import render
rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)
rendered_text.justify = justify
rendered_text.overflow = overflow
rendered_text.end = end
return rendered_text
@classmethod
def from_ansi(
cls,
text: str,
*,
style: Union[str, Style] = "",
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = None,
end: str = "\n",
tab_size: Optional[int] = 8,
) -> "Text":
"""Create a Text object from a string containing ANSI escape codes.
Args:
text (str): A string containing escape codes.
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
"""
from .ansi import AnsiDecoder
joiner = Text(
"\n",
justify=justify,
overflow=overflow,
no_wrap=no_wrap,
end=end,
tab_size=tab_size,
style=style,
)
decoder = AnsiDecoder()
result = joiner.join(line for line in decoder.decode(text))
return result
@classmethod
def styled(
cls,
text: str,
style: StyleType = "",
*,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
    ) -> "Text":
        """Construct a Text instance with a pre-applied style. A style applied in this way won't be used
to pad the text when it is justified.
Args:
text (str): A string containing console markup.
style (Union[str, Style]): Style to apply to the text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
Returns:
Text: A text instance with a style applied to the entire string.
"""
styled_text = cls(text, justify=justify, overflow=overflow)
styled_text.stylize(style)
return styled_text
@classmethod
def assemble(
cls,
*parts: Union[str, "Text", Tuple[str, StyleType]],
style: Union[str, Style] = "",
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = None,
end: str = "\n",
tab_size: int = 8,
meta: Optional[Dict[str, Any]] = None,
) -> "Text":
"""Construct a text instance by combining a sequence of strings with optional styles.
The positional arguments should be either strings, or a tuple of string + style.
Args:
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
            meta (Dict[str, Any], optional): Meta data to apply to text, or None for no meta data. Defaults to None.
Returns:
Text: A new text instance.
"""
text = cls(
style=style,
justify=justify,
overflow=overflow,
no_wrap=no_wrap,
end=end,
tab_size=tab_size,
)
append = text.append
_Text = Text
for part in parts:
if isinstance(part, (_Text, str)):
append(part)
else:
append(*part)
if meta:
text.apply_meta(meta)
return text
@property
def plain(self) -> str:
"""Get the text as a single string."""
if len(self._text) != 1:
self._text[:] = ["".join(self._text)]
return self._text[0]
@plain.setter
def plain(self, new_text: str) -> None:
"""Set the text to a new value."""
if new_text != self.plain:
sanitized_text = strip_control_codes(new_text)
self._text[:] = [sanitized_text]
old_length = self._length
self._length = len(sanitized_text)
if old_length > self._length:
self._trim_spans()
@property
def spans(self) -> List[Span]:
"""Get a reference to the internal list of spans."""
return self._spans
@spans.setter
def spans(self, spans: List[Span]) -> None:
"""Set spans."""
self._spans = spans[:]
def blank_copy(self, plain: str = "") -> "Text":
"""Return a new Text instance with copied metadata (but not the string or spans)."""
copy_self = Text(
plain,
style=self.style,
justify=self.justify,
overflow=self.overflow,
no_wrap=self.no_wrap,
end=self.end,
tab_size=self.tab_size,
)
return copy_self
def copy(self) -> "Text":
"""Return a copy of this instance."""
copy_self = Text(
self.plain,
style=self.style,
justify=self.justify,
overflow=self.overflow,
no_wrap=self.no_wrap,
end=self.end,
tab_size=self.tab_size,
)
copy_self._spans[:] = self._spans
return copy_self
def stylize(
self,
style: Union[str, Style],
start: int = 0,
end: Optional[int] = None,
) -> None:
"""Apply a style to the text, or a portion of the text.
Args:
style (Union[str, Style]): Style instance or style definition to apply.
start (int): Start offset (negative indexing is supported). Defaults to 0.
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
"""
if style:
length = len(self)
if start < 0:
start = length + start
if end is None:
end = length
if end < 0:
end = length + end
if start >= length or end <= start:
# Span not in text or not valid
return
self._spans.append(Span(start, min(length, end), style))
def stylize_before(
self,
style: Union[str, Style],
start: int = 0,
end: Optional[int] = None,
) -> None:
"""Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present.
Args:
style (Union[str, Style]): Style instance or style definition to apply.
start (int): Start offset (negative indexing is supported). Defaults to 0.
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
"""
if style:
length = len(self)
if start < 0:
start = length + start
if end is None:
end = length
if end < 0:
end = length + end
if start >= length or end <= start:
# Span not in text or not valid
return
self._spans.insert(0, Span(start, min(length, end), style))
def apply_meta(
self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None
) -> None:
"""Apply metadata to the text, or a portion of the text.
Args:
meta (Dict[str, Any]): A dict of meta information.
start (int): Start offset (negative indexing is supported). Defaults to 0.
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
"""
style = Style.from_meta(meta)
self.stylize(style, start=start, end=end)
def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text":
"""Apply event handlers (used by Textual project).
Example:
>>> from rich.text import Text
>>> text = Text("hello world")
>>> text.on(click="view.toggle('world')")
Args:
meta (Dict[str, Any]): Mapping of meta information.
            **handlers: Keyword args are prefixed with "@" to define handlers.
Returns:
            Text: Self is returned so that the method may be chained.
"""
meta = {} if meta is None else meta
meta.update({f"@{key}": value for key, value in handlers.items()})
self.stylize(Style.from_meta(meta))
return self
def remove_suffix(self, suffix: str) -> None:
"""Remove a suffix if it exists.
Args:
suffix (str): Suffix to remove.
"""
if self.plain.endswith(suffix):
self.right_crop(len(suffix))
    def get_style_at_offset(self, console: "Console", offset: int) -> Style:
        """Get the style of a character at a given offset.
Args:
console (~Console): Console where text will be rendered.
            offset (int): Offset into text (negative indexing supported)
Returns:
Style: A Style instance.
"""
# TODO: This is a little inefficient, it is only used by full justify
if offset < 0:
offset = len(self) + offset
get_style = console.get_style
style = get_style(self.style).copy()
for start, end, span_style in self._spans:
if end > offset >= start:
style += get_style(span_style, default="")
return style
    def extend_style(self, spaces: int) -> None:
        """Extend the Text by a given number of spaces, where the spaces have the same style as the last character.
Args:
spaces (int): Number of spaces to add to the Text.
"""
if spaces <= 0:
return
spans = self.spans
new_spaces = " " * spaces
if spans:
end_offset = len(self)
self._spans[:] = [
span.extend(spaces) if span.end >= end_offset else span
for span in spans
]
self._text.append(new_spaces)
self._length += spaces
else:
self.plain += new_spaces
def highlight_regex(
self,
re_highlight: Union[Pattern[str], str],
style: Optional[Union[GetStyleCallable, StyleType]] = None,
*,
style_prefix: str = "",
) -> int:
"""Highlight text with a regular expression, where group names are
translated to styles.
Args:
re_highlight (Union[re.Pattern, str]): A regular expression object or string.
style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable
which accepts the matched text and returns a style. Defaults to None.
style_prefix (str, optional): Optional prefix to add to style group names.
Returns:
int: Number of regex matches
"""
count = 0
append_span = self._spans.append
_Span = Span
plain = self.plain
if isinstance(re_highlight, str):
re_highlight = re.compile(re_highlight)
for match in re_highlight.finditer(plain):
get_span = match.span
if style:
start, end = get_span()
match_style = style(plain[start:end]) if callable(style) else style
if match_style is not None and end > start:
append_span(_Span(start, end, match_style))
count += 1
for name in match.groupdict().keys():
start, end = get_span(name)
if start != -1 and end > start:
append_span(_Span(start, end, f"{style_prefix}{name}"))
return count
def highlight_words(
self,
words: Iterable[str],
style: Union[str, Style],
*,
case_sensitive: bool = True,
) -> int:
"""Highlight words with a style.
Args:
words (Iterable[str]): Words to highlight.
style (Union[str, Style]): Style to apply.
case_sensitive (bool, optional): Enable case sensitive matching. Defaults to True.
Returns:
int: Number of words highlighted.
"""
re_words = "|".join(re.escape(word) for word in words)
add_span = self._spans.append
count = 0
_Span = Span
for match in re.finditer(
re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE
):
start, end = match.span(0)
add_span(_Span(start, end, style))
count += 1
return count
def rstrip(self) -> None:
"""Strip whitespace from end of text."""
self.plain = self.plain.rstrip()
def rstrip_end(self, size: int) -> None:
"""Remove whitespace beyond a certain width at the end of the text.
Args:
size (int): The desired size of the text.
"""
text_length = len(self)
if text_length > size:
excess = text_length - size
whitespace_match = _re_whitespace.search(self.plain)
if whitespace_match is not None:
whitespace_count = len(whitespace_match.group(0))
self.right_crop(min(whitespace_count, excess))
    def set_length(self, new_length: int) -> None:
        """Set new length of the text, clipping or padding as required."""
length = len(self)
if length != new_length:
if length < new_length:
self.pad_right(new_length - length)
else:
self.right_crop(length - new_length)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> Iterable[Segment]:
tab_size: int = console.tab_size if self.tab_size is None else self.tab_size
justify = self.justify or options.justify or DEFAULT_JUSTIFY
overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW
lines = self.wrap(
console,
options.max_width,
justify=justify,
overflow=overflow,
tab_size=tab_size or 8,
no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),
)
all_lines = Text("\n").join(lines)
yield from all_lines.render(console, end=self.end)
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
text = self.plain
lines = text.splitlines()
max_text_width = max(cell_len(line) for line in lines) if lines else 0
words = text.split()
min_text_width = (
max(cell_len(word) for word in words) if words else max_text_width
)
return Measurement(min_text_width, max_text_width)
def render(self, console: "Console", end: str = "") -> Iterable["Segment"]:
"""Render the text as Segments.
Args:
console (Console): Console instance.
end (Optional[str], optional): Optional end character.
Returns:
Iterable[Segment]: Result of render that may be written to the console.
"""
_Segment = Segment
text = self.plain
if not self._spans:
yield Segment(text)
if end:
yield _Segment(end)
return
get_style = partial(console.get_style, default=Style.null())
enumerated_spans = list(enumerate(self._spans, 1))
style_map = {index: get_style(span.style) for index, span in enumerated_spans}
style_map[0] = get_style(self.style)
spans = [
(0, False, 0),
*((span.start, False, index) for index, span in enumerated_spans),
*((span.end, True, index) for index, span in enumerated_spans),
(len(text), True, 0),
]
spans.sort(key=itemgetter(0, 1))
stack: List[int] = []
stack_append = stack.append
stack_pop = stack.remove
style_cache: Dict[Tuple[Style, ...], Style] = {}
style_cache_get = style_cache.get
combine = Style.combine
def get_current_style() -> Style:
"""Construct current style from stack."""
styles = tuple(style_map[_style_id] for _style_id in sorted(stack))
cached_style = style_cache_get(styles)
if cached_style is not None:
return cached_style
current_style = combine(styles)
style_cache[styles] = current_style
return current_style
for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):
if leaving:
stack_pop(style_id)
else:
stack_append(style_id)
if next_offset > offset:
yield _Segment(text[offset:next_offset], get_current_style())
if end:
yield _Segment(end)
def join(self, lines: Iterable["Text"]) -> "Text":
"""Join text together with this instance as the separator.
Args:
lines (Iterable[Text]): An iterable of Text instances to join.
Returns:
            Text: A new text instance containing the joined text.
"""
new_text = self.blank_copy()
def iter_text() -> Iterable["Text"]:
if self.plain:
for last, line in loop_last(lines):
yield line
if not last:
yield self
else:
yield from lines
extend_text = new_text._text.extend
append_span = new_text._spans.append
extend_spans = new_text._spans.extend
offset = 0
_Span = Span
for text in iter_text():
extend_text(text._text)
if text.style:
append_span(_Span(offset, offset + len(text), text.style))
extend_spans(
_Span(offset + start, offset + end, style)
for start, end, style in text._spans
)
offset += len(text)
new_text._length = offset
return new_text
def expand_tabs(self, tab_size: Optional[int] = None) -> None:
"""Converts tabs to spaces.
Args:
tab_size (int, optional): Size of tabs. Defaults to 8.
"""
if "\t" not in self.plain:
return
if tab_size is None:
tab_size = self.tab_size
if tab_size is None:
tab_size = 8
new_text: List[Text] = []
append = new_text.append
for line in self.split("\n", include_separator=True):
if "\t" not in line.plain:
append(line)
else:
cell_position = 0
parts = line.split("\t", include_separator=True)
for part in parts:
if part.plain.endswith("\t"):
part._text[-1] = part._text[-1][:-1] + " "
cell_position += part.cell_len
tab_remainder = cell_position % tab_size
if tab_remainder:
spaces = tab_size - tab_remainder
part.extend_style(spaces)
cell_position += spaces
else:
cell_position += part.cell_len
append(part)
result = Text("").join(new_text)
self._text = [result.plain]
self._length = len(self.plain)
self._spans[:] = result._spans
def truncate(
self,
max_width: int,
*,
overflow: Optional["OverflowMethod"] = None,
pad: bool = False,
    ) -> None:
        """Truncate text if it is longer than a given width.
Args:
max_width (int): Maximum number of characters in text.
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow.
pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.
"""
_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
if _overflow != "ignore":
length = cell_len(self.plain)
if length > max_width:
if _overflow == "ellipsis":
self.plain = set_cell_size(self.plain, max_width - 1) + "…"
else:
self.plain = set_cell_size(self.plain, max_width)
if pad and length < max_width:
spaces = max_width - length
self._text = [f"{self.plain}{' ' * spaces}"]
self._length = len(self.plain)
def _trim_spans(self) -> None:
"""Remove or modify any spans that are over the end of the text."""
max_offset = len(self.plain)
_Span = Span
self._spans[:] = [
(
span
if span.end < max_offset
else _Span(span.start, min(max_offset, span.end), span.style)
)
for span in self._spans
if span.start < max_offset
]
def pad(self, count: int, character: str = " ") -> None:
"""Pad left and right with a given number of characters.
Args:
count (int): Width of padding.
character (str): The character to pad with. Must be a string of length 1.
"""
assert len(character) == 1, "Character must be a string of length 1"
if count:
pad_characters = character * count
self.plain = f"{pad_characters}{self.plain}{pad_characters}"
_Span = Span
self._spans[:] = [
_Span(start + count, end + count, style)
for start, end, style in self._spans
]
def pad_left(self, count: int, character: str = " ") -> None:
"""Pad the left with a given character.
Args:
count (int): Number of characters to pad.
character (str, optional): Character to pad with. Defaults to " ".
"""
assert len(character) == 1, "Character must be a string of length 1"
if count:
self.plain = f"{character * count}{self.plain}"
_Span = Span
self._spans[:] = [
_Span(start + count, end + count, style)
for start, end, style in self._spans
]
def pad_right(self, count: int, character: str = " ") -> None:
"""Pad the right with a given character.
Args:
count (int): Number of characters to pad.
character (str, optional): Character to pad with. Defaults to " ".
"""
assert len(character) == 1, "Character must be a string of length 1"
if count:
self.plain = f"{self.plain}{character * count}"
def align(self, align: AlignMethod, width: int, character: str = " ") -> None:
"""Align text to a given width.
Args:
align (AlignMethod): One of "left", "center", or "right".
width (int): Desired width.
character (str, optional): Character to pad with. Defaults to " ".
"""
self.truncate(width)
excess_space = width - cell_len(self.plain)
if excess_space:
if align == "left":
self.pad_right(excess_space, character)
elif align == "center":
left = excess_space // 2
self.pad_left(left, character)
self.pad_right(excess_space - left, character)
else:
self.pad_left(excess_space, character)
def append(
self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None
) -> "Text":
"""Add text with an optional style.
Args:
text (Union[Text, str]): A str or Text to append.
style (str, optional): A style name. Defaults to None.
Returns:
Text: Returns self for chaining.
"""
if not isinstance(text, (str, Text)):
raise TypeError("Only str or Text can be appended to Text")
if len(text):
if isinstance(text, str):
sanitized_text = strip_control_codes(text)
self._text.append(sanitized_text)
offset = len(self)
text_length = len(sanitized_text)
if style:
self._spans.append(Span(offset, offset + text_length, style))
self._length += text_length
elif isinstance(text, Text):
_Span = Span
if style is not None:
raise ValueError(
"style must not be set when appending Text instance"
)
text_length = self._length
if text.style:
self._spans.append(
_Span(text_length, text_length + len(text), text.style)
)
self._text.append(text.plain)
self._spans.extend(
_Span(start + text_length, end + text_length, style)
for start, end, style in text._spans.copy()
)
self._length += len(text)
return self
    def append_text(self, text: "Text") -> "Text":
        """Append another Text instance. This method is more performant than Text.append, but
only works for Text.
Args:
text (Text): The Text instance to append to this instance.
Returns:
Text: Returns self for chaining.
"""
_Span = Span
text_length = self._length
if text.style:
self._spans.append(_Span(text_length, text_length + len(text), text.style))
self._text.append(text.plain)
self._spans.extend(
_Span(start + text_length, end + text_length, style)
for start, end, style in text._spans.copy()
)
self._length += len(text)
return self
def append_tokens(
self, tokens: Iterable[Tuple[str, Optional[StyleType]]]
) -> "Text":
"""Append iterable of str and style. Style may be a Style instance or a str style definition.
Args:
tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.
Returns:
Text: Returns self for chaining.
"""
append_text = self._text.append
append_span = self._spans.append
_Span = Span
offset = len(self)
for content, style in tokens:
content = strip_control_codes(content)
append_text(content)
if style:
append_span(_Span(offset, offset + len(content), style))
offset += len(content)
self._length = offset
return self
def copy_styles(self, text: "Text") -> None:
"""Copy styles from another Text instance.
Args:
text (Text): A Text instance to copy styles from, must be the same length.
"""
self._spans.extend(text._spans)
def split(
self,
separator: str = "\n",
*,
include_separator: bool = False,
allow_blank: bool = False,
    ) -> Lines:
        """Split rich text into lines, preserving styles.
Args:
separator (str, optional): String to split on. Defaults to "\\\\n".
include_separator (bool, optional): Include the separator in the lines. Defaults to False.
allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.
Returns:
List[RichText]: A list of rich text, one per line of the original.
"""
assert separator, "separator must not be empty"
text = self.plain
if separator not in text:
return Lines([self.copy()])
if include_separator:
lines = self.divide(
match.end() for match in re.finditer(re.escape(separator), text)
)
else:
def flatten_spans() -> Iterable[int]:
for match in re.finditer(re.escape(separator), text):
start, end = match.span()
yield start
yield end
lines = Lines(
line for line in self.divide(flatten_spans()) if line.plain != separator
)
if not allow_blank and text.endswith(separator):
lines.pop()
return lines
    def divide(self, offsets: Iterable[int]) -> Lines:
        """Divide text into a number of lines at given offsets.
Args:
offsets (Iterable[int]): Offsets used to divide text.
Returns:
Lines: New RichText instances between offsets.
"""
_offsets = list(offsets)
if not _offsets:
return Lines([self.copy()])
text = self.plain
text_length = len(text)
divide_offsets = [0, *_offsets, text_length]
line_ranges = list(zip(divide_offsets, divide_offsets[1:]))
style = self.style
justify = self.justify
overflow = self.overflow
_Text = Text
new_lines = Lines(
_Text(
text[start:end],
style=style,
justify=justify,
overflow=overflow,
)
for start, end in line_ranges
)
if not self._spans:
return new_lines
_line_appends = [line._spans.append for line in new_lines._lines]
line_count = len(line_ranges)
_Span = Span
for span_start, span_end, style in self._spans:
lower_bound = 0
upper_bound = line_count
start_line_no = (lower_bound + upper_bound) // 2
while True:
line_start, line_end = line_ranges[start_line_no]
if span_start < line_start:
upper_bound = start_line_no - 1
elif span_start > line_end:
lower_bound = start_line_no + 1
else:
break
start_line_no = (lower_bound + upper_bound) // 2
if span_end < line_end:
end_line_no = start_line_no
else:
end_line_no = lower_bound = start_line_no
upper_bound = line_count
while True:
line_start, line_end = line_ranges[end_line_no]
if span_end < line_start:
upper_bound = end_line_no - 1
elif span_end > line_end:
lower_bound = end_line_no + 1
else:
break
end_line_no = (lower_bound + upper_bound) // 2
for line_no in range(start_line_no, end_line_no + 1):
line_start, line_end = line_ranges[line_no]
new_start = max(0, span_start - line_start)
new_end = min(span_end - line_start, line_end - line_start)
if new_end > new_start:
_line_appends[line_no](_Span(new_start, new_end, style))
return new_lines
def right_crop(self, amount: int = 1) -> None:
"""Remove a number of characters from the end of the text."""
max_offset = len(self.plain) - amount
_Span = Span
self._spans[:] = [
(
span
if span.end < max_offset
else _Span(span.start, min(max_offset, span.end), span.style)
)
for span in self._spans
if span.start < max_offset
]
self._text = [self.plain[:-amount]]
self._length -= amount
def wrap(
self,
console: "Console",
width: int,
*,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
tab_size: int = 8,
no_wrap: Optional[bool] = None,
) -> Lines:
"""Word wrap the text.
Args:
console (Console): Console instance.
width (int): Number of cells available per line.
justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default".
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
tab_size (int, optional): Default tab size. Defaults to 8.
            no_wrap (bool, optional): Disable wrapping. Defaults to False.
Returns:
            Lines: The wrapped lines.
"""
wrap_justify = justify or self.justify or DEFAULT_JUSTIFY
wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore"
lines = Lines()
for line in self.split(allow_blank=True):
if "\t" in line:
line.expand_tabs(tab_size)
if no_wrap:
new_lines = Lines([line])
else:
offsets = divide_line(str(line), width, fold=wrap_overflow == "fold")
new_lines = line.divide(offsets)
for line in new_lines:
line.rstrip_end(width)
if wrap_justify:
new_lines.justify(
console, width, justify=wrap_justify, overflow=wrap_overflow
)
for line in new_lines:
line.truncate(width, overflow=wrap_overflow)
lines.extend(new_lines)
return lines
    def fit(self, width: int) -> Lines:
        """Fit the text into a given width by chopping it into lines.
Args:
width (int): Maximum characters in a line.
Returns:
Lines: Lines container.
"""
lines: Lines = Lines()
append = lines.append
for line in self.split():
line.set_length(width)
append(line)
return lines
def detect_indentation(self) -> int:
"""Auto-detect indentation of code.
Returns:
int: Number of spaces used to indent code.
"""
_indentations = {
len(match.group(1))
for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE)
}
try:
indentation = (
reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1
)
except TypeError:
indentation = 1
return indentation
def with_indent_guides(
self,
indent_size: Optional[int] = None,
*,
character: str = "│",
style: StyleType = "dim green",
) -> "Text":
"""Adds indent guide lines to text.
Args:
indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.
character (str, optional): Character to use for indentation. Defaults to "│".
style (Union[Style, str], optional): Style of indent guides.
Returns:
Text: New text with indentation guides.
"""
_indent_size = self.detect_indentation() if indent_size is None else indent_size
text = self.copy()
text.expand_tabs()
indent_line = f"{character}{' ' * (_indent_size - 1)}"
re_indent = re.compile(r"^( *)(.*)$")
new_lines: List[Text] = []
add_line = new_lines.append
blank_lines = 0
for line in text.split(allow_blank=True):
match = re_indent.match(line.plain)
if not match or not match.group(2):
blank_lines += 1
continue
indent = match.group(1)
full_indents, remaining_space = divmod(len(indent), _indent_size)
new_indent = f"{indent_line * full_indents}{' ' * remaining_space}"
line.plain = new_indent + line.plain[len(new_indent) :]
line.stylize(style, 0, len(new_indent))
if blank_lines:
new_lines.extend([Text(new_indent, style=style)] * blank_lines)
blank_lines = 0
add_line(line)
if blank_lines:
new_lines.extend([Text("", style=style)] * blank_lines)
new_text = text.blank_copy("\n").join(new_lines)
return new_text
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Console
text = Text(
"""\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"""
)
text.highlight_words(["Lorem"], "bold")
text.highlight_words(["ipsum"], "italic")
console = Console()
console.rule("justify='left'")
console.print(text, style="red")
console.print()
console.rule("justify='center'")
console.print(text, style="green", justify="center")
console.print()
console.rule("justify='right'")
console.print(text, style="blue", justify="right")
console.print()
console.rule("justify='full'")
console.print(text, style="magenta", justify="full")
console.print()
| Text |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/test_response_format.py | {
"start": 1262,
"end": 1688
} | class ____(TypedDict):
"""Weather response."""
temperature: float
condition: str
weather_json_schema = {
"type": "object",
"properties": {
"temperature": {"type": "number", "description": "Temperature in fahrenheit"},
"condition": {"type": "string", "description": "Weather condition"},
},
"title": "weather_schema",
"required": ["temperature", "condition"],
}
| WeatherTypedDict |
python | scrapy__scrapy | scrapy/utils/testsite.py | {
"start": 266,
"end": 690
} | class ____:
def setUp(self):
from twisted.internet import reactor
super().setUp()
self.site = reactor.listenTCP(0, test_site(), interface="127.0.0.1")
self.baseurl = f"http://localhost:{self.site.getHost().port}/"
def tearDown(self):
super().tearDown()
self.site.stopListening()
def url(self, path: str) -> str:
return urljoin(self.baseurl, path)
| SiteTest |
python | walkccc__LeetCode | solutions/843. Guess the Word/843.py | {
"start": 171,
"end": 548
} | class ____:
def findSecretWord(self, words: list[str], master: 'Master') -> None:
for _ in range(10):
guessedWord = words[random.randint(0, len(words) - 1)]
matches = master.guess(guessedWord)
if matches == 6:
break
words = [
word for word in words
if sum(c1 == c2 for c1, c2 in zip(guessedWord, word)) == matches]
| Solution |
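The key step in findSecretWord above is pruning the candidate list to words that share exactly the reported number of positional matches with the last guess. A standalone sketch of that pruning, with made-up words and a hard-coded match count standing in for the Master API:
def matches(a: str, b: str) -> int:
    # number of positions where the two 6-letter words agree
    return sum(c1 == c2 for c1, c2 in zip(a, b))

words = ["acckzz", "ccbazz", "eiowzz", "abcczz"]
guess = "ccbazz"
reported = 3  # pretend master.guess(guess) returned 3 matching positions
words = [w for w in words if matches(guess, w) == reported]
print(words)  # ['acckzz']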
python | pandas-dev__pandas | pandas/io/parquet.py | {
"start": 9226,
"end": 23402
} | class ____(BaseImpl):
def __init__(self) -> None:
# since pandas is a dependency of fastparquet
# we need to import on first use
fastparquet = import_optional_dependency(
"fastparquet", extra="fastparquet is required for parquet support."
)
self.api = fastparquet
def write(
self,
df: DataFrame,
path,
compression: Literal["snappy", "gzip", "brotli"] | None = "snappy",
index=None,
partition_cols=None,
storage_options: StorageOptions | None = None,
filesystem=None,
**kwargs,
) -> None:
self.validate_dataframe(df)
if "partition_on" in kwargs and partition_cols is not None:
raise ValueError(
"Cannot use both partition_on and "
"partition_cols. Use partition_cols for partitioning data"
)
if "partition_on" in kwargs:
partition_cols = kwargs.pop("partition_on")
if partition_cols is not None:
kwargs["file_scheme"] = "hive"
if filesystem is not None:
raise NotImplementedError(
"filesystem is not implemented for the fastparquet engine."
)
# cannot use get_handle as write() does not accept file buffers
path = stringify_path(path)
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
# if filesystem is provided by fsspec, file must be opened in 'wb' mode.
kwargs["open_with"] = lambda path, _: fsspec.open(
path, "wb", **(storage_options or {})
).open()
elif storage_options:
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
with catch_warnings(record=True):
self.api.write(
path,
df,
compression=compression,
write_index=index,
partition_on=partition_cols,
**kwargs,
)
def read(
self,
path,
columns=None,
filters=None,
storage_options: StorageOptions | None = None,
filesystem=None,
to_pandas_kwargs: dict | None = None,
**kwargs,
) -> DataFrame:
parquet_kwargs: dict[str, Any] = {}
dtype_backend = kwargs.pop("dtype_backend", lib.no_default)
# We are disabling nullable dtypes for fastparquet pending discussion
parquet_kwargs["pandas_nulls"] = False
if dtype_backend is not lib.no_default:
raise ValueError(
"The 'dtype_backend' argument is not supported for the "
"fastparquet engine"
)
if filesystem is not None:
raise NotImplementedError(
"filesystem is not implemented for the fastparquet engine."
)
if to_pandas_kwargs is not None:
raise NotImplementedError(
"to_pandas_kwargs is not implemented for the fastparquet engine."
)
path = stringify_path(path)
handles = None
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
parquet_kwargs["fs"] = fsspec.open(path, "rb", **(storage_options or {})).fs
elif isinstance(path, str) and not os.path.isdir(path):
# use get_handle only when we are very certain that it is not a directory
# fsspec resources can also point to directories
# this branch is used for example when reading from non-fsspec URLs
handles = get_handle(
path, "rb", is_text=False, storage_options=storage_options
)
path = handles.handle
try:
parquet_file = self.api.ParquetFile(path, **parquet_kwargs)
with catch_warnings():
filterwarnings(
"ignore",
"make_block is deprecated",
Pandas4Warning,
)
return parquet_file.to_pandas(
columns=columns, filters=filters, **kwargs
)
finally:
if handles is not None:
handles.close()
@doc(storage_options=_shared_docs["storage_options"])
def to_parquet(
df: DataFrame,
path: FilePath | WriteBuffer[bytes] | None = None,
engine: str = "auto",
compression: ParquetCompressionOptions = "snappy",
index: bool | None = None,
storage_options: StorageOptions | None = None,
partition_cols: list[str] | None = None,
filesystem: Any = None,
**kwargs,
) -> bytes | None:
"""
Write a DataFrame to the parquet format.
Parameters
----------
df : DataFrame
path : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. If None, the result is
returned as bytes. If a string, it will be used as Root Directory path
when writing a partitioned dataset. The engine fastparquet does not
accept file-like objects.
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
When using the ``'pyarrow'`` engine and no storage options are provided
and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
(e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
Use the filesystem keyword with an instantiated fsspec filesystem
if you wish to use its implementation.
compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}},
default 'snappy'. Name of the compression to use. Use ``None``
for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output. If
``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
partition_cols : str or list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
{storage_options}
filesystem : fsspec or pyarrow filesystem, default None
Filesystem object to use when reading the parquet file. Only implemented
for ``engine="pyarrow"``.
.. versionadded:: 2.1.0
**kwargs
Additional keyword arguments passed to the engine:
* For ``engine="pyarrow"``: passed to :func:`pyarrow.parquet.write_table`
or :func:`pyarrow.parquet.write_to_dataset` (when using partition_cols)
* For ``engine="fastparquet"``: passed to :func:`fastparquet.write`
Returns
-------
bytes if no path argument is provided else None
"""
if isinstance(partition_cols, str):
partition_cols = [partition_cols]
impl = get_engine(engine)
path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path
impl.write(
df,
path_or_buf,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
filesystem=filesystem,
**kwargs,
)
if path is None:
assert isinstance(path_or_buf, io.BytesIO)
return path_or_buf.getvalue()
else:
return None
@set_module("pandas")
@doc(storage_options=_shared_docs["storage_options"])
def read_parquet(
path: FilePath | ReadBuffer[bytes],
engine: str = "auto",
columns: list[str] | None = None,
storage_options: StorageOptions | None = None,
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
filesystem: Any = None,
filters: list[tuple] | list[list[tuple]] | None = None,
to_pandas_kwargs: dict | None = None,
**kwargs,
) -> DataFrame:
"""
Load a parquet object from the file path, returning a DataFrame.
The function automatically handles reading the data from a parquet file
and creates a DataFrame with the appropriate structure.
Parameters
----------
path : str, path object or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gs, and file. For file URLs, a host is expected. A local file could be:
``file://localhost/path/to/table.parquet``.
A file URL can also be a path to a directory that contains multiple
partitioned parquet files. Both pyarrow and fastparquet support
paths to directories as well as file URLs. A directory path could be:
``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``.
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
When using the ``'pyarrow'`` engine and no storage options are provided
and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec``
(e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first.
Use the filesystem keyword with an instantiated fsspec filesystem
if you wish to use its implementation.
columns : list, default=None
If not None, only these columns will be read from the file.
{storage_options}
dtype_backend : {{'numpy_nullable', 'pyarrow'}}
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). If not specified, the default behavior
is to not use nullable data types. If specified, the behavior
is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
* ``"pyarrow"``: returns pyarrow-backed nullable
:class:`ArrowDtype` :class:`DataFrame`
.. versionadded:: 2.0
filesystem : fsspec or pyarrow filesystem, default None
Filesystem object to use when reading the parquet file. Only implemented
for ``engine="pyarrow"``.
.. versionadded:: 2.1.0
filters : List[Tuple] or List[List[Tuple]], default None
To filter out data.
Filter syntax: [[(column, op, val), ...],...]
where op is [==, =, >, >=, <, <=, !=, in, not in]
The innermost tuples are transposed into a set of filters applied
through an `AND` operation.
The outer list combines these sets of filters through an `OR`
operation.
A single list of tuples can also be used, meaning that no `OR`
operation between set of filters is to be conducted.
Using this argument will NOT result in row-wise filtering of the final
partitions unless ``engine="pyarrow"`` is also specified. For
other engines, filtering is only performed at the partition level, that is,
to prevent the loading of some row-groups and/or files.
.. versionadded:: 2.1.0
to_pandas_kwargs : dict | None, default None
Keyword arguments to pass through to :func:`pyarrow.Table.to_pandas`
when ``engine="pyarrow"``.
.. versionadded:: 3.0.0
**kwargs
Additional keyword arguments passed to the engine:
* For ``engine="pyarrow"``: passed to :func:`pyarrow.parquet.read_table`
* For ``engine="fastparquet"``: passed to
:meth:`fastparquet.ParquetFile.to_pandas`
Returns
-------
DataFrame
DataFrame based on parquet file.
See Also
--------
DataFrame.to_parquet : Create a parquet object that serializes a DataFrame.
Examples
--------
>>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> df_parquet_bytes = original_df.to_parquet()
>>> from io import BytesIO
>>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes))
>>> restored_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> restored_df.equals(original_df)
True
>>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"])
>>> restored_bar
bar
0 5
1 6
2 7
3 8
4 9
>>> restored_bar.equals(original_df[["bar"]])
True
The function uses `kwargs` that are passed directly to the engine.
In the following example, we use the `filters` argument of the pyarrow
engine to filter the rows of the DataFrame.
Since `pyarrow` is the default engine, we can omit the `engine` argument.
Note that the `filters` argument is implemented by the `pyarrow` engine,
which can benefit from multithreading and also potentially be more
economical in terms of memory.
>>> sel = [("foo", ">", 2)]
>>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel)
>>> restored_part
foo bar
0 3 8
1 4 9
"""
impl = get_engine(engine)
check_dtype_backend(dtype_backend)
return impl.read(
path,
columns=columns,
filters=filters,
storage_options=storage_options,
dtype_backend=dtype_backend,
filesystem=filesystem,
to_pandas_kwargs=to_pandas_kwargs,
**kwargs,
)
| FastParquetImpl |
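A usage sketch of the public entry points documented above, mirroring the docstring examples; it assumes pandas plus a parquet engine (pyarrow or fastparquet) is installed.
import io
import pandas as pd

df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
parquet_bytes = df.to_parquet()  # path=None, so the serialized bytes are returned
restored = pd.read_parquet(io.BytesIO(parquet_bytes), columns=["bar"])
print(restored.equals(df[["bar"]]))  # True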
python | mlflow__mlflow | mlflow/tracing/processor/mlflow_v3.py | {
"start": 480,
"end": 1958
} | class ____(BaseMlflowSpanProcessor):
"""
Defines custom hooks to be executed when a span is started or ended (before exporting).
This processor is used for exporting traces to MLflow Tracking Server
using the V3 trace schema and API.
"""
def __init__(
self,
span_exporter: SpanExporter,
export_metrics: bool,
):
super().__init__(span_exporter, export_metrics)
def _start_trace(self, root_span: OTelSpan) -> TraceInfo:
"""
Create a new TraceInfo object and register it with the trace manager.
This method is called in the on_start method of the base class.
"""
experiment_id = get_experiment_id_for_trace(root_span)
if experiment_id is None:
_logger.debug(
"Experiment ID is not set for trace. It may not be exported to MLflow backend."
)
trace_info = TraceInfo(
trace_id=generate_trace_id_v3(root_span),
trace_location=TraceLocation.from_experiment_id(experiment_id),
request_time=root_span.start_time // 1_000_000, # nanosecond to millisecond
execution_duration=None,
state=TraceState.IN_PROGRESS,
trace_metadata=self._get_basic_trace_metadata(),
tags=self._get_basic_trace_tags(root_span),
)
self._trace_manager.register_trace(root_span.context.trace_id, trace_info)
return trace_info
| MlflowV3SpanProcessor |
python | Farama-Foundation__Gymnasium | tests/wrappers/test_jax_to_torch.py | {
"start": 1234,
"end": 5355
} | class ____(NamedTuple):
a: torch.Tensor
b: torch.Tensor
# Unless jax_enable_x64 is set at startup, jax will prevent us from creating double precision
# arrays. Therefore, all arrays are expected to be single precision after a roundtrip.
@pytest.mark.parametrize(
"value, expected_value",
[
(1.0, torch.tensor(1.0)),
(2, torch.tensor(2, dtype=torch.int32)),
((3.0, 4), (torch.tensor(3.0), torch.tensor(4, dtype=torch.int32))),
([3.0, 4], [torch.tensor(3.0), torch.tensor(4, dtype=torch.int32)]),
(
{
"a": 6.0,
"b": 7,
},
{"a": torch.tensor(6.0), "b": torch.tensor(7, dtype=torch.int32)},
),
(torch.tensor(1.0), torch.tensor(1.0)),
(torch.tensor(1), torch.tensor(1, dtype=torch.int32)),
(torch.tensor([1, 2]), torch.tensor([1, 2], dtype=torch.int32)),
(
torch.tensor([[1.0], [2.0]]),
torch.tensor([[1.0], [2.0]]),
),
(
{
"a": (
1,
torch.tensor(2.0),
torch.tensor([3, 4]),
),
"b": {"c": 5},
},
{
"a": (
torch.tensor(1, dtype=torch.int32),
torch.tensor(2.0),
torch.tensor([3, 4], dtype=torch.int32),
),
"b": {"c": torch.tensor(5, dtype=torch.int32)},
},
),
(
ExampleNamedTuple(
a=torch.tensor([1, 2]),
b=torch.tensor([1.0, 2.0]),
),
ExampleNamedTuple(
a=torch.tensor([1, 2], dtype=torch.int32),
b=torch.tensor([1.0, 2.0]),
),
),
(None, None),
],
)
def test_roundtripping(value, expected_value):
"""We test numpy -> jax -> numpy as this is direction in the NumpyToJax wrapper."""
roundtripped_value = jax_to_torch(torch_to_jax(value))
assert torch_data_equivalence(roundtripped_value, expected_value)
def _jax_reset_func(self, seed=None, options=None):
return jnp.array([1.0, 2.0, 3.0]), {"data": jnp.array([1, 2, 3])}
def _jax_step_func(self, action):
assert isinstance(action, jax.Array), type(action)
return (
jnp.array([1, 2, 3]),
jnp.array(5.0),
jnp.array(True),
jnp.array(False),
{"data": jnp.array([1.0, 2.0])},
)
def test_jax_to_torch_wrapper():
"""Tests the `JaxToTorchV0` wrapper."""
env = GenericTestEnv(reset_func=_jax_reset_func, step_func=_jax_step_func)
# Check that the reset and step for jax environment are as expected
obs, info = env.reset()
assert isinstance(obs, jax.Array)
assert isinstance(info, dict) and isinstance(info["data"], jax.Array)
obs, reward, terminated, truncated, info = env.step(jnp.array([1, 2]))
assert isinstance(obs, jax.Array)
assert isinstance(reward, jax.Array)
assert isinstance(terminated, jax.Array) and isinstance(truncated, jax.Array)
assert isinstance(info, dict) and isinstance(info["data"], jax.Array)
# Check that the wrapped version is correct.
wrapped_env = JaxToTorch(env)
obs, info = wrapped_env.reset()
assert isinstance(obs, torch.Tensor)
assert isinstance(info, dict) and isinstance(info["data"], torch.Tensor)
obs, reward, terminated, truncated, info = wrapped_env.step(torch.tensor([1, 2]))
assert isinstance(obs, torch.Tensor)
assert isinstance(reward, float)
assert isinstance(terminated, bool) and isinstance(truncated, bool)
assert isinstance(info, dict) and isinstance(info["data"], torch.Tensor)
# Check that the wrapped environment can render. This implicitly returns None and requires a
# None -> None conversion
wrapped_env.render()
# Test that the wrapped environment can be pickled
env = gymnasium.make("CartPole-v1", disable_env_checker=True)
wrapped_env = JaxToTorch(env)
pkl = pickle.dumps(wrapped_env)
pickle.loads(pkl)
| ExampleNamedTuple |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events_uptime_results.py | {
"start": 229,
"end": 12656
} | class ____(
OrganizationEventsEndpointTestBase, UptimeResultEAPTestCase
):
dataset = "uptime_results"
def build_expected_result(self, **kwargs):
return {"project.name": None, **kwargs}
@pytest.mark.querybuilder
def test_simple_uptime_query(self) -> None:
results = [
self.create_eap_uptime_result(
check_status="success",
http_status_code=200,
region="us-east-1",
scheduled_check_time=self.ten_mins_ago,
),
self.create_eap_uptime_result(
check_status="failure",
http_status_code=500,
region="us-west-2",
scheduled_check_time=self.nine_mins_ago,
),
]
self.store_uptime_results(results)
response = self.do_request(
{
"field": ["check_status", "http_status_code", "region"],
"query": "",
"orderBy": "timestamp",
"project": self.project.id,
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert response.data["meta"]["dataset"] == self.dataset
assert data == [
self.build_expected_result(
id=results[0].item_id.hex(),
check_status="success",
http_status_code=200,
region="us-east-1",
),
self.build_expected_result(
id=results[1].item_id.hex(),
check_status="failure",
http_status_code=500,
region="us-west-2",
),
]
@pytest.mark.querybuilder
def test_status_filter_query(self) -> None:
results = [
self.create_eap_uptime_result(
check_status="success",
http_status_code=200,
scheduled_check_time=self.ten_mins_ago,
),
self.create_eap_uptime_result(
check_status="failure",
http_status_code=500,
scheduled_check_time=self.nine_mins_ago,
),
self.create_eap_uptime_result(
check_status="success",
http_status_code=201,
scheduled_check_time=self.nine_mins_ago,
),
]
self.store_uptime_results(results)
response = self.do_request(
{
"field": ["check_status", "http_status_code"],
"query": "check_status:success",
"orderBy": "http_status_code",
"project": self.project.id,
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data == [
self.build_expected_result(
id=results[0].item_id.hex(), check_status="success", http_status_code=200
),
self.build_expected_result(
id=results[2].item_id.hex(), check_status="success", http_status_code=201
),
]
@pytest.mark.querybuilder
def test_timing_fields_query(self) -> None:
results = [
self.create_eap_uptime_result(
check_status="success",
check_duration_us=150000,
request_duration_us=125000,
dns_lookup_duration_us=25000,
tcp_connection_duration_us=15000,
scheduled_check_time=self.ten_mins_ago,
),
self.create_eap_uptime_result(
check_status="failure",
check_duration_us=30000000,
request_duration_us=30000000,
dns_lookup_duration_us=200000,
tcp_connection_duration_us=25000,
scheduled_check_time=self.nine_mins_ago,
),
]
self.store_uptime_results(results)
response = self.do_request(
{
"field": [
"check_status",
"check_duration_us",
"request_duration_us",
"dns_lookup_duration_us",
"tcp_connection_duration_us",
],
"query": "",
"orderBy": "check_duration_us",
"project": self.project.id,
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data == [
self.build_expected_result(
id=results[0].item_id.hex(),
check_status="success",
check_duration_us=150000,
request_duration_us=125000,
dns_lookup_duration_us=25000,
tcp_connection_duration_us=15000,
),
self.build_expected_result(
id=results[1].item_id.hex(),
check_status="failure",
check_duration_us=30000000,
request_duration_us=30000000,
dns_lookup_duration_us=200000,
tcp_connection_duration_us=25000,
),
]
@pytest.mark.querybuilder
def test_cross_level_filter_query(self) -> None:
results = [
self.create_eap_uptime_result(
check_status="success",
http_status_code=200,
dns_lookup_duration_us=15000,
region="us-east-1",
scheduled_check_time=self.ten_mins_ago,
),
self.create_eap_uptime_result(
check_status="failure",
http_status_code=504,
dns_lookup_duration_us=150000,
region="us-east-1",
scheduled_check_time=self.nine_mins_ago,
),
self.create_eap_uptime_result(
check_status="failure",
http_status_code=500,
dns_lookup_duration_us=20000,
region="us-west-2",
scheduled_check_time=self.nine_mins_ago,
),
]
self.store_uptime_results(results)
response = self.do_request(
{
"field": ["check_status", "http_status_code", "dns_lookup_duration_us", "region"],
"query": "check_status:failure AND dns_lookup_duration_us:>100000",
"orderBy": "dns_lookup_duration_us",
"project": self.project.id,
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data == [
self.build_expected_result(
id=results[1].item_id.hex(),
check_status="failure",
http_status_code=504,
dns_lookup_duration_us=150000,
region="us-east-1",
),
]
@pytest.mark.querybuilder
def test_redirect_sequence_query(self) -> None:
"""Test querying redirect chains using request_sequence."""
check_id = uuid4().hex
trace_id = uuid4().hex
results = [
self.create_eap_uptime_result(
check_id=check_id,
request_sequence=0,
check_status="success",
http_status_code=301,
request_url="http://example.com",
trace_id=trace_id,
scheduled_check_time=self.ten_mins_ago,
),
self.create_eap_uptime_result(
check_id=check_id,
request_sequence=1,
check_status="success",
http_status_code=200,
request_url="https://example.com",
trace_id=trace_id,
scheduled_check_time=self.ten_mins_ago,
),
self.create_eap_uptime_result(
check_id=uuid4().hex,
request_sequence=0,
check_status="success",
http_status_code=200,
request_url="https://other.com",
scheduled_check_time=self.nine_mins_ago,
),
]
self.store_uptime_results(results)
response = self.do_request(
{
"field": ["check_id", "request_sequence", "http_status_code", "request_url"],
"query": "request_sequence:>0",
"orderBy": "request_sequence",
"project": self.project.id,
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data == [
self.build_expected_result(
id=results[1].item_id.hex(),
check_id=check_id,
request_sequence=1,
http_status_code=200,
request_url="https://example.com",
),
]
@pytest.mark.querybuilder
def test_region_and_status_combination(self) -> None:
results = [
self.create_eap_uptime_result(
check_status="success",
region="us-east-1",
http_status_code=200,
scheduled_check_time=self.ten_mins_ago,
),
self.create_eap_uptime_result(
check_status="failure",
region="us-east-1",
http_status_code=500,
scheduled_check_time=self.nine_mins_ago,
),
self.create_eap_uptime_result(
check_status="success",
region="us-west-2",
http_status_code=200,
scheduled_check_time=self.nine_mins_ago,
),
self.create_eap_uptime_result(
check_status="failure",
region="us-west-2",
http_status_code=503,
scheduled_check_time=self.nine_mins_ago,
),
]
self.store_uptime_results(results)
response = self.do_request(
{
"field": ["check_status", "region", "http_status_code"],
"query": "region:us-east-1 AND check_status:failure",
"orderBy": "http_status_code",
"project": self.project.id,
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
assert data == [
self.build_expected_result(
id=results[1].item_id.hex(),
check_status="failure",
region="us-east-1",
http_status_code=500,
),
]
@pytest.mark.querybuilder
def test_timestamp_precision(self) -> None:
"""Test that timestamp precision is maintained in queries."""
base_time = self.ten_mins_ago
results = [
self.create_eap_uptime_result(
check_status="success",
guid="check-1",
scheduled_check_time=base_time,
),
self.create_eap_uptime_result(
check_status="success",
guid="check-2",
scheduled_check_time=base_time + timedelta(microseconds=1),
),
self.create_eap_uptime_result(
check_status="success",
guid="check-3",
scheduled_check_time=base_time + timedelta(microseconds=2),
),
]
self.store_uptime_results(results)
response = self.do_request(
{
"field": ["guid", "timestamp"],
"query": "",
"orderBy": "timestamp",
"project": self.project.id,
"dataset": self.dataset,
}
)
assert response.status_code == 200, response.content
data = response.data["data"]
guids = {result["guid"] for result in data}
assert guids == {"check-1", "check-2", "check-3"}
for result in data:
assert result["timestamp"] is not None
assert "T" in result["timestamp"]
| OrganizationEventsUptimeResultsEndpointTest |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_tm_future_annotations_sync.py | {
"start": 153175,
"end": 154390
} | class ____(fixtures.TestBase):
def _assertions(self, A, B, lazy):
is_(A.bs.property.mapper, B.__mapper__)
is_true(A.bs.property.uselist)
eq_(A.bs.property.lazy, lazy)
def test_dynamic(self, decl_base):
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
a_id: Mapped[int] = mapped_column(
ForeignKey("a.id", ondelete="cascade")
)
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
bs: DynamicMapped[B] = relationship()
self._assertions(A, B, "dynamic")
def test_write_only(self, decl_base):
class B(decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
a_id: Mapped[int] = mapped_column(
ForeignKey("a.id", ondelete="cascade")
)
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
bs: WriteOnlyMapped[B] = relationship()
self._assertions(A, B, "write_only")
| WriteOnlyRelationshipTest |
python | Textualize__textual | docs/examples/guide/layout/horizontal_layout_overflow.py | {
"start": 80,
"end": 416
} | class ____(App):
CSS_PATH = "horizontal_layout_overflow.tcss"
def compose(self) -> ComposeResult:
yield Static("One", classes="box")
yield Static("Two", classes="box")
yield Static("Three", classes="box")
if __name__ == "__main__":
app = HorizontalLayoutExample()
app.run()
| HorizontalLayoutExample |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 12298,
"end": 13192
} | class ____:
root_module: str
defs_module: Optional[str] = None
code_location_target_module: Optional[str] = None
code_location_name: Optional[str] = None
registry_modules: list[str] = field(default_factory=list)
@classmethod
def from_raw(cls, raw: "DgRawProjectConfig") -> Self:
return cls(
root_module=raw["root_module"],
defs_module=raw.get("defs_module", DgProjectConfig.defs_module),
code_location_name=raw.get("code_location_name", DgProjectConfig.code_location_name),
code_location_target_module=raw.get(
"code_location_target_module",
DgProjectConfig.code_location_target_module,
),
registry_modules=raw.get(
"registry_modules", cls.__dataclass_fields__["registry_modules"].default_factory()
),
)
| DgProjectConfig |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/service.py | {
"start": 4362,
"end": 4636
} | class ____:
region: Region | None
is_early_halt: bool = False
def __post_init__(self) -> None:
if (self.region is None) != self.is_early_halt:
raise ValueError("region must be supplied if and only if not halting early")
| _RegionResolutionResult |
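The class above uses the dataclass __post_init__ hook to enforce an either/or invariant between its two fields. A minimal standalone sketch of the same pattern (the Result name and the plain string region are invented for illustration):
from dataclasses import dataclass
from typing import Optional

@dataclass
class Result:
    region: Optional[str]
    is_early_halt: bool = False

    def __post_init__(self) -> None:
        # region must be present exactly when we are not halting early
        if (self.region is None) != self.is_early_halt:
            raise ValueError("region must be supplied if and only if not halting early")

Result(region="us", is_early_halt=False)    # ok
Result(region=None, is_early_halt=True)     # ok
# Result(region=None, is_early_halt=False)  # would raise ValueError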
python | coleifer__peewee | tests/pool.py | {
"start": 542,
"end": 941
} | class ____(_transaction):
def _add_history(self, message):
self.db.transaction_history.append(
'%s%s' % (message, self._conn))
def __enter__(self):
self._conn = self.db.connection()
self._add_history('O')
self.db.push_transaction(self)
def __exit__(self, *args):
self._add_history('X')
self.db.pop_transaction()
| FakeTransaction |
python | ray-project__ray | python/ray/serve/_private/benchmarks/common.py | {
"start": 4585,
"end": 4767
} | class ____:
def __init__(self):
logging.getLogger("ray.serve").setLevel(logging.WARNING)
def __call__(self, *args, **kwargs):
return b""
@serve.deployment
| Noop |
python | coleifer__peewee | tests/sqlite.py | {
"start": 62700,
"end": 65835
} | class ____(ModelTestCase):
database = database
requires = [Post, Values]
def test_custom_agg(self):
data = (
(1, 3.4, 1.0),
(1, 6.4, 2.3),
(1, 4.3, 0.9),
(2, 3.4, 1.4),
(3, 2.7, 1.1),
(3, 2.5, 1.1),
)
for klass, value, wt in data:
Values.create(klass=klass, value=value, weight=wt)
vq = (Values
.select(
Values.klass,
fn.weighted_avg(Values.value).alias('wtavg'),
fn.avg(Values.value).alias('avg'))
.group_by(Values.klass))
q_data = [(v.klass, v.wtavg, v.avg) for v in vq]
self.assertEqual(q_data, [
(1, 4.7, 4.7),
(2, 3.4, 3.4),
(3, 2.6, 2.6)])
vq = (Values
.select(
Values.klass,
fn.weighted_avg2(Values.value, Values.weight).alias('wtavg'),
fn.avg(Values.value).alias('avg'))
.group_by(Values.klass))
q_data = [(v.klass, str(v.wtavg)[:4], v.avg) for v in vq]
self.assertEqual(q_data, [
(1, '5.23', 4.7),
(2, '3.4', 3.4),
(3, '2.6', 2.6)])
def test_custom_collation(self):
for i in [1, 4, 3, 5, 2]:
Post.create(message='p%d' % i)
pq = Post.select().order_by(NodeList((Post.message, SQL('collate collate_reverse'))))
self.assertEqual([p.message for p in pq], ['p5', 'p4', 'p3', 'p2', 'p1'])
def test_collation_decorator(self):
posts = [Post.create(message=m) for m in ['aaa', 'Aab', 'ccc', 'Bba', 'BbB']]
pq = Post.select().order_by(collate_case_insensitive.collation(Post.message))
self.assertEqual([p.message for p in pq], [
'aaa',
'Aab',
'Bba',
'BbB',
'ccc'])
def test_custom_function(self):
p1 = Post.create(message='this is a test')
p2 = Post.create(message='another TEST')
sq = Post.select().where(fn.title_case(Post.message) == 'This Is A Test')
self.assertEqual(list(sq), [p1])
sq = Post.select(fn.title_case(Post.message)).tuples()
self.assertEqual([x[0] for x in sq], [
'This Is A Test',
'Another Test',
])
def test_function_decorator(self):
[Post.create(message=m) for m in ['testing', 'chatting ', ' foo']]
pq = Post.select(fn.rstrip(Post.message, 'ing')).order_by(Post.id)
self.assertEqual([x[0] for x in pq.tuples()], [
'test', 'chatting ', ' foo'])
pq = Post.select(fn.rstrip(Post.message, ' ')).order_by(Post.id)
self.assertEqual([x[0] for x in pq.tuples()], [
'testing', 'chatting', ' foo'])
def test_use_across_connections(self):
db = get_in_memory_db()
@db.func()
def rev(s):
return s[::-1]
db.connect(); db.close(); db.connect()
curs = db.execute_sql('select rev(?)', ('hello',))
self.assertEqual(curs.fetchone(), ('olleh',))
| TestUserDefinedCallbacks |
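The last test above (test_use_across_connections) shows the essential registration pattern; a trimmed standalone sketch using an in-memory SQLite database (requires peewee):
from peewee import SqliteDatabase

db = SqliteDatabase(":memory:")

@db.func()  # register a Python callable as a SQLite scalar function named "rev"
def rev(s):
    return s[::-1]

db.connect()
cursor = db.execute_sql("select rev(?)", ("hello",))
print(cursor.fetchone())  # ('olleh',)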
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 18308,
"end": 22782
} | class ____:
def test_basic(self):
a = [1, 2, 3]
assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])
assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])
assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])
b = np.array([0, 1], dtype=np.float64)
assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
assert_equal(insert(b, [], []), b)
assert_equal(insert(a, np.array([True] * 4), 9), [9, 1, 9, 2, 9, 3, 9])
assert_equal(insert(a, np.array([True, False, True, False]), 9),
[9, 1, 2, 9, 3])
def test_multidim(self):
a = [[1, 1, 1]]
r = [[2, 2, 2],
[1, 1, 1]]
assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])
assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
assert_equal(insert(a, 0, 2, axis=0), r)
assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
a = np.array([[1, 1], [2, 2], [3, 3]])
b = np.arange(1, 4).repeat(3).reshape(3, 3)
c = np.concatenate(
(a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,
a[:, 1:2]), axis=1)
assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)
assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
# scalars behave differently, in this case exactly opposite:
assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c)
a = np.arange(4).reshape(2, 2)
assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)
assert_equal(insert(a[:1, :], 1, a[1, :], axis=0), a)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
assert_equal(insert(a, 1, a[:, :, 3], axis=-1),
insert(a, 1, a[:, :, 3], axis=2))
assert_equal(insert(a, 1, a[:, 2, :], axis=-2),
insert(a, 1, a[:, 2, :], axis=1))
# invalid axis value
assert_raises(AxisError, insert, a, 1, a[:, 2, :], axis=3)
assert_raises(AxisError, insert, a, 1, a[:, 2, :], axis=-4)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
assert_equal(insert(a, 1, a[:, :, 3], axis=-1),
insert(a, 1, a[:, :, 3], axis=2))
assert_equal(insert(a, 1, a[:, 2, :], axis=-2),
insert(a, 1, a[:, 2, :], axis=1))
def test_0d(self):
a = np.array(1)
with pytest.raises(AxisError):
insert(a, [], 2, axis=0)
with pytest.raises(TypeError):
insert(a, [], 2, axis="nonsense")
def test_subclass(self):
class SubClass(np.ndarray):
pass
a = np.arange(10).view(SubClass)
assert_(isinstance(np.insert(a, 0, [0]), SubClass))
assert_(isinstance(np.insert(a, [], []), SubClass))
assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))
assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))
assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))
# This is an error in the future:
a = np.array(1).view(SubClass)
assert_(isinstance(np.insert(a, 0, [0]), SubClass))
def test_index_array_copied(self):
x = np.array([1, 1, 1])
np.insert([0, 1, 2], x, [3, 4, 5])
assert_equal(x, np.array([1, 1, 1]))
def test_structured_array(self):
a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],
dtype=[('foo', 'i'), ('bar', 'S1')])
val = (4, 'd')
b = np.insert(a, 0, val)
assert_array_equal(b[0], np.array(val, dtype=b.dtype))
val = [(4, 'd')] * 2
b = np.insert(a, [0, 2], val)
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
def test_index_floats(self):
with pytest.raises(IndexError):
np.insert([0, 1, 2], np.array([1.0, 2.0]), [10, 20])
with pytest.raises(IndexError):
np.insert([0, 1, 2], np.array([], dtype=float), [])
@pytest.mark.parametrize('idx', [4, -4])
def test_index_out_of_bounds(self, idx):
with pytest.raises(IndexError, match='out of bounds'):
np.insert([0, 1, 2], [idx], [3, 4])
| TestInsert |
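A quick interactive view of the np.insert behaviours the tests above assert, using plain NumPy:
import numpy as np

a = np.array([1, 2, 3])
print(np.insert(a, 1, 9))           # [1 9 2 3]
print(np.insert(a, [1, -1, 3], 9))  # [1 9 2 9 3 9]
print(np.insert([[1, 1, 1]], 0, [2, 2, 2], axis=0))  # row [2 2 2] stacked above [1 1 1]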
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 1346,
"end": 1596
} | class ____(models.Model):
question = models.CharField(max_length=200)
pub_date = models.DateTimeField("date published")
place = models.TextField(null=True)
history = HistoricalRecords(excluded_fields=["pub_date"])
| PollWithExcludeFields |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_scalarmath.py | {
"start": 5390,
"end": 8998
} | class ____(TestCase):
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a**4
assert_(b == 81, f"error with {t!r}: got {b!r}")
def test_large_types(self):
for t in [np.int32, np.int64, np.float32, np.float64]:
a = t(51)
b = a**4
msg = f"error with {t!r}: got {b!r}"
if np.issubdtype(t, np.integer):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
@skip(reason="NP_VER: fails on CI on older NumPy")
@xpassIfTorchDynamo_np # (reason="Value-based casting: (2)**(-2) -> 0 in pytorch.")
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
# has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
exp = [np.array(-1, dt)[()] for dt in "bhil"]
# 1 ** -1 possible special case
base = [np.array(1, dt)[()] for dt in "bhilB"]
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, 1.0)
# -1 ** -1 possible special case
base = [np.array(-1, dt)[()] for dt in "bhil"]
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, -1.0)
# 2 ** -1 perhaps generic
base = [np.array(2, dt)[()] for dt in "bhilB"]
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, 0.5)
def test_mixed_types(self):
typelist = [
np.int8,
np.int16,
np.float16,
np.float32,
np.float64,
np.int8,
np.int16,
np.int32,
np.int64,
]
for t1 in typelist:
for t2 in typelist:
a = t1(3)
b = t2(2)
result = a**b
msg = f"error with {t1!r} and {t2!r}:got {result!r}, expected {9!r}"
if np.issubdtype(np.dtype(result), np.integer):
assert_(result == 9, msg)
else:
assert_almost_equal(result, 9, err_msg=msg)
def test_modular_power(self):
# modular power is not implemented, so ensure it errors
a = 5
b = 4
c = 10
expected = pow(a, b, c) # noqa: F841
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
def floordiv_and_mod(x, y):
return (x // y, x % y)
def _signs(dt):
if dt in np.typecodes["UnsignedInteger"]:
return (+1,)
else:
return (+1, -1)
@instantiate_parametrized_tests
| TestPower |
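The same scalar-power rules can be poked at directly with plain NumPy; a hedged illustration (the test module above targets the torch._numpy shim, so behaviour there may differ in the cases the tests mark as xpass/skip):
import numpy as np

print(np.int16(3) ** 4)     # 81, result stays an integer type
print(np.float32(51) ** 4)  # 6765201.0
try:
    np.int32(2) ** np.int32(-1)
except ValueError as exc:
    print("rejected:", exc)  # integers to negative integer powers are not allowed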
python | sympy__sympy | sympy/core/numbers.py | {
"start": 92861,
"end": 94341
} | class ____(IntegerConstant, metaclass=Singleton):
"""The number zero.
Zero is a singleton, and can be accessed by ``S.Zero``
Examples
========
>>> from sympy import S, Integer
>>> Integer(0) is S.Zero
True
>>> 1/S.Zero
zoo
References
==========
.. [1] https://en.wikipedia.org/wiki/Zero
"""
p = 0
q = 1
is_positive = False
is_negative = False
is_zero = True
is_number = True
is_comparable = True
__slots__ = ()
def __getnewargs__(self):
return ()
@staticmethod
def __abs__():
return S.Zero
@staticmethod
def __neg__():
return S.Zero
def _eval_power(self, expt):
if expt.is_extended_positive:
return self
if expt.is_extended_negative:
return S.ComplexInfinity
if expt.is_extended_real is False:
return S.NaN
if expt.is_zero:
return S.One
# infinities are already handled with pos and neg
# tests above; now throw away leading numbers on Mul
# exponent since 0**-x = zoo**x even when x == 0
coeff, terms = expt.as_coeff_Mul()
if coeff.is_negative:
return S.ComplexInfinity**terms
if coeff is not S.One: # there is a Number to discard
return self**terms
def _eval_order(self, *symbols):
# Order(0,x) -> 0
return self
def __bool__(self):
return False
| Zero |
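A short check of the behaviours _eval_power and __bool__ encode above, runnable with sympy installed:
from sympy import S, Symbol

x = Symbol("x", positive=True)
print(S.Zero ** x)       # 0
print(S.Zero ** -x)      # zoo  (ComplexInfinity)
print(S.Zero ** S.Zero)  # 1
print(1 / S.Zero)        # zoo
print(bool(S.Zero))      # False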
python | streamlit__streamlit | e2e_playwright/st_write_objects.py | {
"start": 1470,
"end": 1812
} | class ____:
def _repr_html_(self) -> str:
return "This is an <b>HTML tag</b>!"
# Shows as st.help because this is just an object.
st.write(ClassWithReprHtml())
# Shows as HTML.
st.write(ClassWithReprHtml(), unsafe_allow_html=True)
st.subheader("st.write(exception)")
st.write(Exception("This is an exception!"))
| ClassWithReprHtml |
python | huggingface__transformers | src/transformers/trainer_pt_utils.py | {
"start": 25327,
"end": 43220
} | class ____(IterableDataset):
"""
Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
always yield a number of samples that is a round multiple of the actual batch size (which is `batch_size x
num_processes`). Depending on the value of the `drop_last` attribute, it will either stop the iteration at the
first batch that would be too small or loop with indices from the beginning.
On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]` and a batch size of 2:
- the shard on process 0 will yield `[0, 1, 4, 5, 8, 9]` so will see batches `[0, 1]`, `[4, 5]`, `[8, 9]`
- the shard on process 1 will yield `[2, 3, 6, 7, 10, 11]` so will see batches `[2, 3]`, `[6, 7]`, `[10, 11]`
<Tip warning={true}>
If your IterableDataset implements some randomization that needs to be applied the same way on all processes
(for instance, a shuffling), you should use a `torch.Generator` in a `generator` attribute of the `dataset` to
generate your random numbers and call the [`~trainer_pt_utils.IterableDatasetShard.set_epoch`] method of this
object. It will set the seed of this `generator` to `seed + epoch` on all processes before starting the
iteration. Alternatively, you can also implement a `set_epoch()` method in your iterable dataset to deal with
this.
</Tip>
Args:
dataset (`torch.utils.data.IterableDataset`):
The batch sampler to split in several shards.
batch_size (`int`, *optional*, defaults to 1):
The size of the batches per shard.
drop_last (`bool`, *optional*, defaults to `False`):
Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
beginning.
num_processes (`int`, *optional*, defaults to 1):
The number of processes running concurrently.
process_index (`int`, *optional*, defaults to 0):
The index of the current process.
seed (`int`, *optional*, defaults to 0):
A random seed that will be used for the random number generation in
[`~trainer_pt_utils.IterableDatasetShard.set_epoch`].
"""
def __init__(
self,
dataset: IterableDataset,
batch_size: int = 1,
drop_last: bool = False,
num_processes: int = 1,
process_index: int = 0,
seed: int = 0,
):
self.dataset = dataset
self.batch_size = batch_size
self.drop_last = drop_last
self.num_processes = num_processes
self.process_index = process_index
self.seed = seed
self.epoch = 0
self.num_examples = 0
def set_epoch(self, epoch):
self.epoch = epoch
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(epoch)
def __iter__(self):
self.num_examples = 0
if (
not hasattr(self.dataset, "set_epoch")
and hasattr(self.dataset, "generator")
and isinstance(self.dataset.generator, torch.Generator)
):
self.dataset.generator.manual_seed(self.seed + self.epoch)
real_batch_size = self.batch_size * self.num_processes
process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size)
first_batch = None
current_batch = []
for element in self.dataset:
self.num_examples += 1
current_batch.append(element)
# Wait to have a full batch before yielding elements.
if len(current_batch) == real_batch_size:
for i in process_slice:
yield current_batch[i]
if first_batch is None:
first_batch = current_batch.copy()
current_batch = []
# Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
if not self.drop_last and len(current_batch) > 0:
if first_batch is None:
first_batch = current_batch.copy()
while len(current_batch) < real_batch_size:
current_batch += first_batch
for i in process_slice:
yield current_batch[i]
def __len__(self):
# Will raise an error if the underlying dataset is not sized.
if self.drop_last:
return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
else:
return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
# In order to keep `trainer.py` compact and easy to understand, place any secondary PT Trainer
# helper methods here
def _get_learning_rate(self):
if self.is_deepspeed_enabled:
# with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may
# not run for the first few dozen steps while loss scale is too large, and thus during
# that time `get_last_lr` will fail if called during that warm up stage, so work around it:
try:
last_lr = self.lr_scheduler.get_last_lr()[0]
except AssertionError as e:
if "need to call step" in str(e):
logger.warning("tried to get lr value before scheduler/optimizer started stepping, returning lr=0")
last_lr = 0
else:
raise
else:
if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
last_lr = self.optimizer.param_groups[0]["lr"]
else:
last_lr = self.lr_scheduler.get_last_lr()[0]
if torch.is_tensor(last_lr):
last_lr = last_lr.item()
return last_lr
def _secs2timedelta(secs):
"""
Convert seconds to hh:mm:ss.msec, msecs rounded to 2 decimal places.
"""
msec = int(abs(secs - int(secs)) * 100)
return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}"
def metrics_format(metrics: dict[str, float]) -> dict[str, float]:
"""
Reformat Trainer metrics values to a human-readable format.
Args:
metrics (`dict[str, float]`):
The metrics returned from train/evaluate/predict
Returns:
metrics (`dict[str, float]`): The reformatted metrics
"""
metrics_copy = metrics.copy()
for k, v in metrics_copy.items():
if "_mem_" in k:
metrics_copy[k] = f"{v >> 20}MB"
elif "_runtime" in k:
metrics_copy[k] = _secs2timedelta(v)
elif k == "total_flos":
metrics_copy[k] = f"{int(v) >> 30}GF"
elif isinstance(metrics_copy[k], float):
metrics_copy[k] = round(v, 4)
return metrics_copy
def log_metrics(self, split, metrics):
"""
Log metrics in a specially formatted way.
Under distributed environment this is done only for a process with rank 0.
Args:
split (`str`):
Mode/split name: one of `train`, `eval`, `test`
metrics (`dict[str, float]`):
            The metrics returned from train/evaluate/predict.
Notes on memory reports:
In order to get memory usage report you need to install `psutil`. You can do that with `pip install psutil`.
Now when this method is run, you will see a report that will include:
```
init_mem_cpu_alloc_delta = 1301MB
init_mem_cpu_peaked_delta = 154MB
init_mem_gpu_alloc_delta = 230MB
init_mem_gpu_peaked_delta = 0MB
train_mem_cpu_alloc_delta = 1345MB
train_mem_cpu_peaked_delta = 0MB
train_mem_gpu_alloc_delta = 693MB
train_mem_gpu_peaked_delta = 7MB
```
**Understanding the reports:**
- the first segment, e.g., `train__`, tells you which stage the metrics are for. Reports starting with `init_`
will be added to the first stage that gets run. So that if only evaluation is run, the memory usage for the
`__init__` will be reported along with the `eval_` metrics.
- the third segment, is either `cpu` or `gpu`, tells you whether it's the general RAM or the gpu0 memory
metric.
- `*_alloc_delta` - is the difference in the used/allocated memory counter between the end and the start of the
stage - it can be negative if a function released more memory than it allocated.
- `*_peaked_delta` - is any extra memory that was consumed and then freed - relative to the current allocated
memory counter - it is never negative. When you look at the metrics of any stage you add up `alloc_delta` +
`peaked_delta` and you know how much memory was needed to complete that stage.
    The reporting happens only for the process of rank 0 and gpu 0 (if there is a gpu). Typically this is enough since the
    main process does the bulk of the work, but this may not hold if model parallelism is used, since other GPUs may then
    use a different amount of gpu memory. This is also not the same under DataParallel, where gpu0 may require much more
memory than the rest since it stores the gradient and optimizer states for all participating GPUs. Perhaps in the
future these reports will evolve to measure those too.
    The CPU RAM metric measures RSS (Resident Set Size), which includes both the memory unique to the process and the
    memory shared with other processes. It is important to note that it does not include swapped-out memory, so the
reports could be imprecise.
The CPU peak memory is measured using a sampling thread. Due to python's GIL it may miss some of the peak memory if
that thread didn't get a chance to run when the highest memory was used. Therefore this report can be less than
reality. Using `tracemalloc` would have reported the exact peak memory, but it doesn't report memory allocations
outside of python. So if some C++ CUDA extension allocated its own memory it won't be reported. And therefore it
was dropped in favor of the memory sampling approach, which reads the current process memory usage.
The GPU allocated and peak memory reporting is done with `torch.cuda.memory_allocated()` and
`torch.cuda.max_memory_allocated()`. This metric reports only "deltas" for pytorch-specific allocations, as
`torch.cuda` memory management system doesn't track any memory allocated outside of pytorch. For example, the very
first cuda call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory.
Note that this tracker doesn't account for memory allocations outside of [`Trainer`]'s `__init__`, `train`,
`evaluate` and `predict` calls.
Because `evaluation` calls may happen during `train`, we can't handle nested invocations because
`torch.cuda.max_memory_allocated` is a single counter, so if it gets reset by a nested eval call, `train`'s tracker
will report incorrect info. If this [pytorch issue](https://github.com/pytorch/pytorch/issues/16266) gets resolved
it will be possible to change this class to be re-entrant. Until then we will only track the outer level of
`train`, `evaluate` and `predict` methods. Which means that if `eval` is called during `train`, it's the latter
that will account for its memory usage and that of the former.
This also means that if any other tool that is used along the [`Trainer`] calls
`torch.cuda.reset_peak_memory_stats`, the gpu peak memory stats could be invalid. And the [`Trainer`] will disrupt
the normal behavior of any such tools that rely on calling `torch.cuda.reset_peak_memory_stats` themselves.
For best performance you may want to consider turning the memory profiling off for production runs.
"""
if not self.is_world_process_zero():
return
print(f"***** {split} metrics *****")
metrics_formatted = metrics_format(metrics)
k_width = max(len(str(x)) for x in metrics_formatted)
v_width = max(len(str(x)) for x in metrics_formatted.values())
for key in sorted(metrics_formatted.keys()):
print(f" {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}")
def save_metrics(self, split, metrics, combined=True):
"""
Save metrics into a json file for that split, e.g. `train_results.json`.
Under distributed environment this is done only for a process with rank 0.
Args:
split (`str`):
Mode/split name: one of `train`, `eval`, `test`, `all`
metrics (`dict[str, float]`):
The metrics returned from train/evaluate/predict
combined (`bool`, *optional*, defaults to `True`):
Creates combined metrics by updating `all_results.json` with metrics of this call
To understand the metrics please read the docstring of [`~Trainer.log_metrics`]. The only difference is that raw
unformatted numbers are saved in the current method.
"""
if not self.is_world_process_zero():
return
path = os.path.join(self.args.output_dir, f"{split}_results.json")
with open(path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
if combined:
path = os.path.join(self.args.output_dir, "all_results.json")
if os.path.exists(path):
with open(path) as f:
all_metrics = json.load(f)
else:
all_metrics = {}
all_metrics.update(metrics)
with open(path, "w") as f:
json.dump(all_metrics, f, indent=4, sort_keys=True)
def save_state(self):
"""
Saves the Trainer state, since Trainer.save_model saves only the tokenizer with the model.
Under distributed environment this is done only for a process with rank 0.
"""
if not self.is_world_process_zero():
return
path = os.path.join(self.args.output_dir, "trainer_state.json")
self.state.save_to_json(path)
def get_model_param_count(model, trainable_only=False):
"""
Calculate model's total param count. If trainable_only is True then count only those requiring grads.
"""
if is_deepspeed_zero3_enabled():
def numel(p):
return p.ds_numel if hasattr(p, "ds_numel") else p.numel()
else:
def numel(p):
return p.numel()
return sum(numel(p) for p in model.parameters() if not trainable_only or p.requires_grad)
def get_parameter_names(model, forbidden_layer_types, forbidden_layer_names=None):
"""
Returns the names of the model parameters that are not inside a forbidden layer.
"""
forbidden_layer_patterns = (
[re.compile(pattern) for pattern in forbidden_layer_names] if forbidden_layer_names is not None else []
)
result = []
for name, child in model.named_children():
child_params = get_parameter_names(child, forbidden_layer_types, forbidden_layer_names)
result += [
f"{name}.{n}"
for n in child_params
if not isinstance(child, tuple(forbidden_layer_types))
and not any(pattern.search(f"{name}.{n}".lower()) for pattern in forbidden_layer_patterns)
]
# Add model specific parameters that are not in any child
result += [
k for k in model._parameters if not any(pattern.search(k.lower()) for pattern in forbidden_layer_patterns)
]
return result
def get_module_class_from_name(module, name):
"""
Gets a class from a module by its name.
Args:
module (`torch.nn.Module`): The module to get the class from.
name (`str`): The name of the class.
"""
modules_children = list(module.children())
if module.__class__.__name__ == name:
return module.__class__
elif len(modules_children) == 0:
return
else:
for child_module in modules_children:
module_class = get_module_class_from_name(child_module, name)
if module_class is not None:
return module_class
def remove_dummy_checkpoint(is_main_process, output_dir, filenames):
if is_main_process:
for filename in filenames:
file = os.path.join(output_dir, filename)
if os.path.isfile(file):
os.remove(file)
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
@smp.step()
def smp_forward_backward(model, inputs, gradient_accumulation_steps=1):
outputs = model(**inputs)
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
loss /= gradient_accumulation_steps
model.backward(loss)
return loss
@smp.step()
def smp_forward_only(model, inputs):
return model(**inputs)
def smp_gather(tensor):
if isinstance(tensor, (list, tuple)):
return type(tensor)(smp_gather(t) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: smp_gather(v) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
all_tensors = smp.allgather(tensor, smp.CommGroup.DP_GROUP)
all_tensors = [atleast_1d(t) for t in all_tensors]
return torch.cat([t.cpu() for t in all_tensors], dim=0)
def smp_nested_concat(tensor):
if isinstance(tensor, (list, tuple)):
return type(tensor)(smp_nested_concat(t) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: smp_nested_concat(v) for k, v in tensor.items()})
# It doesn't seem possible to check here if `tensor` is a StepOutput because StepOutput lives in `smp.step`
# which is also the name of the decorator so Python is confused.
return tensor.detach().concat().cpu()
@dataclass
| IterableDatasetShard |
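The docstring above gives the exact expected shards for a 12-element stream split across two processes with a per-process batch size of 2; a runnable version of that example (requires torch and transformers):
from torch.utils.data import IterableDataset
from transformers.trainer_pt_utils import IterableDatasetShard

class Stream(IterableDataset):
    def __iter__(self):
        return iter(range(12))

shard0 = IterableDatasetShard(Stream(), batch_size=2, num_processes=2, process_index=0)
shard1 = IterableDatasetShard(Stream(), batch_size=2, num_processes=2, process_index=1)
print(list(shard0))  # [0, 1, 4, 5, 8, 9]
print(list(shard1))  # [2, 3, 6, 7, 10, 11]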
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 30591,
"end": 34457
} | class ____(AsyncAdapt_dbapi_module):
def __init__(self, asyncpg):
super().__init__(asyncpg)
self.asyncpg = asyncpg
self.paramstyle = "numeric_dollar"
def connect(self, *arg, **kw):
creator_fn = kw.pop("async_creator_fn", self.asyncpg.connect)
prepared_statement_cache_size = kw.pop(
"prepared_statement_cache_size", 100
)
prepared_statement_name_func = kw.pop(
"prepared_statement_name_func", None
)
return await_(
AsyncAdapt_asyncpg_connection.create(
self,
creator_fn(*arg, **kw),
prepared_statement_cache_size=prepared_statement_cache_size,
prepared_statement_name_func=prepared_statement_name_func,
)
)
class Error(AsyncAdapt_Error):
pgcode: str | None
sqlstate: str | None
detail: str | None
def __init__(self, message, error=None):
super().__init__(message, error)
self.detail = getattr(error, "detail", None)
self.pgcode = self.sqlstate = getattr(error, "sqlstate", None)
class Warning(AsyncAdapt_Error): # noqa
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class InternalError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class RestrictViolationError(IntegrityError):
pass
class NotNullViolationError(IntegrityError):
pass
class ForeignKeyViolationError(IntegrityError):
pass
class UniqueViolationError(IntegrityError):
pass
class CheckViolationError(IntegrityError):
pass
class ExclusionViolationError(IntegrityError):
pass
class DataError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class InternalServerError(InternalError):
pass
class InvalidCachedStatementError(NotSupportedError):
def __init__(self, message, error=None):
super().__init__(
message + " (SQLAlchemy asyncpg dialect will now invalidate "
"all prepared caches in response to this exception)",
)
# pep-249 datatype placeholders. As of SQLAlchemy 2.0 these aren't
# used, however the test suite looks for these in a few cases.
STRING = util.symbol("STRING")
NUMBER = util.symbol("NUMBER")
DATETIME = util.symbol("DATETIME")
@util.memoized_property
def _asyncpg_error_translate(self):
import asyncpg
return {
asyncpg.exceptions.IntegrityConstraintViolationError: self.IntegrityError, # noqa: E501
asyncpg.exceptions.PostgresError: self.Error,
asyncpg.exceptions.SyntaxOrAccessError: self.ProgrammingError,
asyncpg.exceptions.InterfaceError: self.InterfaceError,
asyncpg.exceptions.InvalidCachedStatementError: self.InvalidCachedStatementError, # noqa: E501
asyncpg.exceptions.InternalServerError: self.InternalServerError,
asyncpg.exceptions.RestrictViolationError: self.RestrictViolationError, # noqa: E501
asyncpg.exceptions.NotNullViolationError: self.NotNullViolationError, # noqa: E501
asyncpg.exceptions.ForeignKeyViolationError: self.ForeignKeyViolationError, # noqa: E501
asyncpg.exceptions.UniqueViolationError: self.UniqueViolationError,
asyncpg.exceptions.CheckViolationError: self.CheckViolationError,
asyncpg.exceptions.ExclusionViolationError: self.ExclusionViolationError, # noqa: E501
}
def Binary(self, value):
return value
| AsyncAdapt_asyncpg_dbapi |
python | Textualize__textual | tests/test_animator.py | {
"start": 506,
"end": 4158
} | class ____:
"""An object with animatable properties."""
foo: float | None = 0.0 # Plain float that may be set to None on final_value
bar: Animatable = Animatable(0) # A mock object supporting the animatable protocol
def test_simple_animation():
"""Test an animation from one float to another."""
# Thing that may be animated
animate_test = AnimateTest()
# Fake wall-clock time
time = 100.0
# Object that does the animation
animation = SimpleAnimation(
animate_test,
"foo",
time,
3.0,
start_value=20.0,
end_value=50.0,
final_value=None,
easing=lambda x: x,
)
assert animate_test.foo == 0.0
assert animation(time) is False
assert animate_test.foo == 20.0
assert animation(time + 1.0) is False
assert animate_test.foo == 30.0
assert animation(time + 2.0) is False
assert animate_test.foo == 40.0
assert animation(time + 2.9) is False # Not quite final value
assert animate_test.foo == pytest.approx(49.0)
assert animation(time + 3.0) is True # True to indicate animation is complete
assert animate_test.foo is None # This is final_value
assert animation(time + 3.0) is True
assert animate_test.foo is None
def test_simple_animation_duration_zero():
"""Test animation handles duration of 0."""
# Thing that may be animated
animatable = AnimateTest()
# Fake wall-clock time
time = 100.0
# Object that does the animation
animation = SimpleAnimation(
animatable,
"foo",
time,
0.0,
start_value=20.0,
end_value=50.0,
final_value=50.0,
easing=lambda x: x,
)
assert animation(time) is True # Duration is 0, so this is last value
assert animatable.foo == 50.0
assert animation(time + 1.0) is True
assert animatable.foo == 50.0
def test_simple_animation_reverse():
"""Test an animation from one float to another, where the end value is less than the start."""
# Thing that may be animated
animate_Test = AnimateTest()
# Fake wall-clock time
time = 100.0
# Object that does the animation
animation = SimpleAnimation(
animate_Test,
"foo",
time,
3.0,
start_value=50.0,
end_value=20.0,
final_value=20.0,
easing=lambda x: x,
)
assert animation(time) is False
assert animate_Test.foo == 50.0
assert animation(time + 1.0) is False
assert animate_Test.foo == 40.0
assert animation(time + 2.0) is False
assert animate_Test.foo == 30.0
assert animation(time + 3.0) is True
assert animate_Test.foo == 20.0
def test_animatable():
"""Test SimpleAnimation works with the Animatable protocol"""
animate_test = AnimateTest()
# Fake wall-clock time
time = 100.0
# Object that does the animation
animation = SimpleAnimation(
animate_test,
"bar",
time,
3.0,
start_value=Animatable(20.0),
end_value=Animatable(50.0),
final_value=Animatable(50.0),
easing=lambda x: x,
)
assert animation(time) is False
assert animate_test.bar.value == 20.0
assert animation(time + 1.0) is False
assert animate_test.bar.value == 30.0
assert animation(time + 2.0) is False
assert animate_test.bar.value == 40.0
assert animation(time + 2.9) is False
assert animate_test.bar.value == pytest.approx(49.0)
assert animation(time + 3.0) is True # True to indicate animation is complete
assert animate_test.bar.value == 50.0
| AnimateTest |
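The assertions in these tests all reduce to the same linear interpolation over a clamped progress ratio. A minimal sketch of that arithmetic, using illustrative names rather than Textual's internals:

def interpolate(start, end, start_time, duration, now, easing=lambda x: x):
    # Clamp progress to [0, 1] so times past the end return the end value,
    # matching what the tests assert for time + 3.0 and beyond.
    if duration == 0:
        return end
    progress = min(1.0, max(0.0, (now - start_time) / duration))
    return start + (end - start) * easing(progress)

# Mirrors test_simple_animation: 20 -> 50 over 3 seconds starting at t=100.
assert interpolate(20.0, 50.0, 100.0, 3.0, 100.0) == 20.0
assert interpolate(20.0, 50.0, 100.0, 3.0, 101.5) == 35.0
assert interpolate(20.0, 50.0, 100.0, 3.0, 104.0) == 50.0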
python | pytorch__pytorch | torch/_dynamo/variables/nn_module.py | {
"start": 6394,
"end": 39047
} | class ____(VariableTracker):
_nonvar_fields = {
"module_type",
"module_key",
"value",
"nn_module_stack_source",
*VariableTracker._nonvar_fields,
}
def __init__(
self, module_type: type, module_key: str, value: torch.nn.Module, **kwargs
) -> None:
super().__init__(**kwargs)
self.module_type = module_type
self.module_key = module_key
self.value = value
assert self.source
self.nn_module_stack_source = self.source
def get_nn_module_stack_source(self):
return self.nn_module_stack_source or self.source
def set_nn_module_stack_source(self, source):
self.nn_module_stack_source = source
def python_type(self):
return self.module_type
def _wrap_submodule(
self, tx: "InstructionTranslator", source, submod, *key_extra, **options
):
return
def unpack_var_sequence(self, tx):
# implement list/iter/tuple/etc calls
base = tx.output.get_submodule(self.module_key)
if isinstance(base, torch.nn.ModuleDict):
result = []
for name, submod in base.items():
name_var = variables.ConstantVariable.create(name)
tx.output.register_attr_or_module(
submod,
self.module_key,
name,
source=NNModuleSource(GetItemSource(self.source, name)),
)
result.append(name_var)
return result
assert isinstance(
base, (torch.nn.ModuleList, torch.nn.ParameterList, torch.nn.Sequential)
), typestr(base)
assert self.source
result = []
for idx, submod in enumerate(base):
result.append(
tx.output.register_attr_or_module(
submod,
self.module_key,
idx,
source=NNModuleSource(GetItemSource(self.source, idx)),
)
)
return result
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> "ConstantVariable":
mod = tx.output.get_submodule(self.module_key)
result = hasattr(mod, name)
install_guard(
NNModuleSource(AttrSource(self.source, name)).make_guard(
GuardBuilder.HASATTR
)
)
return variables.ConstantVariable.create(result)
def is_training(self, tx):
mod = tx.output.get_submodule(self.module_key)
return getattr(mod, "training", False)
def convert_to_unspecialized(self, tx):
"""Restart analysis treating this module as an UnspecializedNNModuleVariable"""
mod = tx.output.get_submodule(self.module_key)
GenerationTracker.tag(mod)
        # Mark the class dynamic unless this is module initialization
if tx.f_code.co_name != "__init__":
GenerationTracker.mark_class_dynamic(type(mod))
raise UnspecializeRestartAnalysis
def has_key_in_generic_dict(self, tx: "InstructionTranslator", key):
base = tx.output.get_submodule(self.module_key)
if object_has_getattribute(base):
unimplemented(
gb_type="Custom __getattribute__ in nn.Module dict key check",
context=f"has_key_in_generic_dict {self} {key}",
explanation="Dynamo does not support checking key existence "
"on `nn.Module` instances that have a custom "
"`__getattribute__` method defined.",
hints=[
"Avoid defining `__getattribute__` in your module.",
*graph_break_hints.SUPPORTABLE,
],
)
if tx.output.side_effects.has_pending_mutation_of_attr(self, key):
mutated_attr = tx.output.side_effects.load_attr(self, key, deleted_ok=True)
return not isinstance(mutated_attr, variables.DeletedVariable)
base_dict = object.__getattribute__(base, "__dict__")
return key in base_dict
def _custom_getattr_fallback(self, base, tx, name, obj_source):
"""Check for a __getattr__ and handle it specially if it is implemented"""
if object_has_getattribute(base):
unimplemented(
gb_type="Custom __getattribute__ in nn.Module attribute access",
context=f"var_getattr {self} {name}",
explanation="Dynamo does not support checking key existence "
"on `nn.Module` instances that have a custom "
"`__getattribute__` method defined.",
hints=[
"Avoid defining `__getattribute__` in your module.",
*graph_break_hints.SUPPORTABLE,
],
)
getattr_fn = get_custom_getattr(base, ignore_nn_module_getattr=True)
if getattr_fn is None:
return None
if not isinstance(getattr_fn, types.FunctionType):
unimplemented(
gb_type="torch.nn.Module with a non-function custom __getattr__",
context=f"var_getattr {self} {name}",
explanation=(
"Dynamo detected a nn.Module object with a custom "
"`__getattr__` method, but this method is not a standard "
"Python function (e.g., it might be implemented in C/C++). "
"Dynamo cannot currently trace into such non-standard "
"`__getattr__` methods."
),
hints=[
"Avoid using objects with non-standard __getattr__ methods "
"within the compiled region. If possible, implement "
"__getattr__ as a standard Python function.",
*graph_break_hints.SUPPORTABLE,
],
)
options = {"source": AttrSource(obj_source, "__getattr__")}
return variables.UserMethodVariable(getattr_fn, self, **options).call_function(
tx, [variables.ConstantVariable.create(name)], {}
)
def var_getattr(self, tx: "InstructionTranslator", name):
source = self.source and AttrSource(self.source, name)
base = tx.output.get_submodule(self.module_key)
base_dict = object.__getattribute__(base, "__dict__")
object_member = True
all_class_attribute_names = set()
for x in inspect.getmro(base.__class__):
all_class_attribute_names.update(x.__dict__.keys())
if not self.source:
unimplemented(
gb_type="getattr with no source",
context=f"var_getattr {self} {name}",
explanation="Dynamo does not know how to access an attribute "
"on an `nn.Module` instance that lacks a source. This is "
"usually an internal error in Dynamo.",
hints=[*graph_break_hints.DYNAMO_BUG],
)
if name == "__dict__":
return variables.GetAttrVariable(self, name, source=source)
if name in base_dict:
subobj = base_dict[name]
elif (
"_modules" in base_dict
and name in base_dict["_modules"]
and name not in all_class_attribute_names
):
subobj = base_dict["_modules"][name]
elif "_parameters" in base_dict and name in base_dict["_parameters"]:
subobj = base_dict["_parameters"][name]
elif "_buffers" in base_dict and name in base_dict["_buffers"]:
subobj = base_dict["_buffers"][name]
else:
try:
subobj = inspect.getattr_static(base, name)
object_member = False
except AttributeError:
# see if we can fallback to __getattr__, which is not checked by getattr_static
result = self._custom_getattr_fallback(
base=base, tx=tx, name=name, obj_source=self.source
)
if result is not None:
return result
# if we can't find a __getattr__, we can't parse this, raise attribute error
raise_observed_exception(
AttributeError,
tx,
msg=f"'{type(base).__name__}' object has no attribute '{name}'",
)
if name == "forward":
guard_to_detect_forward_monkeypatching(self.source, base)
if name == "__class__" and not object_member:
return variables.UserDefinedClassVariable(base.__class__, source=source)
if object_member:
out = VariableTracker.build(tx, subobj, NNModuleSource(source))
if isinstance(out, (NNModuleVariable, UnspecializedNNModuleVariable)):
# nn_module_stack source is BC surface area. Ensure that
# mod._modules["linear"] is reflected as mod.linear for
# nn_module_stack.
out.set_nn_module_stack_source(
AttrSource(self.get_nn_module_stack_source(), name)
)
return out
else:
if istype(subobj, property):
if self.source:
# Read the class attribute to reach the property
source = AttrSource(AttrSource(self.source, "__class__"), name)
# Get the getter function
source = AttrSource(source, "fget")
return variables.UserFunctionVariable(
subobj.fget,
source=source,
).call_function(tx, [(self)], {})
elif istype(subobj, classmethod):
return variables.UserMethodVariable(
subobj.__func__,
variables.UserDefinedObjectVariable(type(base)),
source=source,
)
elif istype(subobj, staticmethod):
return variables.UserFunctionVariable(
subobj.__get__(base), source=source
)
elif istype(subobj, types.FunctionType):
return variables.UserMethodVariable(subobj, self, source=source)
elif is_safe_constant(subobj) or istensor(subobj):
# Support possibly common cases of class members
return VariableTracker.build(tx, subobj, NNModuleSource(source))
else:
unimplemented(
gb_type="Unsupported nn.Module attribute type",
context=f"nn.Module subclass: {typestr(base)}, name: {name}, attribute type: {typestr(subobj)}",
explanation=f"Dynamo does not support tracing nn.Module attributes of type `{typestr(subobj)}`",
hints=[
f"Refactor your code so that `{name}` (type `{typestr(subobj)}`) is not an attribute of `{typestr(base)}`",
"Currently supported attribute types are methods, classmethods, staticmethods, "
"properties, constants, and tensors.",
*graph_break_hints.SUPPORTABLE,
],
)
return variables.GetAttrVariable(self, name, source=source)
def call_function(
self,
tx,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
mod = tx.output.get_submodule(self.module_key)
with record_nn_module_stack(
self.module_key, self.get_nn_module_stack_source(), tx, mod
):
is_lazy = is_lazy_module(mod)
if (
isinstance(mod, torch.nn.Sequential)
and mod.__class__.forward is torch.nn.Sequential.forward
):
if nnmodule_has_hooks(mod):
# We do not want to unroll sequential if it has hooks, since evaporating it
# will cause hooks to not fire!
# This terminates and restart the tracing process
self.convert_to_unspecialized(tx)
# Unroll sequential
assert not is_lazy, (
"Expected lazy sequential isn't a valid combination?"
)
if kwargs:
raise_args_mismatch(
tx,
"torch.nn.Module.Sequential",
"0 kwargs",
f"{len(kwargs)} kwargs",
)
(arg,) = args
# TODO: Use named_children when it supports remove_duplicate=False.
for child_name, submod in mod._modules.items():
tx.call_function(
tx.output.register_attr_or_module(
submod,
self.module_key,
child_name,
source=NNModuleSource(AttrSource(self.source, child_name)),
),
[arg],
{},
)
arg = tx.pop()
return arg
if is_lazy:
# The module type will change after it is called
if mod.cls_to_become is not None:
self.module_type = mod.cls_to_become
# The pre-hook runs to initialize the module shapes, then deletes itself. After this,
# the module is more or less not lazy and can be treated as a normal module regardless of
# is_allowed or other variations.
initialize_lazy_module(tx, mod, args, kwargs)
# If we are tracing the higher order op, we want Dynamo to step
# inside the module call so that Dynamo can see the underlying
# parameters and buffers and raise them as inputs to the graph.
#
# NB: torch.nn.utils.parametrize changes the class type of a
# parametrized module such that its __module__ points to
# "torch.nn.utils.parametrize".
if (
tx.output.is_root_tracer()
and mod.__module__.startswith(("torch.nn.", "torch.ao."))
and mod.__module__ != "torch.nn.utils.parametrize"
# this basically means we are using the new strict export tracer which wraps the
# user callable, so we shouldn't directly proxy in the fx graph
and not isinstance(
mod, torch.ao.quantization.pt2e.export_utils._WrapperModule
)
):
if nnmodule_has_hooks(
mod, check_forward_hooks=True, check_backward_hooks=True
):
# End of fn, this bubbles up and restarts tracing.
self.convert_to_unspecialized(tx)
from .builder import wrap_fx_proxy
return wrap_fx_proxy(
tx=tx,
proxy=tx.output.create_proxy(
"call_module",
self.module_key,
*proxy_args_kwargs(args, kwargs),
),
)
else:
assert self.source, (
"Must provide a valid source in order to inline, "
"since inlined function may have default args which must be guarded."
)
if isinstance(mod, torch.fx.GraphModule):
# TODO: do we want to support __call__ for GM's?
# If so at least some changes are needed, we don't allow inlining
# the call_wrapped currently, and maybe other issues too
fn = mod.forward
fn_source = AttrSource(self.source, "forward")
else:
fn = mod._call_impl
fn_source = AttrSource(self.source, "_call_impl")
if istype(fn, types.MethodType):
fn = fn.__func__
fn_source = AttrSource(fn_source, "__func__")
args = [self] + args
else:
assert istype(fn, types.FunctionType)
return tx.inline_user_function_return(
variables.UserFunctionVariable(fn, source=fn_source),
args,
kwargs,
)
def call_method(
self,
tx,
name,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
constant=False,
) -> "VariableTracker":
from . import ConstantVariable, ListIteratorVariable, TupleVariable
key = self.module_key
module = tx.output.get_submodule(key)
def generic_call_method_helper(name):
# Helper function to put a `call_method` node in FX graph,
# with nn.Module as the first arg.
mod_proxy = tx.output.create_proxy(
"get_attr",
self.module_key,
(),
{},
)
set_example_value(mod_proxy.node, module)
proxy_args, proxy_kwargs = proxy_args_kwargs(args, kwargs)
from .builder import wrap_fx_proxy
return wrap_fx_proxy(
tx=tx,
proxy=tx.output.create_proxy(
"call_method",
name,
args=(mod_proxy, *proxy_args),
kwargs=proxy_kwargs,
),
)
if name in ["_call_impl", "_wrapped_call_impl"]:
# Example: `self.layer.__call__(x)`
# This is used for explicit calling `__call__` in a forward function.
# Dynamo inlines `__call__`, includes hooks.
return self.call_function(tx, args, kwargs)
elif name == "forward":
# Example: `self.layer.forward(x)`
# This is used for explicit calling `forward` in a forward function.
# Dynamo puts `call_method` node in FX, doesn't trigger hooks.
with record_nn_module_stack(
self.module_key, self.get_nn_module_stack_source(), tx, module
):
return generic_call_method_helper(name)
if name == "_check_input_dim" and trace_rules.is_torch_inline_allowed(
inspect.getfile(module.__class__._check_input_dim)
):
return ConstantVariable.create(True)
if name == "_get_item_by_idx":
if not args[1].is_python_constant():
raise_type_error_exc(
tx,
f"``nn.Module`` {module}'s call method {name} requires a constant index argument",
)
if not isinstance(args[0], TupleVariable):
raise_type_error_exc(
tx,
f"``nn.Module`` {module}'s call method {name} requires a tuple as first argument",
)
mod_var = args[0].items[args[1].value]
if isinstance(mod_var, UnspecializedNNModuleVariable):
return mod_var
key = mod_var.module_key
submod = tx.output.get_submodule(key)
return tx.output.register_attr_or_module(
submod,
key,
key,
source=NNModuleSource(GetItemSource(self.source, key)),
)
if constant:
fn = getattr(module, name)
name = f"{module.__class__.__name__}_{name}_result"
return invoke_and_store_as_constant(tx, fn, name, args, kwargs)
def assert_all_args_kwargs_const():
if not all(
x.is_python_constant() for x in itertools.chain(args, kwargs.values())
):
unimplemented(
gb_type="non-const argument in nn.Module method",
context=f"call_method: {self} {name} {args} {kwargs}",
explanation="Dynamo does not support calling "
f"method `{name}` of ``nn.Module`` {module} with non-constant arguments.",
hints=[],
)
def get_kwargs(*names):
assert_all_args_kwargs_const()
fn = getattr(module, name)
bound_args = inspect.signature(fn).bind(
*([x.as_python_constant() for x in args]),
**{k: v.as_python_constant() for k, v in kwargs.items()},
)
bound_args.apply_defaults()
bound_args = bound_args.arguments
return {k: bound_args[k] for k in names}
def wrap_values(items):
result = []
for name, submod in items:
result.append(
tx.output.register_attr_or_module(
submod,
key,
name,
source=NNModuleSource(gen_source(self.source, name)),
)
)
return ListIteratorVariable(result, mutation_type=ValueMutationNew())
def named_embed(name, obj):
return TupleVariable(
[
ConstantVariable.create(name),
tx.output.register_attr_or_module(
obj,
key,
name,
source=NNModuleSource(gen_source(self.source, name)),
),
]
)
def gen_source(source, name):
name_split = name.split(".")
if name_split[0] == "":
return source
while len(name_split) > 0:
x = name_split.pop(0)
source = AttrSource(source, x)
return source
if name == "named_children":
tx.output.guard_on_key_order.add(AttrSource(self.source, "_modules"))
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
result = []
for name, submod in module.named_children():
result.append(named_embed(name, submod))
return ListIteratorVariable(result, mutation_type=ValueMutationNew())
elif name == "named_parameters":
tx.output.guard_on_key_order.add(AttrSource(self.source, "_parameters"))
result = []
for name, param in module.named_parameters(
**get_kwargs("prefix", "recurse")
):
result.append(named_embed(name, param))
return ListIteratorVariable(result, mutation_type=ValueMutationNew())
elif name == "named_buffers":
tx.output.guard_on_key_order.add(AttrSource(self.source, "_buffers"))
result = []
for name, buffer in module.named_buffers(
**get_kwargs("prefix", "recurse", "remove_duplicate")
):
result.append(named_embed(name, buffer))
return ListIteratorVariable(result, mutation_type=ValueMutationNew())
elif name == "named_modules":
tx.output.guard_on_key_order.add(AttrSource(self.source, "_modules"))
result = []
for name, submod in module.named_modules(
**get_kwargs("memo", "prefix", "remove_duplicate")
):
result.append(named_embed(name, submod))
return ListIteratorVariable(result, mutation_type=ValueMutationNew())
elif name == "children":
tx.output.guard_on_key_order.add(AttrSource(self.source, "_modules"))
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
return wrap_values(module.named_children())
elif name == "modules":
tx.output.guard_on_key_order.add(AttrSource(self.source, "_modules"))
return wrap_values(module.named_modules())
elif name == "parameters":
tx.output.guard_on_key_order.add(AttrSource(self.source, "_parameters"))
return wrap_values(module.named_parameters(**get_kwargs("recurse")))
elif name == "buffers":
tx.output.guard_on_key_order.add(AttrSource(self.source, "_buffers"))
return wrap_values(module.named_buffers(**get_kwargs("recurse")))
elif name == "keys":
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
result = []
for name in module:
result.append(ConstantVariable.create(name))
return ListIteratorVariable(result, mutation_type=ValueMutationNew())
elif name == "values":
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
return wrap_values(module.items())
elif name == "items":
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
result = []
for name, submod in module.items():
result.append(named_embed(name, submod))
return ListIteratorVariable(result, mutation_type=ValueMutationNew())
elif name == "__len__":
if args or kwargs:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
return ConstantVariable.create(len(module))
elif name == "__iter__":
return ListIteratorVariable(
self.unpack_var_sequence(tx), mutation_type=ValueMutationNew()
)
elif (
name == "__contains__"
and isinstance(module, (torch.nn.ModuleDict, torch.nn.ParameterDict))
and args
and args[0].is_python_constant()
):
return ConstantVariable.create(
args[0].as_python_constant() in module._modules
)
elif name == "__getitem__":
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
builtin_supported = (
torch.nn.ModuleDict.__getitem__,
torch.nn.ModuleList.__getitem__,
torch.nn.ParameterDict.__getitem__,
torch.nn.ParameterList.__getitem__,
torch.nn.Sequential.__getitem__,
)
if type(module).__getitem__ not in builtin_supported:
if not (
isinstance(args[0], variables.ConstantVariable)
and isinstance(args[0].as_python_constant(), (str, int))
):
unimplemented(
gb_type="Invalid or non-const argument in nn.Module __getitem__",
context=f"call_method: {self} {name} {args} {kwargs}",
explanation="Dynamo does not support calling "
f"method `{name}` of ``nn.Module`` {module} with a non-constant or non-(str, int) key.",
hints=[
"Use constant arguments of type str or int for __getitem__"
],
)
fn = getattr(module, name).__func__
assert isinstance(fn, types.FunctionType)
src = AttrSource(AttrSource(self.source, name), "__func__")
return tx.inline_user_function_return(
variables.UserFunctionVariable(fn, source=src),
[self] + list(args),
kwargs,
)
assert self.source
if isinstance(args[0], SliceVariable):
# TODO(anijain2305,export-team) - Remove this if condition when inlining of inbuilt nn modules is
# enabled for export.
if tx.output.export:
# Build a TupleVariable of NNModules
result = []
# Turn the slice into the list of integers
keys = list(range(len(module)))[args[0].as_python_constant()]
for idx, submod in enumerate(module[args[0].as_python_constant()]):
key = keys[idx]
src = NNModuleSource(GetItemSource(self.source, key))
result.append(
tx.output.register_attr_or_module(
submod,
key,
source=src,
)
)
new_module = module[args[0].as_python_constant()]
new_module_variable = tx.output.register_attr_or_module(
new_module,
f"{self}.__getitem__(slice)",
source=NNModuleSource(
GetItemSource(self.source, args[0].as_python_constant())
),
)
return new_module_variable
else:
# slice on nn module results in a creation of new module instance, so we need to make it sourceless.
# Convert to unspecialized so that UnspecializedNNModule variable can take care of it.
self.convert_to_unspecialized(tx)
from .tensor import SymNodeVariable
if isinstance(args[0], SymNodeVariable):
key = args[0].evaluate_expr(tx.output)
elif args[0].is_python_constant():
key = args[0].as_python_constant()
else:
unimplemented(
gb_type="Unsupported key type for nn.Module.__getitem__",
context=f"call_method: {self} {name} {args} {kwargs}",
explanation="Dynamo does not support getitem on "
"`nn.Module` with non-constant key.",
hints=[],
)
submod = module[key]
return tx.output.register_attr_or_module(
submod,
self.module_key,
key,
source=NNModuleSource(GetItemSource(self.source, key)),
)
elif (
name == "_get_abs_string_index"
or (
isinstance(module, torch.nn.modules.conv._ConvNd)
and name == "_conv_forward"
)
or (
isinstance(module, torch.nn.modules.conv._ConvTransposeNd)
and name == "_output_padding"
)
):
# Inline the function
fn = getattr(module, name).__func__
fn_source = AttrSource(AttrSource(self.source, name), "__func__")
return tx.inline_user_function_return(
variables.UserFunctionVariable(fn, source=fn_source),
[self] + args,
kwargs,
)
# A loose heuristic, but seems to be generally good before we drop into the
# manual handling of inputs
elif (
name in module.__class__.__dict__
and callable(module.__class__.__dict__[name])
and all(
isinstance(x, variables.TensorVariable)
for x in itertools.chain(args, kwargs.values())
)
):
return generic_call_method_helper(name)
else:
return super().call_method(tx, name, args, kwargs)
| NNModuleVariable |
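For context, the branches above fire when user code iterates `named_children()` or indexes into an `nn.Sequential` inside a compiled region. A hedged sketch of such a module under generic `torch.compile` usage (illustrative only, not part of the PyTorch sources above):

import torch
from torch import nn

class Stack(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(4, 4), nn.ReLU())

    def forward(self, x):
        # Iterating named_children() and indexing the Sequential are the kinds
        # of calls the call_method branches above intercept during tracing.
        for _, layer in self.net.named_children():
            x = layer(x)
        return x + self.net[0].bias

compiled = torch.compile(Stack())
out = compiled(torch.randn(2, 4))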
python | django__django | tests/select_related/models.py | {
"start": 596,
"end": 725
} | class ____(models.Model):
name = models.CharField(max_length=50)
domain = models.ForeignKey(Domain, models.CASCADE)
| Kingdom |
python | ray-project__ray | python/ray/autoscaler/_private/subprocess_output_util.py | {
"start": 1097,
"end": 15003
} | class ____(Exception):
def __init__(self, msg, msg_type, code=None, command=None, special_case=None):
super(ProcessRunnerError, self).__init__(
"{} (discovered={}): type={}, code={}, command={}".format(
msg, special_case, msg_type, code, command
)
)
self.msg_type = msg_type
self.code = code
self.command = command
self.special_case = special_case
_ssh_output_regexes = {
"known_host_update": re.compile(
r"\s*Warning: Permanently added '.+' \(.+\) " r"to the list of known hosts.\s*"
),
"connection_closed": re.compile(r"\s*Shared connection to .+ closed.\s*"),
"timeout": re.compile(
r"\s*ssh: connect to host .+ port .+: " r"Operation timed out\s*"
),
"conn_refused": re.compile(
r"\s*ssh: connect to host .+ port .+: Connection refused\s*"
)
# todo: check for other connection failures for better error messages?
}
def _read_subprocess_stream(f, output_file, is_stdout=False):
"""Read and process a subprocess output stream.
The goal is to find error messages and respond to them in a clever way.
Currently just used for SSH messages (CONN_REFUSED, TIMEOUT, etc.), so
the user does not get confused by these.
Ran in a thread each for both `stdout` and `stderr` to
allow for cross-platform asynchronous IO.
Note: `select`-based IO is another option, but Windows has
no support for `select`ing pipes, and Linux support varies somewhat.
Spefically, Older *nix systems might also have quirks in how they
handle `select` on pipes.
Args:
f: File object for the stream.
output_file: File object to which filtered output is written.
is_stdout (bool):
When `is_stdout` is `False`, the stream is assumed to
be `stderr`. Different error message detectors are used,
and the output is displayed to the user unless it matches
a special case (e.g. SSH timeout), in which case this is
left up to the caller.
"""
detected_special_case = None
while True:
# ! Readline here is crucial.
# ! Normal `read()` will block until EOF instead of until
# something is available.
line = f.readline()
if line is None or line == "":
# EOF
break
if line[-1] == "\n":
line = line[:-1]
if not is_stdout:
if _ssh_output_regexes["connection_closed"].fullmatch(line) is not None:
# Do not log "connection closed" messages which SSH
# puts in stderr for no reason.
#
# They are never errors since the connection will
# close no matter whether the command succeeds or not.
continue
if _ssh_output_regexes["timeout"].fullmatch(line) is not None:
# Timeout is not really an error but rather a special
# condition. It should be handled by the caller, since
# network conditions/nodes in the early stages of boot
# are expected to sometimes cause connection timeouts.
if detected_special_case is not None:
raise ValueError(
"Bug: ssh_timeout conflicts with another "
"special codition: " + detected_special_case
)
detected_special_case = "ssh_timeout"
continue
if _ssh_output_regexes["conn_refused"].fullmatch(line) is not None:
# Connection refused is not really an error but
# rather a special condition. It should be handled by
# the caller, since network conditions/nodes in the
# early stages of boot are expected to sometimes cause
# CONN_REFUSED.
if detected_special_case is not None:
raise ValueError(
"Bug: ssh_conn_refused conflicts with another "
"special codition: " + detected_special_case
)
detected_special_case = "ssh_conn_refused"
continue
if _ssh_output_regexes["known_host_update"].fullmatch(line) is not None:
# Since we ignore SSH host control anyway
# (-o UserKnownHostsFile=/dev/null),
# we should silence the host control warnings.
continue
cli_logger.error(line)
if output_file is not None and output_file != subprocess.DEVNULL:
output_file.write(line + "\n")
return detected_special_case
def _run_and_process_output(
cmd,
stdout_file,
process_runner=subprocess,
stderr_file=None,
use_login_shells=False,
):
"""Run a command and process its output for special cases.
Calls a standard 'check_call' if process_runner is not subprocess.
Specifically, run all command output through regex to detect
error conditions and filter out non-error messages that went to stderr
anyway (SSH writes ALL of its "system" messages to stderr even if they
are not actually errors).
Args:
cmd (List[str]): Command to run.
process_runner: Used for command execution. Assumed to have
            'check_call' and 'check_output' implemented.
stdout_file: File to redirect stdout to.
stderr_file: File to redirect stderr to.
Implementation notes:
1. `use_login_shells` disables special processing
If we run interactive apps, output processing will likely get
overwhelmed with the interactive output elements.
Thus, we disable output processing for login shells. This makes
the logging experience considerably worse, but it only degrades
to old-style logging.
For example, `pip install` outputs HUNDREDS of progress-bar lines
when downloading a package, and we have to
read + regex + write all of them.
After all, even just printing output to console can often slow
down a fast-printing app, and we do more than just print, and
all that from Python, which is much slower than C regarding
stream processing.
2. `stdin=PIPE` for subprocesses
Do not inherit stdin as it messes with bash signals
(ctrl-C for SIGINT) and these commands aren't supposed to
take input anyway.
3. `ThreadPoolExecutor` without the `Pool`
We use `ThreadPoolExecutor` to create futures from threads.
Threads are never reused.
This approach allows us to have no custom synchronization by
off-loading the return value and exception passing to the
standard library (`ThreadPoolExecutor` internals).
This instance will be `shutdown()` ASAP so it's fine to
create one in such a weird place.
The code is thus 100% thread-safe as long as the stream readers
are read-only except for return values and possible exceptions.
"""
stdin_overwrite = subprocess.PIPE
# This already should be validated in a higher place of the stack.
assert not (
does_allow_interactive() and is_output_redirected()
), "Cannot redirect output while in interactive mode."
if process_runner != subprocess or (
does_allow_interactive() and not is_output_redirected()
):
stdin_overwrite = None
# See implementation note #1
if use_login_shells or process_runner != subprocess:
return process_runner.check_call(
cmd,
# See implementation note #2
stdin=stdin_overwrite,
stdout=stdout_file,
stderr=stderr_file,
)
with subprocess.Popen(
cmd,
# See implementation note #2
stdin=stdin_overwrite,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1, # line buffering
universal_newlines=True, # text mode outputs
) as p:
from concurrent.futures import ThreadPoolExecutor
# Closing stdin might be necessary to signal EOF to some
# apps (they might get stuck waiting for input forever otherwise).
p.stdin.close()
# See implementation note #3
with ThreadPoolExecutor(max_workers=2) as executor:
stdout_future = executor.submit(
_read_subprocess_stream, p.stdout, stdout_file, is_stdout=True
)
stderr_future = executor.submit(
_read_subprocess_stream, p.stderr, stderr_file, is_stdout=False
)
# Wait for completion.
executor.shutdown()
# Update `p.returncode`
p.poll()
detected_special_case = stdout_future.result()
if stderr_future.result() is not None:
if detected_special_case is not None:
# This might some day need to be changed.
# We should probably make sure the two special cases
# are compatible then and that we can handle both by
# e.g. reporting both to the caller.
raise ValueError(
"Bug: found a special case in both stdout and "
"stderr. This is not valid behavior at the time "
"of writing this code."
)
detected_special_case = stderr_future.result()
if p.returncode > 0:
# Process failed, but not due to a signal, since signals
# set the exit code to a negative value.
raise ProcessRunnerError(
"Command failed",
"ssh_command_failed",
code=p.returncode,
command=cmd,
special_case=detected_special_case,
)
elif p.returncode < 0:
# Process failed due to a signal, since signals
# set the exit code to a negative value.
raise ProcessRunnerError(
"Command failed",
"ssh_command_failed",
code=p.returncode,
command=cmd,
special_case="died_to_signal",
)
return p.returncode
def run_cmd_redirected(
cmd, process_runner=subprocess, silent=False, use_login_shells=False
):
"""Run a command and optionally redirect output to a file.
Args:
cmd (List[str]): Command to run.
process_runner: Process runner used for executing commands.
silent: If true, the command output will be silenced completely
(redirected to /dev/null), unless verbose logging
is enabled. Use this for running utility commands like
rsync.
"""
if silent and cli_logger.verbosity < 1:
return _run_and_process_output(
cmd,
process_runner=process_runner,
stdout_file=process_runner.DEVNULL,
stderr_file=process_runner.DEVNULL,
use_login_shells=use_login_shells,
)
if not is_output_redirected():
return _run_and_process_output(
cmd,
process_runner=process_runner,
stdout_file=sys.stdout,
stderr_file=sys.stderr,
use_login_shells=use_login_shells,
)
else:
tmpfile_path = os.path.join(
tempfile.gettempdir(), "ray-up-{}-{}.txt".format(cmd[0], time.time())
)
with open(
tmpfile_path,
mode="w",
# line buffering
buffering=1,
) as tmp:
cli_logger.verbose("Command stdout is redirected to {}", cf.bold(tmp.name))
return _run_and_process_output(
cmd,
process_runner=process_runner,
stdout_file=tmp,
stderr_file=tmp,
use_login_shells=use_login_shells,
)
def handle_ssh_fails(e, first_conn_refused_time, retry_interval):
"""Handle SSH system failures coming from a subprocess.
Args:
e: The `ProcessRunnerException` to handle.
first_conn_refused_time:
The time (as reported by this function) or None,
indicating the last time a CONN_REFUSED error was caught.
After exceeding a patience value, the program will be aborted
since SSH will likely never recover.
retry_interval: The interval after which the command will be retried,
used here just to inform the user.
"""
if e.msg_type != "ssh_command_failed":
return
if e.special_case == "ssh_conn_refused":
if (
first_conn_refused_time is not None
and time.time() - first_conn_refused_time > CONN_REFUSED_PATIENCE
):
cli_logger.error(
"SSH connection was being refused "
"for {} seconds. Head node assumed "
"unreachable.",
cf.bold(str(CONN_REFUSED_PATIENCE)),
)
cli_logger.abort(
"Check the node's firewall settings "
"and the cloud network configuration."
)
cli_logger.warning("SSH connection was refused.")
cli_logger.warning(
"This might mean that the SSH daemon is "
"still setting up, or that "
"the host is inaccessable (e.g. due to "
"a firewall)."
)
return time.time()
if e.special_case in ["ssh_timeout", "ssh_conn_refused"]:
cli_logger.print(
"SSH still not available, retrying in {} seconds.",
cf.bold(str(retry_interval)),
)
else:
raise e
return first_conn_refused_time
| ProcessRunnerError |
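Implementation note #3 above boils down to one `Popen` plus two single-use threads whose only shared state is the futures' return values. A self-contained sketch of that pattern (the command string is only illustrative):

import subprocess
from concurrent.futures import ThreadPoolExecutor

def drain(stream):
    # Line-by-line readline, as in _read_subprocess_stream, so the thread
    # wakes up as soon as output is available instead of waiting for EOF.
    lines = []
    for line in iter(stream.readline, ""):
        lines.append(line.rstrip("\n"))
    return lines

with subprocess.Popen(
    ["python", "-c", "import sys; print('out'); print('err', file=sys.stderr)"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    universal_newlines=True,
    bufsize=1,
) as proc:
    proc.stdin.close()  # signal EOF, mirroring implementation note #2
    with ThreadPoolExecutor(max_workers=2) as pool:
        out_lines = pool.submit(drain, proc.stdout)
        err_lines = pool.submit(drain, proc.stderr)
    print(out_lines.result(), err_lines.result(), proc.wait())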
python | kubernetes-client__python | kubernetes/client/models/v1_ingress_list.py | {
"start": 383,
"end": 6824
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1Ingress]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1IngressList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1IngressList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1IngressList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1IngressList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1IngressList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1IngressList. # noqa: E501
items is the list of Ingress. # noqa: E501
:return: The items of this V1IngressList. # noqa: E501
:rtype: list[V1Ingress]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1IngressList.
items is the list of Ingress. # noqa: E501
:param items: The items of this V1IngressList. # noqa: E501
:type: list[V1Ingress]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1IngressList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1IngressList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1IngressList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1IngressList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1IngressList. # noqa: E501
:return: The metadata of this V1IngressList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1IngressList.
:param metadata: The metadata of this V1IngressList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IngressList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IngressList):
return True
return self.to_dict() != other.to_dict()
| V1IngressList |
python | scrapy__scrapy | tests/test_downloadermiddleware_httpcache.py | {
"start": 25509,
"end": 25725
} | class ____(TestBase, StorageTestMixin, DummyPolicyTestMixin):
storage_class = "scrapy.extensions.httpcache.DbmCacheStorage"
policy_class = "scrapy.extensions.httpcache.DummyPolicy"
| TestDbmStorageWithDummyPolicy |
python | conda__conda | conda/exceptions.py | {
"start": 32275,
"end": 32430
} | class ____(CondaError, ValueError):
def __init__(self, message: str, *args, **kwargs):
super().__init__(message, *args, **kwargs)
| CondaValueError |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 10079,
"end": 10724
} | class ____(ASTLiteral):
def __init__(self, data: str) -> None:
self.data = data
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTStringLiteral):
return NotImplemented
return self.data == other.data
def __hash__(self) -> int:
return hash(self.data)
def _stringify(self, transform: StringifyTransform) -> str:
return self.data
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
txt = str(self)
signode += addnodes.desc_sig_literal_string(txt, txt)
| ASTStringLiteral |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 11096,
"end": 11521
} | class ____:
"""Test fr_CA currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.fr_CA import Provider as FrCaCurrencyProvider
cls.provider = FrCaCurrencyProvider
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
| TestFrCa |
python | realpython__materials | thread-safety-locks/bank_thread_safe.py | {
"start": 81,
"end": 1096
} | class ____:
def __init__(self, balance=0):
self.balance = balance
self.account_lock = threading.Lock()
def withdraw(self, amount):
with self.account_lock:
if self.balance >= amount:
new_balance = self.balance - amount
print(f"Withdrawing {amount}...")
time.sleep(0.1) # Simulate a delay
self.balance = new_balance
else:
raise ValueError("Insufficient balance")
def deposit(self, amount):
with self.account_lock:
new_balance = self.balance + amount
print(f"Depositing {amount}...")
time.sleep(0.1) # Simulate a delay
self.balance = new_balance
account = BankAccount(1000)
with ThreadPoolExecutor(max_workers=3) as executor:
executor.submit(account.withdraw, 700)
executor.submit(account.deposit, 1000)
executor.submit(account.withdraw, 300)
print(f"Final account balance: {account.balance}")
| BankAccount |
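For contrast, a hedged sketch of the same sequence with the lock removed (not part of the original example): each worker reads the balance, sleeps, then writes back a stale value, so the deposits and withdrawals overwrite each other.

import time
from concurrent.futures import ThreadPoolExecutor

class UnsafeAccount:
    def __init__(self, balance=0):
        self.balance = balance

    def change(self, amount):
        new_balance = self.balance + amount  # read
        time.sleep(0.1)                      # the other workers run here
        self.balance = new_balance           # write back a stale value

unsafe = UnsafeAccount(1000)
with ThreadPoolExecutor(max_workers=3) as executor:
    for amount in (-700, 1000, -300):
        executor.submit(unsafe.change, amount)

# The locked version above always ends at 1000; this one usually does not,
# because all three submissions read the original balance of 1000.
print(f"Final account balance: {unsafe.balance}")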
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/init_ops_test.py | {
"start": 16437,
"end": 20212
} | class ____(test.TestCase):
def _Range(self, start, limit, delta):
with self.cached_session():
tf_ans = math_ops.range(start, limit, delta, name="range")
self.assertEqual([len(np.arange(start, limit, delta))],
tf_ans.get_shape())
return self.evaluate(tf_ans)
def testBasic(self):
self.assertTrue(
np.array_equal(self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))
self.assertTrue(np.array_equal(self._Range(0, 5, 2), np.array([0, 2, 4])))
self.assertTrue(np.array_equal(self._Range(0, 6, 2), np.array([0, 2, 4])))
self.assertTrue(
np.array_equal(self._Range(13, 32, 7), np.array([13, 20, 27])))
self.assertTrue(
np.array_equal(
self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32)
@test_util.run_deprecated_v1
def testLimitOnly(self):
with self.session():
self.assertAllEqual(np.arange(5), math_ops.range(5))
def testEmpty(self):
for start in 0, 5:
self.assertTrue(np.array_equal(self._Range(start, start, 1), []))
def testNonInteger(self):
self.assertTrue(
np.allclose(self._Range(0, 2, 0.5), np.array([0, 0.5, 1, 1.5])))
self.assertTrue(np.allclose(self._Range(0, 5, 2.5), np.array([0, 2.5])))
self.assertTrue(
np.allclose(self._Range(0, 3, 0.9), np.array([0, 0.9, 1.8, 2.7])))
self.assertTrue(
np.allclose(
self._Range(100., 500., 100.), np.array([100, 200, 300, 400])))
self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32)
def testNegativeDelta(self):
self.assertTrue(
np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))
self.assertTrue(
np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))
self.assertTrue(
np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))
def testDType(self):
zero_int32 = math_ops.cast(0, dtypes.int32)
zero_int64 = math_ops.cast(0, dtypes.int64)
zero_float32 = math_ops.cast(0, dtypes.float32)
zero_float64 = math_ops.cast(0, dtypes.float64)
self.assertEqual(math_ops.range(zero_int32, 0, 1).dtype, dtypes.int32)
self.assertEqual(math_ops.range(zero_int64, 0, 1).dtype, dtypes.int64)
self.assertEqual(math_ops.range(zero_float32, 0, 1).dtype, dtypes.float32)
self.assertEqual(math_ops.range(zero_float64, 0, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(zero_int32, zero_int64, 1).dtype, dtypes.int64)
self.assertEqual(
math_ops.range(zero_int64, zero_float32, 1).dtype, dtypes.float32)
self.assertEqual(
math_ops.range(zero_float32, zero_float64, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(zero_float64, zero_int32, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(0, 0, 1, dtype=dtypes.int32).dtype, dtypes.int32)
self.assertEqual(
math_ops.range(0, 0, 1, dtype=dtypes.int64).dtype, dtypes.int64)
self.assertEqual(
math_ops.range(0, 0, 1, dtype=dtypes.float32).dtype, dtypes.float32)
self.assertEqual(
math_ops.range(0, 0, 1, dtype=dtypes.float64).dtype, dtypes.float64)
def testMixedDType(self):
# Test case for GitHub issue 35710
tf_ans = math_ops.range(
constant_op.constant(4, dtype=dtypes.int32), dtype=dtypes.int64)
self.assertAllEqual(self.evaluate(tf_ans), np.array([0, 1, 2, 3]))
def testLargeStarts(self):
# Test case for GitHub issue 46899.
with self.session():
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
v = math_ops.range(start=-1e+38, limit=1)
self.evaluate(v)
# TODO(vrv): move to sequence_ops_test?
| RangeTest |
python | fluentpython__example-code-2e | 10-dp-1class-func/untyped/strategy_param.py | {
"start": 1234,
"end": 2703
} | class ____: # the Context
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
def total(self):
if not hasattr(self, '__total'):
self.__total = sum(item.total() for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self) # <1>
return self.total() - discount
def __repr__(self):
return f'<Order total: {self.total():.2f} due: {self.due():.2f}>'
def fidelity_promo(percent):
"""discount for customers with 1000 or more fidelity points"""
return lambda order: (order.total() * percent / 100
if order.customer.fidelity >= 1000 else 0)
def bulk_item_promo(percent):
"""discount for each LineItem with 20 or more units"""
def discounter(order):
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * percent / 100
return discount
return discounter
def large_order_promo(percent):
"""discount for orders with 10 or more distinct items"""
def discounter(order):
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * percent / 100
return 0
return discounter
| Order |
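A hedged usage sketch for the record above; `Customer` and `LineItem` are minimal stand-ins, since the excerpt only shows the `Order` context and the promotion factories.

from collections import namedtuple

Customer = namedtuple('Customer', 'name fidelity')

class LineItem:
    def __init__(self, product, quantity, price):
        self.product, self.quantity, self.price = product, quantity, price

    def total(self):
        return self.quantity * self.price

ann = Customer('Ann Smith', 1100)
cart = [LineItem('banana', 30, .5), LineItem('apple', 10, 1.5)]
# 1100 fidelity points and a 5% parameter: 30.00 total, 28.50 due.
print(Order(ann, cart, fidelity_promo(5)))
# 30 bananas trigger the bulk discount at 10%: 28.50 due again.
print(Order(ann, cart, bulk_item_promo(10)))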
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 46341,
"end": 47061
} | class ____(GenericViewSet):
"""
Example via: https://stackoverflow.com/questions/43778668/django-rest-framwork-occured-typeerror-link-object-does-not-support-item-ass/
"""
permission_classes = ()
@action(detail=False)
def detail(self, request):
return {}
@action(detail=False, url_path='detail/export')
def detail_export(self, request):
return {}
naming_collisions_router = SimpleRouter()
naming_collisions_router.register(r'collision', NamingCollisionViewSet, basename="collision")
@pytest.mark.skipif(not coreapi, reason='coreapi is not installed')
@override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'})
| NamingCollisionViewSet |
python | kevin1024__vcrpy | vcr/errors.py | {
"start": 1849,
"end": 1963
} | class ____(KeyError):
"""Raised when a cassette does not contain the request we want."""
| UnhandledHTTPRequestError |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/replicate_test.py | {
"start": 10095,
"end": 14063
} | class ____(test_base.DatasetTestBase,
parameterized.TestCase):
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(EagerClusterReplicateTest, self).__init__(methodName)
self._job_name = "remove_device"
self._device0 = "/job:%s/replica:0/task:0/device:CPU:0" % self._job_name
self._device1 = "/job:%s/replica:0/task:1/device:CPU:0" % self._job_name
self._device2 = "/job:%s/replica:0/task:2/device:CPU:0" % self._job_name
def setUp(self):
super(EagerClusterReplicateTest, self).setUp()
    # TODO(b/171412104): Move create server to __init__ once tfrt supports it.
self._cached_server1 = server_lib.Server.create_local_server()
self._cached_server2 = server_lib.Server.create_local_server()
self._cached_server1_target = self._cached_server1.target[len("grpc://"):]
self._cached_server2_target = self._cached_server2.target[len("grpc://"):]
# Start the local server.
local_port = pywrap_tfe.TF_PickUnusedPortOrDie()
context.set_server_def(
server_def=_get_server_def(
self._job_name,
local_server_port=local_port,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0))
def tearDown(self):
super().tearDown()
# Clear the current device scope to avoid polluting other test cases.
ops.device(None).__enter__()
# Reset the context to avoid polluting other test cases.
context._reset_context()
@combinations.generate(
combinations.combine(tf_api_version=[2], mode=["eager"]))
def testBasic(self):
with ops.device(self._device0):
dataset0 = dataset_ops.Dataset.range(100)
replicated_ds = distribute.replicate(dataset0,
[self._device1, self._device2])
dataset1 = replicated_ds[self._device1]
dataset2 = replicated_ds[self._device2]
with ops.device(self._device0):
self.assertDatasetProduces(dataset0, range(100))
with ops.device(self._device1):
self.assertDatasetProduces(dataset1, range(100))
with ops.device(self._device2):
self.assertDatasetProduces(dataset2, range(100))
@combinations.generate(
combinations.combine(tf_api_version=[2], mode=["eager"]))
def testMap(self):
with ops.device(self._device0):
dataset0 = dataset_ops.Dataset.range(100).map(lambda x: x * 2)
replicated_ds = distribute.replicate(dataset0,
[self._device1, self._device2])
dataset1 = replicated_ds[self._device1]
dataset2 = replicated_ds[self._device2]
with ops.device(self._device0):
self.assertDatasetProduces(dataset0, range(0, 200, 2))
with ops.device(self._device1):
self.assertDatasetProduces(dataset1, range(0, 200, 2))
with ops.device(self._device2):
self.assertDatasetProduces(dataset2, range(0, 200, 2))
@combinations.generate(
combinations.combine(tf_api_version=[2], mode=["eager"]))
def testVariableInput(self):
with ops.device(self._device0):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset0 = dataset_ops.Dataset.range(100).map(
lambda _: counter_var.assign_add(1))
replicated_ds = distribute.replicate(dataset0,
[self._device1, self._device2])
dataset1 = replicated_ds[self._device1]
dataset2 = replicated_ds[self._device2]
with ops.device(self._device0):
self.assertDatasetProduces(
dataset0, range(1, 101), requires_initialization=True)
with ops.device(self._device1):
self.assertDatasetProduces(
dataset1, range(1, 101), requires_initialization=True)
with ops.device(self._device2):
self.assertDatasetProduces(
dataset2, range(1, 101), requires_initialization=True)
| EagerClusterReplicateTest |
python | pandas-dev__pandas | asv_bench/benchmarks/eval.py | {
"start": 1237,
"end": 1988
} | class ____:
def setup(self):
N = 10**6
halfway = (N // 2) - 1
index = pd.date_range("20010101", periods=N, freq="min")
s = pd.Series(index)
self.ts = s.iloc[halfway]
self.df = pd.DataFrame({"a": np.random.randn(N), "dates": index}, index=index)
data = np.random.randn(N)
self.min_val = data.min()
self.max_val = data.max()
def time_query_datetime_index(self):
self.df.query("index < @self.ts")
def time_query_datetime_column(self):
self.df.query("dates < @self.ts")
def time_query_with_boolean_selection(self):
self.df.query("(a >= @self.min_val) & (a <= @self.max_val)")
from .pandas_vb_common import setup # noqa: F401 isort:skip
| Query |
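The three timed methods all exercise `DataFrame.query` with `@`-prefixed references to local variables. A small sketch of that form on made-up data:

import numpy as np
import pandas as pd

idx = pd.date_range("2001-01-01", periods=8, freq="min")
df = pd.DataFrame({"a": np.random.randn(8), "dates": idx}, index=idx)
cutoff = idx[3]
lo, hi = df["a"].min(), df["a"].max()

by_index = df.query("index < @cutoff")          # filter on the index
by_column = df.query("dates < @cutoff")         # filter on a column
in_range = df.query("(a >= @lo) & (a <= @hi)")  # boolean selection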
python | ray-project__ray | python/ray/data/preprocessors/discretizer.py | {
"start": 311,
"end": 2285
} | class ____(Preprocessor):
"""Abstract base class for all KBinsDiscretizers.
    Essentially a thin wrapper around ``pd.cut``.
Expects either ``self.stats_`` or ``self.bins`` to be set and
contain {column:list_of_bin_intervals}.
"""
def _transform_pandas(self, df: pd.DataFrame):
def bin_values(s: pd.Series) -> pd.Series:
if s.name not in self.columns:
return s
labels = self.dtypes.get(s.name) if self.dtypes else False
ordered = True
if labels:
if isinstance(labels, pd.CategoricalDtype):
ordered = labels.ordered
labels = list(labels.categories)
else:
labels = False
bins = self.stats_ if self._is_fittable else self.bins
return pd.cut(
s,
bins[s.name] if isinstance(bins, dict) else bins,
right=self.right,
labels=labels,
ordered=ordered,
retbins=False,
include_lowest=self.include_lowest,
duplicates=self.duplicates,
)
binned_df = df.apply(bin_values, axis=0)
df[self.output_columns] = binned_df[self.columns]
return df
def _validate_bins_columns(self):
if isinstance(self.bins, dict) and not all(
col in self.bins for col in self.columns
):
raise ValueError(
"If `bins` is a dictionary, all elements of `columns` must be present "
"in it."
)
def __repr__(self):
attr_str = ", ".join(
[
f"{attr_name}={attr_value!r}"
for attr_name, attr_value in vars(self).items()
if not attr_name.startswith("_")
]
)
return f"{self.__class__.__name__}({attr_str})"
@PublicAPI(stability="alpha")
| _AbstractKBinsDiscretizer |
python | numba__numba | numba/cuda/tests/cudapy/test_globals.py | {
"start": 504,
"end": 1384
} | class ____(CUDATestCase):
def test_global_int_const(self):
"""Test simple_smem
"""
compiled = cuda.jit("void(int32[:])")(simple_smem)
nelem = 100
ary = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))
@unittest.SkipTest
def test_global_tuple_const(self):
"""Test coop_smem2d
"""
compiled = cuda.jit("void(float32[:,:])")(coop_smem2d)
shape = 10, 20
ary = np.empty(shape, dtype=np.float32)
compiled[1, shape](ary)
exp = np.empty_like(ary)
for i in range(ary.shape[0]):
for j in range(ary.shape[1]):
exp[i, j] = float(i + 1) / (j + 1)
self.assertTrue(np.allclose(ary, exp))
if __name__ == '__main__':
unittest.main()
| TestCudaTestGlobal |
python | ray-project__ray | python/ray/data/_internal/datasource/kafka_datasource.py | {
"start": 8397,
"end": 22328
} | class ____(Datasource):
"""Kafka datasource for reading from Kafka topics with bounded reads."""
# Batch size for incremental block yielding
BATCH_SIZE_FOR_YIELD = 1000
def __init__(
self,
topics: Union[str, List[str]],
bootstrap_servers: Union[str, List[str]],
start_offset: Union[int, Literal["earliest"]] = "earliest",
end_offset: Union[int, Literal["latest"]] = "latest",
kafka_auth_config: Optional[KafkaAuthConfig] = None,
timeout_ms: int = 10000,
):
"""Initialize Kafka datasource.
Args:
topics: Kafka topic name(s) to read from.
bootstrap_servers: Kafka broker addresses (string or list of strings).
start_offset: Starting position. Can be:
- int: Offset number
- str: "earliest"
end_offset: Ending position. Can be:
- int: Offset number
- str: "latest"
kafka_auth_config: Authentication configuration. See KafkaAuthConfig for details.
timeout_ms: Timeout in milliseconds for every read task to poll until reaching end_offset (default 10000ms).
If the read task does not reach end_offset within the timeout, it will stop polling and return the messages
it has read so far.
Raises:
ValueError: If required configuration is missing.
ImportError: If kafka-python is not installed.
"""
_check_import(self, module="kafka", package="kafka-python")
if not topics:
raise ValueError("topics cannot be empty")
if not bootstrap_servers:
raise ValueError("bootstrap_servers cannot be empty")
if timeout_ms <= 0:
raise ValueError("timeout_ms must be positive")
if isinstance(start_offset, int) and isinstance(end_offset, int):
if start_offset > end_offset:
raise ValueError("start_offset must be less than end_offset")
if isinstance(start_offset, str) and start_offset == "latest":
raise ValueError("start_offset cannot be 'latest'")
if isinstance(end_offset, str) and end_offset == "earliest":
raise ValueError("end_offset cannot be 'earliest'")
# Validate bootstrap_servers format
if isinstance(bootstrap_servers, str):
if not bootstrap_servers or ":" not in bootstrap_servers:
raise ValueError(
f"Invalid bootstrap_servers format: {bootstrap_servers}. "
"Expected 'host:port' or list of 'host:port' strings."
)
elif isinstance(bootstrap_servers, list):
if not bootstrap_servers:
raise ValueError("bootstrap_servers cannot be empty list")
for server in bootstrap_servers:
if not isinstance(server, str) or ":" not in server:
raise ValueError(
f"Invalid bootstrap_servers format: {server}. "
"Expected 'host:port' string."
)
self._topics = topics if isinstance(topics, list) else [topics]
self._bootstrap_servers = (
bootstrap_servers
if isinstance(bootstrap_servers, list)
else [bootstrap_servers]
)
self._start_offset = start_offset
self._end_offset = end_offset
self._kafka_auth_config = kafka_auth_config
self._timeout_ms = timeout_ms
self._target_max_block_size = DataContext.get_current().target_max_block_size
def estimate_inmemory_data_size(self) -> Optional[int]:
"""Return an estimate of the in-memory data size, or None if unknown."""
return None
def get_read_tasks(
self, parallelism: int, per_task_row_limit: Optional[int] = None
) -> List[ReadTask]:
"""Create read tasks for Kafka partitions.
Creates one read task per partition.
Each task reads from a single partition of a single topic.
Args:
parallelism: This argument is deprecated.
per_task_row_limit: Maximum number of rows per read task.
Returns:
List of ReadTask objects, one per partition.
"""
# Discover all partitions for all topics
# We need to create a consumer on the driver to discover partitions
from kafka import KafkaConsumer
# Build minimal consumer config for partition discovery
consumer_config = _build_consumer_config_for_discovery(
self._bootstrap_servers, self._kafka_auth_config
)
# Discover partitions for all topics
topic_partitions = [] # List of (topic, partition) tuples
discovery_consumer = None
try:
discovery_consumer = KafkaConsumer(**consumer_config)
for topic in self._topics:
partitions = discovery_consumer.partitions_for_topic(topic)
if not partitions:
raise ValueError(
f"Topic {topic} has no partitions or doesn't exist"
)
for partition in partitions:
topic_partitions.append((topic, partition))
finally:
if discovery_consumer:
discovery_consumer.close()
# Store config for use in read functions (avoid serialization issues)
bootstrap_servers = self._bootstrap_servers
start_offset = self._start_offset
end_offset = self._end_offset
timeout_ms = self._timeout_ms
kafka_auth_config = self._kafka_auth_config
target_max_block_size = self._target_max_block_size
tasks = []
schema = pa.schema(
[
("offset", pa.int64()),
("key", pa.binary()),
("value", pa.binary()),
("topic", pa.string()),
("partition", pa.int32()),
("timestamp", pa.int64()), # Kafka timestamp in milliseconds
("timestamp_type", pa.int32()), # 0=CreateTime, 1=LogAppendTime
("headers", pa.map_(pa.string(), pa.binary())), # Message headers
]
)
for topic_name, partition_id in topic_partitions:
def create_kafka_read_fn(
topic_name: str = topic_name,
partition_id: int = partition_id,
bootstrap_servers: List[str] = bootstrap_servers,
start_offset: Optional[Union[int, Literal["earliest"]]] = start_offset,
end_offset: Optional[Union[int, Literal["latest"]]] = end_offset,
kafka_auth_config: Optional[KafkaAuthConfig] = kafka_auth_config,
timeout_ms: int = timeout_ms,
target_max_block_size: int = target_max_block_size,
):
"""Create a Kafka read function with captured variables.
This factory function captures configuration variables as default arguments
to avoid serialization issues when the read function is executed remotely
by Ray. Using default arguments ensures all needed config is available
in the remote task without requiring 'self' to be serialized.
"""
def kafka_read_fn() -> Iterable[Block]:
"""Read function for a single Kafka partition using kafka-python.
This function runs remotely in a Ray task. It creates a KafkaConsumer,
reads messages from a single assigned partition, and yields PyArrow tables
incrementally for efficient streaming processing.
"""
from kafka import KafkaConsumer, TopicPartition
# Build consumer configuration
consumer_config = _build_consumer_config_for_read(
bootstrap_servers, kafka_auth_config
)
# Create the Kafka consumer
consumer = KafkaConsumer(**consumer_config)
try:
# Assign only the specific partition for this task
topic_partition = TopicPartition(topic_name, partition_id)
consumer.assign([topic_partition])
start_off, end_off = _resolve_offsets(
consumer, topic_partition, start_offset, end_offset
)
# Seek to the requested starting position
consumer.seek(topic_partition, start_off)
records = []
output_buffer = BlockOutputBuffer(
OutputBlockSizeOption.of(
target_max_block_size=target_max_block_size
)
)
# Main polling loop - read maximum 500 messages per loop (default max_poll_records for KafkaConsumer poll is 500)
partition_done = False
start_time = time.time()
timeout_seconds = timeout_ms / 1000.0
while not partition_done:
# Check if overall timeout has been reached
elapsed_time = time.time() - start_time
if elapsed_time >= timeout_seconds:
logger.warning(
f"Kafka read task timed out after {timeout_ms}ms while reading partition {partition_id} of topic {topic_name}; "
f"end_offset {end_off} was not reached. Returning {len(records)} messages collected in this read task so far."
)
break
# Check if we've reached the end_offset before polling
# This avoids waiting for timeout when no more messages are available
current_position = consumer.position(topic_partition)
if current_position >= end_off:
break
# Calculate remaining timeout for this poll
remaining_timeout_ms = int(
(timeout_seconds - elapsed_time) * 1000
)
# Poll for a batch of messages from Kafka
msg_batch = consumer.poll(
timeout_ms=min(remaining_timeout_ms, 10000),
)
if not msg_batch:
continue
messages = msg_batch.get(topic_partition, [])
for msg in messages:
# Check if we've reached the end offset (for bounded reads)
# Use >= for exclusive end_offset (don't include end_offset message)
if end_off is not None and msg.offset >= end_off:
partition_done = True
break
# Extract all message metadata into a flat record
headers_dict = dict(msg.headers) if msg.headers else {}
records.append(
{
"offset": msg.offset,
"key": msg.key,
"value": msg.value,
"topic": msg.topic,
"partition": msg.partition,
"timestamp": msg.timestamp,
"timestamp_type": msg.timestamp_type,
"headers": headers_dict,
}
)
# Yield incrementally when we hit batch size
if len(records) >= KafkaDatasource.BATCH_SIZE_FOR_YIELD:
table = pa.Table.from_pylist(records)
output_buffer.add_block(table)
while output_buffer.has_next():
yield output_buffer.next()
records = [] # Clear for next batch
# Yield any remaining records
if records:
table = pa.Table.from_pylist(records)
output_buffer.add_block(table)
output_buffer.finalize()
while output_buffer.has_next():
yield output_buffer.next()
finally:
# Always close the consumer to release connections
consumer.close()
return kafka_read_fn
# Create metadata for this task
metadata = BlockMetadata(
num_rows=None,
size_bytes=None,
input_files=[f"kafka://{topic_name}/{partition_id}"],
exec_stats=None,
)
kafka_read_fn = create_kafka_read_fn(topic_name, partition_id)
# Create read task
task = ReadTask(
read_fn=kafka_read_fn,
metadata=metadata,
schema=schema,
per_task_row_limit=per_task_row_limit,
)
tasks.append(task)
return tasks
| KafkaDatasource |
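A minimal usage sketch for the datasource above, not a definitive recipe: it assumes a reachable Kafka broker at localhost:9092 and a topic named "events" (both illustrative, not taken from the record), imports the class from the module path shown in the record header, and hands it to ray.data.read_datasource, the public entry point for driving a custom Datasource.

import ray
from ray.data._internal.datasource.kafka_datasource import KafkaDatasource

# Broker address and topic name are placeholder assumptions for this sketch.
datasource = KafkaDatasource(
    topics="events",
    bootstrap_servers="localhost:9092",
    start_offset="earliest",  # "latest" is rejected as a start offset by the validation above
    end_offset="latest",      # resolved to the current end offset, giving a bounded read
    timeout_ms=10_000,
)

ds = ray.data.read_datasource(datasource)
# Rows follow the flat schema built in get_read_tasks():
# offset, key, value, topic, partition, timestamp, timestamp_type, headers.
print(ds.schema())
print(ds.take(3))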
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/styles/base.py | {
"start": 3881,
"end": 4277
} | class ____(BaseStyle):
"""
A style that doesn't style anything.
"""
def get_attrs_for_style_str(
self, style_str: str, default: Attrs = DEFAULT_ATTRS
) -> Attrs:
return default
def invalidation_hash(self) -> Hashable:
return 1 # Always the same value.
@property
def style_rules(self) -> list[tuple[str, str]]:
return []
| DummyStyle |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/git_test/package.py | {
"start": 217,
"end": 456
} | class ____(Package):
"""Mock package that uses git for fetching."""
homepage = "http://www.git-fetch-example.com"
# To be set by test
git = None
submodules = True
version("git", git="to-be-filled-in-by-test")
| GitTest |
python | django__django | django/db/models/fields/related_lookups.py | {
"start": 1416,
"end": 4109
} | class ____(In):
def get_prep_lookup(self):
from django.db.models.sql.query import Query # avoid circular import
if isinstance(self.lhs, ColPairs):
if (
isinstance(self.rhs, Query)
and not self.rhs.has_select_fields
and self.lhs.output_field.related_model is self.rhs.model
):
self.rhs.set_values([f.name for f in self.lhs.sources])
else:
if self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
# We need to run the related field's get_prep_value(). Consider
# case ForeignKey to IntegerField given value 'abc'. The
# ForeignKey itself doesn't have validation for non-integers,
# so we must run validation using the target field.
if hasattr(self.lhs.output_field, "path_infos"):
# Run the target field's get_prep_value. We can safely
# assume there is only one as we don't get to the direct
# value branch otherwise.
target_field = self.lhs.output_field.path_infos[-1].target_fields[
-1
]
self.rhs = [target_field.get_prep_value(v) for v in self.rhs]
elif not getattr(self.rhs, "has_select_fields", True) and not getattr(
self.lhs.field.target_field, "primary_key", False
):
if (
getattr(self.lhs.output_field, "primary_key", False)
and self.lhs.output_field.model == self.rhs.model
):
# A case like
# Restaurant.objects.filter(place__in=restaurant_qs), where
# place is a OneToOneField and the primary key of
# Restaurant.
target_field = self.lhs.field.name
else:
target_field = self.lhs.field.target_field.name
self.rhs.set_values([target_field])
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, ColPairs):
if self.rhs_is_direct_value():
values = [get_normalized_value(value, self.lhs) for value in self.rhs]
lookup = TupleIn(self.lhs, values)
else:
lookup = TupleIn(self.lhs, self.rhs)
return compiler.compile(lookup)
return super().as_sql(compiler, connection)
| RelatedIn |
python | getsentry__sentry | src/sentry/dynamic_sampling/tasks/boost_low_volume_transactions.py | {
"start": 2472,
"end": 8134
} | class ____(ProjectIdentity, total=True):
total_num_transactions: float
total_num_classes: int | float
@instrumented_task(
name="sentry.dynamic_sampling.tasks.boost_low_volume_transactions",
namespace=telemetry_experience_tasks,
processing_deadline_duration=6 * 60 + 5,
retry=Retry(times=5, delay=5),
silo_mode=SiloMode.REGION,
)
@dynamic_sampling_task
def boost_low_volume_transactions() -> None:
num_big_trans = int(
options.get("dynamic-sampling.prioritise_transactions.num_explicit_large_transactions")
)
num_small_trans = int(
options.get("dynamic-sampling.prioritise_transactions.num_explicit_small_transactions")
)
orgs_iterator = GetActiveOrgs(max_projects=MAX_PROJECTS_PER_QUERY, granularity=Granularity(60))
for orgs in orgs_iterator:
# get the low and high transactions
totals_it = FetchProjectTransactionTotals(orgs)
small_transactions_it = FetchProjectTransactionVolumes(
orgs,
large_transactions=False,
max_transactions=num_small_trans,
)
big_transactions_it = FetchProjectTransactionVolumes(
orgs,
large_transactions=True,
max_transactions=num_big_trans,
)
for project_transactions in transactions_zip(
totals_it, big_transactions_it, small_transactions_it
):
boost_low_volume_transactions_of_project.apply_async(
kwargs={"project_transactions": project_transactions},
headers={"sentry-propagate-traces": False},
)
@instrumented_task(
name="sentry.dynamic_sampling.boost_low_volume_transactions_of_project",
namespace=telemetry_experience_tasks,
processing_deadline_duration=4 * 60 + 5,
retry=Retry(times=5, delay=5),
silo_mode=SiloMode.REGION,
)
@dynamic_sampling_task
def boost_low_volume_transactions_of_project(project_transactions: ProjectTransactions) -> None:
org_id = project_transactions["org_id"]
project_id = project_transactions["project_id"]
total_num_transactions = project_transactions.get("total_num_transactions")
total_num_classes = project_transactions.get("total_num_classes")
transactions = [
RebalancedItem(id=id, count=count)
for id, count in project_transactions["transaction_counts"]
]
try:
organization = Organization.objects.get_from_cache(id=org_id)
except Organization.DoesNotExist:
organization = None
# If the org doesn't have dynamic sampling, we want to early return to avoid unnecessary work.
if not has_dynamic_sampling(organization):
return
if is_project_mode_sampling(organization):
sample_rate = ProjectOption.objects.get_value(project_id, "sentry:target_sample_rate")
source = "project_setting"
else:
# We try to use the sample rate that was individually computed for each project, but if we don't find it, we will
# resort to the blended sample rate of the org.
sample_rate, success = get_boost_low_volume_projects_sample_rate(
org_id=org_id,
project_id=project_id,
error_sample_rate_fallback=quotas.backend.get_blended_sample_rate(
organization_id=org_id
),
)
source = "boost_low_volume_projects" if success else "blended_sample_rate"
sample_function(
function=log_sample_rate_source,
_sample_rate=0.1,
org_id=org_id,
project_id=project_id,
used_for="boost_low_volume_transactions",
source=source,
sample_rate=sample_rate,
)
if sample_rate is None:
sentry_sdk.capture_message(
"Sample rate of project not found when trying to adjust the sample rates of "
"its transactions"
)
return
if sample_rate == 1.0:
return
# the model fails when we are not having any transactions, thus we can simply return here
if len(transactions) == 0:
return
intensity = options.get("dynamic-sampling.prioritise_transactions.rebalance_intensity", 1.0)
model = TransactionsRebalancingModel()
rebalanced_transactions = guarded_run(
model,
TransactionsRebalancingInput(
classes=transactions,
sample_rate=sample_rate,
total_num_classes=total_num_classes,
total=total_num_transactions,
intensity=intensity,
),
)
# In case the result of the model is None, it means that an error occurred, thus we want to early return.
if rebalanced_transactions is None:
return
# Only after checking the nullability of rebalanced_transactions, we want to unpack the tuple.
named_rates, implicit_rate = rebalanced_transactions
set_transactions_resampling_rates(
org_id=org_id,
proj_id=project_id,
named_rates=named_rates,
default_rate=implicit_rate,
ttl_ms=DEFAULT_REDIS_CACHE_KEY_TTL,
)
schedule_invalidate_project_config(
project_id=project_id, trigger="dynamic_sampling_boost_low_volume_transactions"
)
def is_same_project(left: ProjectIdentity | None, right: ProjectIdentity | None) -> bool:
if left is None or right is None:
return False
return left["project_id"] == right["project_id"] and left["org_id"] == right["org_id"]
def is_project_identity_before(left: ProjectIdentity, right: ProjectIdentity) -> bool:
return left["org_id"] < right["org_id"] or (
left["org_id"] == right["org_id"] and left["project_id"] < right["project_id"]
)
| ProjectTransactionsTotals |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B909.py | {
"start": 1322,
"end": 3425
} | class ____:
some_list: list
def __init__(self, ls):
self.some_list = list(ls)
a = A((1, 2, 3))
# ensure member accesses are handled as errors
for elem in a.some_list:
a.some_list.remove(0)
del a.some_list[2]
# Augassign should error
foo = [1, 2, 3]
bar = [4, 5, 6]
for _ in foo:
foo *= 2
foo += bar
foo[1] = 9
foo[1:2] = bar
foo[1:2:3] = bar
foo = {1, 2, 3}
bar = {4, 5, 6}
for _ in foo: # should error
foo |= bar
foo &= bar
foo -= bar
foo ^= bar
# more tests for unconditional breaks - should not error
for _ in foo:
foo.remove(1)
for _ in bar:
bar.remove(1)
break
break
# should not error
for _ in foo:
foo.remove(1)
for _ in bar:
...
break
# should error (?)
for _ in foo:
foo.remove(1)
if bar:
bar.remove(1)
break
break
# should error
for _ in foo:
if bar:
pass
else:
foo.remove(1)
# should error
for elem in some_list:
if some_list.pop() == 2:
pass
# should not error
for elem in some_list:
if some_list.pop() == 2:
break
# should error
for elem in some_list:
if some_list.pop() == 2:
pass
else:
break
# should error
for elem in some_list:
del some_list[elem]
some_list.remove(elem)
some_list.discard(elem)
# should not error
for elem in some_list:
some_list[elem] = 1
# should error
for i, elem in enumerate(some_list):
some_list.pop(0)
# should not error (list)
for i, elem in enumerate(some_list):
some_list[i] = 1
# should not error (dict)
for i, elem in enumerate(some_list):
some_list[elem] = 1
# should not error
def func():
for elem in some_list:
if some_list.pop() == 2:
return
# should not error - direct return with mutation (Issue #18399)
def fail_map(mapping):
for key in mapping:
return mapping.pop(key)
def success_map(mapping):
for key in mapping:
ret = mapping.pop(key) # should not error
return ret
def fail_list(seq):
for val in seq:
return seq.pop(4)
| A |
python | ansible__ansible | test/units/inventory/test_host.py | {
"start": 884,
"end": 2314
} | class ____(unittest.TestCase):
ansible_port = 22
def setUp(self):
self.hostA = Host('a')
self.hostB = Host('b')
def test_equality(self):
self.assertEqual(self.hostA, self.hostA)
self.assertNotEqual(self.hostA, self.hostB)
self.assertNotEqual(self.hostA, Host('a'))
def test_hashability(self):
# equality implies the hash values are the same
self.assertEqual(hash(self.hostA), hash(Host('a')))
def test_get_vars(self):
host_vars = self.hostA.get_vars()
self.assertIsInstance(host_vars, dict)
def test_repr(self):
host_repr = repr(self.hostA)
self.assertIsInstance(host_repr, str)
def test_add_group(self):
group = Group('some_group')
group_len = len(self.hostA.groups)
self.hostA.add_group(group)
self.assertEqual(len(self.hostA.groups), group_len + 1)
def test_get_groups(self):
group = Group('some_group')
self.hostA.add_group(group)
groups = self.hostA.get_groups()
self.assertEqual(len(groups), 1)
for _group in groups:
self.assertIsInstance(_group, Group)
def test_equals_none(self):
other = None
assert not (self.hostA == other)
assert not (other == self.hostA)
assert self.hostA != other
assert other != self.hostA
self.assertNotEqual(self.hostA, other)
| TestHost |
python | django-guardian__django-guardian | guardian/testapp/models.py | {
"start": 2552,
"end": 2840
} | class ____(models.Model):
"""
Model for tests obj perms checks with generic group object permissions model
and generic group object permissions model.
"""
name = models.CharField(max_length=128, unique=True)
def __str__(self):
return self.name
| ReverseMixed |
python | google__pytype | pytype/tests/test_base.py | {
"start": 1711,
"end": 13890
} | class ____(unittest.TestCase):
"""Base class for implementing tests that check PyTD output."""
_loader: load_pytd.Loader
python_version: tuple[int, int] = sys.version_info[:2]
@classmethod
def setUpClass(cls):
super().setUpClass()
# We use class-wide loader to avoid creating a new loader for every test
# method if not required.
cls._loader = None
def setUp(self):
super().setUp()
self.options = config.Options.create(
python_version=self.python_version,
bind_decorated_methods=True,
none_is_not_bool=True,
overriding_renamed_parameter_count_checks=True,
strict_parameter_checks=True,
strict_undefined_checks=True,
strict_primitive_comparisons=True,
strict_none_binding=True,
use_fiddle_overlay=True,
use_functools_partial_overlay=True,
use_rewrite=_USE_REWRITE,
validate_version=False,
)
@property
def loader(self):
if not _MatchLoaderConfig(self.options, self._loader):
# Create a new loader only if the configuration in the current options
# does not match the configuration in the current loader.
self._loader = load_pytd.create_loader(self.options)
return self._loader
@property
def analyze_lib(self):
return rewrite_analyze if self.options.use_rewrite else analyze
def ConfigureOptions(self, **kwargs):
assert (
"python_version" not in kwargs
), "Individual tests cannot set the python_version of the config options."
self.options.tweak(**kwargs)
def _GetPythonpathArgs(self, pythonpath, imports_map):
"""Gets values for --pythonpath and --imports_map."""
if pythonpath:
pythonpath_arg = pythonpath
imports_map_arg = imports_map
elif imports_map:
pythonpath_arg = [""]
imports_map_arg = imports_map
else:
pythonpath_arg = self.options.pythonpath
imports_map_arg = self.options.imports_map
return {"pythonpath": pythonpath_arg, "imports_map": imports_map_arg}
# For historical reasons (byterun), this method name is snakecase:
# pylint: disable=invalid-name
def Check(
self,
code,
pythonpath=(),
skip_repeat_calls=True,
report_errors=True,
quick=False,
imports_map=None,
**kwargs,
):
"""Run an inference smoke test for the given code."""
self.ConfigureOptions(
skip_repeat_calls=skip_repeat_calls,
quick=quick,
**self._GetPythonpathArgs(pythonpath, imports_map),
)
try:
src = _Format(code)
if test_utils.ErrorMatcher(code).expected:
self.fail("Cannot assert errors with Check(); use CheckWithErrors()")
ret = self.analyze_lib.check_types(
src, loader=self.loader, options=self.options, **kwargs
)
errorlog = ret.context.errorlog
except directors.SkipFileError:
errorlog = None
if report_errors and errorlog:
errorlog.print_to_stderr()
self.fail(f"Checker found {len(errorlog)} errors:\n{errorlog}")
def assertNoCrash(self, method, code, **kwargs):
method(code, report_errors=False, **kwargs)
def _SetUpErrorHandling(
self, code, pythonpath, analyze_annotated, quick, imports_map
):
code = _Format(code)
self.ConfigureOptions(
analyze_annotated=analyze_annotated,
quick=quick,
**self._GetPythonpathArgs(pythonpath, imports_map),
)
return {"src": code, "options": self.options, "loader": self.loader}
def InferWithErrors(
self,
code,
pythonpath=(),
module_name=None,
analyze_annotated=True,
quick=False,
imports_map=None,
**kwargs,
):
"""Runs inference on code expected to have type errors."""
kwargs.update(
self._SetUpErrorHandling(
code, pythonpath, analyze_annotated, quick, imports_map
)
)
self.ConfigureOptions(module_name=module_name)
ret = self.analyze_lib.infer_types(**kwargs)
unit = ret.ast
assert unit is not None
unit.Visit(visitors.VerifyVisitor())
unit = optimize.Optimize(
unit,
ret.ast_deps,
lossy=False,
use_abcs=False,
max_union=7,
remove_mutable=False,
)
errorlog = ret.context.errorlog
src = kwargs["src"]
matcher = test_utils.ErrorMatcher(src)
matcher.assert_errors_match_expected(errorlog)
return pytd_utils.CanonicalOrdering(unit), matcher
def CheckWithErrors(
self,
code,
pythonpath=(),
analyze_annotated=True,
quick=False,
imports_map=None,
**kwargs,
):
"""Check and match errors."""
kwargs.update(
self._SetUpErrorHandling(
code, pythonpath, analyze_annotated, quick, imports_map
)
)
ret = self.analyze_lib.check_types(**kwargs)
errorlog = ret.context.errorlog
src = kwargs["src"]
matcher = test_utils.ErrorMatcher(src)
matcher.assert_errors_match_expected(errorlog)
return matcher
def InferFromFile(self, filename, pythonpath):
"""Runs inference on the contents of a file."""
with open(filename) as fi:
code = fi.read()
if test_utils.ErrorMatcher(code).expected:
self.fail(
"Cannot assert errors with InferFromFile(); use InferWithErrors()"
)
self.ConfigureOptions(
input=filename,
module_name=module_utils.get_module_name(filename, pythonpath),
pythonpath=pythonpath,
)
ret = self.analyze_lib.infer_types(
code, options=self.options, loader=self.loader
)
unit = ret.ast
assert unit is not None
unit.Visit(visitors.VerifyVisitor())
return pytd_utils.CanonicalOrdering(unit)
def assertErrorRegexes(self, matcher, expected_errors):
matcher.assert_error_regexes(expected_errors)
def assertErrorSequences(self, matcher, expected_errors):
matcher.assert_error_sequences(expected_errors)
def assertDiagnosticRegexes(self, matcher, expected_errors):
matcher.assert_diagnostic_regexes(expected_errors)
def assertDiagnosticMessages(self, matcher, expected_errors):
matcher.assert_diagnostic_messages(expected_errors)
def _PickleAst(self, ast, module_name):
assert module_name
ast = serialize_ast.PrepareForExport(module_name, ast, self.loader)
return pickle_utils.Serialize(ast)
def _PickleSource(self, src, module_name):
ast = serialize_ast.SourceToExportableAst(
module_name, textwrap.dedent(src), self.loader
)
return pickle_utils.Serialize(ast)
def Infer(
self,
srccode,
pythonpath=(),
report_errors=True,
analyze_annotated=True,
pickle=False,
module_name=None,
**kwargs,
):
"""Runs inference on srccode."""
types, deps = self._InferAndVerify(
_Format(srccode),
pythonpath=pythonpath,
analyze_annotated=analyze_annotated,
module_name=module_name,
report_errors=report_errors,
**kwargs,
)
types = optimize.Optimize(
types,
deps,
lossy=False,
use_abcs=False,
max_union=7,
remove_mutable=False,
)
types = pytd_utils.CanonicalOrdering(types)
if pickle:
return self._PickleAst(types, module_name)
else:
return types
def _InferAndVerify(
self,
src,
pythonpath,
module_name,
report_errors,
analyze_annotated,
imports_map=None,
quick=False,
**kwargs,
):
"""Infer types for the source code treating it as a module.
Used by Infer().
Args:
src: The source code of a module. Treat it as "__main__".
pythonpath: --pythonpath as list/tuple of string
module_name: Name of the module we're analyzing. E.g. "foo.bar.mymodule".
report_errors: Whether to fail if the type inferencer reports any errors
in the program.
analyze_annotated: Whether to analyze functions with return annotations.
imports_map: --imports_info data
quick: Try to run faster, by avoiding costly computations.
**kwargs: Keyword parameters to pass through to the type inferencer.
Raises:
AssertionError: If report_errors is True and we found errors.
Returns:
A pytd.TypeDeclUnit
"""
self.ConfigureOptions(
module_name=module_name,
quick=quick,
use_pickled_files=True,
analyze_annotated=analyze_annotated,
**self._GetPythonpathArgs(pythonpath, imports_map),
)
if test_utils.ErrorMatcher(src).expected:
self.fail("Cannot assert errors with Infer(); use InferWithErrors()")
ret = self.analyze_lib.infer_types(
src, options=self.options, loader=self.loader, **kwargs
)
errorlog = ret.context.errorlog
unit = ret.ast
assert unit is not None
unit.Visit(visitors.VerifyVisitor())
if report_errors and errorlog:
errorlog.print_to_stderr()
self.fail(f"Inferencer found {len(errorlog)} errors:\n{errorlog}")
return unit, ret.ast_deps
def assertTypesMatchPytd(self, ty, pytd_src):
"""Parses pytd_src and compares with ty."""
pytd_tree = parser.parse_string(
textwrap.dedent(pytd_src),
options=parser.PyiOptions(python_version=self.python_version),
)
pytd_tree = pytd_tree.Visit(
visitors.LookupBuiltins(self.loader.builtins, full_names=False)
)
pytd_tree = pytd_tree.Visit(visitors.LookupLocalTypes())
pytd_tree = pytd_tree.Visit(visitors.ClassTypeToNamedType())
pytd_tree = pytd_tree.Visit(visitors.CanonicalOrderingVisitor())
pytd_tree.Visit(visitors.VerifyVisitor())
ty = ty.Visit(visitors.ClassTypeToNamedType())
ty = ty.Visit(visitors.AdjustSelf())
ty = ty.Visit(visitors.CanonicalOrderingVisitor())
ty.Visit(visitors.VerifyVisitor())
ty_src = pytd_utils.Print(ty) + "\n"
pytd_tree_src = pytd_utils.Print(pytd_tree) + "\n"
log.info("========== result ==========")
_LogLines(log.info, ty_src)
log.info("========== expected ==========")
_LogLines(log.info, pytd_tree_src)
log.info("==============================")
# In the diff output, mark expected with "-" and actual with "+".
# (In other words, display a change from "working" to "broken")
self.assertMultiLineEqual(pytd_tree_src, ty_src)
@contextlib.contextmanager
def DepTree(self, deps):
"""Creates a tree of .pyi deps."""
old_pythonpath = self.options.pythonpath
old_imports_map = self.options.imports_map
old_use_pickled_files = self.options.use_pickled_files
try:
with test_utils.Tempdir() as d:
self.ConfigureOptions(
pythonpath=[""], imports_map=imports_map_lib.ImportsMap()
)
use_pickled_files = False
for dep in deps:
if len(dep) == 3:
path, contents, opts = dep
else:
path, contents = dep
opts = {}
base, ext = path_utils.splitext(path)
pickle = opts.get("pickle", False)
use_pickled_files |= pickle
new_path = base + (".pickled" if pickle else ".pyi")
if ext == ".pyi":
if pickle:
contents = self._PickleSource(contents, base)
filepath = d.create_file(new_path, contents)
elif ext == ".py":
pyi = self.Infer(contents, module_name=base, **opts)
if not pickle:
pyi = pytd_utils.Print(pyi)
filepath = d.create_file(new_path, pyi)
else:
raise ValueError(f"Unrecognised dependency type: {path}")
self.options.imports_map.items[base] = filepath
self.options.use_pickled_files = use_pickled_files
yield d
finally:
self.ConfigureOptions(
pythonpath=old_pythonpath,
imports_map=old_imports_map,
use_pickled_files=old_use_pickled_files,
)
def _PrintErrorDebug(descr, value):
log.error("=============== %s ===========", descr)
_LogLines(log.error, value)
log.error("=========== end %s ===========", descr)
def _LogLines(log_cmd, lines):
for l in lines.split("\n"):
log_cmd("%s", l)
| BaseTest |
python | pytorch__pytorch | torch/ao/ns/_numeric_suite.py | {
"start": 6632,
"end": 7233
} | class ____(Logger):
r"""Class used in Shadow module to record the outputs of the original and
shadow modules.
"""
def __init__(self):
super().__init__()
self.stats["float"] = []
self.stats["quantized"] = []
def forward(self, x, y): # type: ignore[override]
# fmt: off
"""
""" # blank docblock to make autodoc happy
# fmt: on
if len(x) > 1:
x = x[0]
if len(y) > 1:
y = y[0]
self.stats["quantized"].append(x.detach())
self.stats["float"].append(y.detach())
| ShadowLogger |
python | cherrypy__cherrypy | cherrypy/test/test_etags.py | {
"start": 87,
"end": 3141
} | class ____(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def resource(self):
return 'Oh wah ta goo Siam.'
@cherrypy.expose
def fail(self, code):
code = int(code)
if 300 <= code <= 399:
raise cherrypy.HTTPRedirect([], code)
else:
raise cherrypy.HTTPError(code)
@cherrypy.expose
# In Python 3, tools.encode is on by default
@cherrypy.config(**{'tools.encode.on': True})
def unicoded(self):
return ntou('I am a \u1ee4nicode string.', 'escape')
conf = {
'/': {
'tools.etags.on': True,
'tools.etags.autotags': True,
},
}
cherrypy.tree.mount(Root(), config=conf)
def test_etags(self):
self.getPage('/resource')
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('Oh wah ta goo Siam.')
etag = self.assertHeader('ETag')
# Test If-Match (both valid and invalid)
self.getPage('/resource', headers=[('If-Match', etag)])
self.assertStatus('200 OK')
self.getPage('/resource', headers=[('If-Match', '*')])
self.assertStatus('200 OK')
self.getPage('/resource', headers=[('If-Match', '*')], method='POST')
self.assertStatus('200 OK')
self.getPage('/resource', headers=[('If-Match', 'a bogus tag')])
self.assertStatus('412 Precondition Failed')
# Test If-None-Match (both valid and invalid)
self.getPage('/resource', headers=[('If-None-Match', etag)])
self.assertStatus(304)
self.getPage(
'/resource',
method='POST',
headers=[('If-None-Match', etag)],
)
self.assertStatus('412 Precondition Failed')
self.getPage('/resource', headers=[('If-None-Match', '*')])
self.assertStatus(304)
self.getPage('/resource', headers=[('If-None-Match', 'a bogus tag')])
self.assertStatus('200 OK')
def test_errors(self):
self.getPage('/resource')
self.assertStatus(200)
etag = self.assertHeader('ETag')
# Test raising errors in page handler
self.getPage('/fail/412', headers=[('If-Match', etag)])
self.assertStatus(412)
self.getPage('/fail/304', headers=[('If-Match', etag)])
self.assertStatus(304)
self.getPage('/fail/412', headers=[('If-None-Match', '*')])
self.assertStatus(412)
self.getPage('/fail/304', headers=[('If-None-Match', '*')])
self.assertStatus(304)
def test_unicode_body(self):
self.getPage('/unicoded')
self.assertStatus(200)
etag1 = self.assertHeader('ETag')
self.getPage('/unicoded', headers=[('If-Match', etag1)])
self.assertStatus(200)
self.assertHeader('ETag', etag1)
| ETagTest |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_pair_values_lat_lng_matches_geohash.py | {
"start": 927,
"end": 5388
} | class ____(ColumnPairMapExpectation):
"""Expect latlngs in column A to match with geohashes in column B.
The more digits a geohash has, the smaller and more precise of an area it represents.
When converting a latlng to a geohash, we are only asserting that it falls somewhere within the other geohash
we're comparing it with. To verify this, we only need to make sure that they share their left-most digits.
For example, dpz8 contains dpz80 (in addition to any other geohash that begins with "dpz8").
expect_column_pair_values_lat_lng_matches_geohash is a \
[Column Pair Map Expectation](https://docs.greatexpectations.io/docs/oss/guides/expectations/creating_custom_expectations/how_to_create_custom_column_pair_map_expectations)
Args:
column_A (str): \
The first column name
column_B (str): \
The second column name
Keyword Args:
ignore_row_if (str): \
"all_values_are_missing", "any_value_is_missing", "never"
mostly (None or a float between 0 and 1): \
Successful if at least mostly fraction of values match the expectation. \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
"""
ignore_row_if: Literal["both_values_are_missing", "either_value_is_missing", "neither"] = (
"both_values_are_missing"
)
examples: ClassVar[List[dict]] = [
{
"data": {
"latlngs": [
(43.681640625, -79.27734375),
(43.681640620, -79.27734370),
(43.681640615, -79.27734385),
],
"geohashes_same": ["dpz8", "dpz8", "dpz8"],
"geohashes_different": ["d", "dpz8", "dpz87zzzzzzz"],
"geohashes_incorrect": ["dpz8", "dpz7", "dpz6"],
},
"tests": [
{
"title": "basic_positive",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column_A": "latlngs", "column_B": "geohashes_same"},
"out": {
"success": True,
},
},
{
"title": "basic_negative",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column_A": "latlngs", "column_B": "geohashes_incorrect"},
"out": {
"success": False,
},
},
{
"title": "basic_negative",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column_A": "latlngs", "column_B": "geohashes_different"},
"out": {
"success": True,
},
},
],
}
]
map_metric: ClassVar[str] = "column_pair_values.lat_lng_matches_geohash"
success_keys: ClassVar[Tuple[str, ...]] = (
"column_A",
"column_B",
"ignore_row_if",
"mostly",
)
# This dictionary contains metadata for display in the public gallery
library_metadata: ClassVar[dict] = {
"tags": [
"geospatial",
"hackathon-22",
"multi-column expectation",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@chrisarnold91", # Don't forget to add your github handle here!
],
"requirements": ["python-geohash", "pandas"],
}
if __name__ == "__main__":
ExpectColumnPairValuesLatLngMatchesGeohash().print_diagnostic_checklist()
| ExpectColumnPairValuesLatLngMatchesGeohash |
python | doocs__leetcode | solution/0400-0499/0484.Find Permutation/Solution.py | {
"start": 0,
"end": 339
} | class ____:
def findPermutation(self, s: str) -> List[int]:
n = len(s)
ans = list(range(1, n + 2))
i = 0
while i < n:
j = i
while j < n and s[j] == 'D':
j += 1
ans[i : j + 1] = ans[i : j + 1][::-1]
i = max(i + 1, j)
return ans
| Solution |
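A quick hand-checked driver for the traversal above; it reuses the Solution class from the preceding record (assumed to be defined in the same session), and the inputs are illustrative. Each maximal run of 'D' characters reverses the corresponding slice of 1..n+1, which yields the lexicographically smallest matching permutation.

# Assumes Solution from the record above is in scope.
sol = Solution()
print(sol.findPermutation("DI"))   # [2, 1, 3]: 2 > 1 satisfies 'D', 1 < 3 satisfies 'I'
print(sol.findPermutation("DDI"))  # [3, 2, 1, 4]: the run of two 'D's reverses 1..3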
python | getsentry__sentry | src/sentry/issues/endpoints/shared_group_details.py | {
"start": 536,
"end": 2242
} | class ____(Endpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = ()
def get(
self,
request: Request,
organization_id_or_slug: int | str | None = None,
share_id: str | None = None,
) -> Response:
"""
Retrieve an aggregate
Return details on an individual aggregate specified by it's shared ID.
{method} {path}
Note: This is not the equivalent of what you'd receive with the standard
group details endpoint. Data is more restrictive and designed
specifically for sharing.
"""
if share_id is None:
raise ResourceDoesNotExist
try:
group = Group.objects.from_share_id(share_id)
except Group.DoesNotExist:
raise ResourceDoesNotExist
# Checks if the organization_id_or_slug matches the group organization's id or slug
if organization_id_or_slug:
if str(organization_id_or_slug).isdecimal():
if int(organization_id_or_slug) != group.organization.id:
raise ResourceDoesNotExist
else:
if organization_id_or_slug != group.organization.slug:
raise ResourceDoesNotExist
if group.organization.flags.disable_shared_issues:
raise ResourceDoesNotExist
context = serialize(
group,
request.user,
SharedGroupSerializer(
environment_func=get_environment_func(request, group.project.organization_id)
),
)
return Response(context)
| SharedGroupDetailsEndpoint |
python | getsentry__sentry | tests/sentry/seer/explorer/test_index_data.py | {
"start": 551,
"end": 6433
} | class ____(APITransactionTestCase, SnubaTestCase, SpanTestCase):
def setUp(self) -> None:
super().setUp()
self.ten_mins_ago = before_now(minutes=10)
def test_get_transactions_for_project(self) -> None:
"""Test the full end-to-end happy path for get_transactions_for_project."""
# Create spans for different transactions with varying total time spent
# Format: (transaction_name, count, avg_duration_ms)
transactions_data = [
("api/users/profile", 5, 100.0), # 5 * 100 = 500ms total (highest)
("api/posts/create", 3, 150.0), # 3 * 150 = 450ms total (middle)
("api/health", 10, 10.0), # 10 * 10 = 100ms total (lowest, despite high count)
]
# Store transaction spans with different volumes and durations
spans = []
for transaction_name, count, duration_ms in transactions_data:
for i in range(count):
span = self.create_span(
{
"description": f"transaction-span-{i}",
"sentry_tags": {"transaction": transaction_name},
"is_segment": True, # This marks it as a transaction span
"duration_ms": duration_ms,
},
start_ts=self.ten_mins_ago + timedelta(minutes=i),
)
spans.append(span)
# Also add some non-transaction spans that should be ignored
if i < 2: # Add 2 non-transaction spans per transaction
non_tx_span = self.create_span(
{
"description": f"regular-span-{i}",
"sentry_tags": {"transaction": transaction_name},
"is_segment": False, # This marks it as a regular span
"duration_ms": 50.0,
},
start_ts=self.ten_mins_ago + timedelta(minutes=i, seconds=30),
)
spans.append(non_tx_span)
self.store_spans(spans, is_eap=True)
# Call our function
result = get_transactions_for_project(self.project.id)
# Verify basic structure and data
assert len(result) == 3
# Should be sorted by total time spent (sum of duration) descending
transaction_names = [t.name for t in result]
assert transaction_names[0] == "api/users/profile" # 500ms total (highest)
assert transaction_names[1] == "api/posts/create" # 450ms total (middle)
assert transaction_names[2] == "api/health" # 100ms total (lowest despite high count)
# Verify all transactions have correct project_id and structure
for transaction in result:
assert transaction.project_id == self.project.id
assert hasattr(transaction, "name")
assert isinstance(transaction.name, str)
assert len(transaction.name) > 0
def test_get_trace_for_transaction(self) -> None:
transaction_name = "api/users/profile"
# Create multiple traces with different span counts
traces_data = [
(5, "trace-medium", 0), # 5 spans - starts at offset 0 (earliest)
(2, "trace-small", 10), # 2 spans - starts at offset 10 minutes
(8, "trace-large", 20), # 8 spans - starts at offset 20 minutes
]
spans = []
trace_ids = []
expected_trace_id = None
for span_count, trace_suffix, start_offset_minutes in traces_data:
# Generate a unique trace ID
trace_id = uuid.uuid4().hex
trace_ids.append(trace_id)
if trace_suffix == "trace-medium":
expected_trace_id = trace_id
for i in range(span_count):
# Create spans for this trace
span = self.create_span(
{
"description": f"span-{i}-{trace_suffix}",
"sentry_tags": {"transaction": transaction_name},
"trace_id": trace_id,
"parent_span_id": None if i == 0 else f"parent-{i-1}",
"is_segment": i == 0, # First span is the transaction span
},
start_ts=self.ten_mins_ago + timedelta(minutes=start_offset_minutes + i),
)
spans.append(span)
self.store_spans(spans, is_eap=True)
# Call our function
result = get_trace_for_transaction(transaction_name, self.project.id)
# Verify basic structure
assert result is not None
assert result.transaction_name == transaction_name
assert result.project_id == self.project.id
assert result.trace_id in trace_ids
# Should choose the first trace by start_ts (trace-medium with 5 spans)
assert result.trace_id == expected_trace_id
assert result.total_spans == 5
assert len(result.spans) == 5
# Verify all spans have correct structure and belong to the chosen trace
for result_span in result.spans:
assert hasattr(result_span, "span_id")
assert hasattr(result_span, "span_description")
assert hasattr(result_span, "parent_span_id")
assert hasattr(result_span, "span_op")
assert result_span.span_description is not None
assert result_span.span_description.startswith("span-")
assert "trace-medium" in result_span.span_description # Should be from the first trace
# Verify parent-child relationships are preserved
root_spans = [s for s in result.spans if s.parent_span_id is None]
assert len(root_spans) == 1 # Should have exactly one root span
| TestGetTransactionsForProject |
python | openai__openai-python | tests/test_transform.py | {
"start": 12061,
"end": 12179
} | class ____(TypedDict):
foo: Annotated[Union[Bar8, Iterable[Baz8]], PropertyInfo(alias="FOO")]
| TypedDictIterableUnion |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/widgets/serversconfig.py | {
"start": 1800,
"end": 3875
} | class ____(object):
"""Convenience class to store LSP Server configuration values."""
def __init__(self, language=None, cmd='', host='127.0.0.1',
port=2084, args='', external=False, stdio=False,
configurations={}, set_option=None, get_option=None,
remove_option=None):
self.index = 0
self.language = language
if self.language in LSP_LANGUAGE_NAME:
self.language = LSP_LANGUAGE_NAME[self.language]
self.cmd = cmd
self.args = args
self.configurations = configurations
self.port = port
self.host = host
self.external = external
self.stdio = stdio
self.set_option = set_option
self.get_option = get_option
self.remove_option = remove_option
def __repr__(self):
base_str = '[{0}] {1} {2} ({3}:{4})'
fmt_args = [self.language, self.cmd, self.args,
self.host, self.port]
if self.stdio:
base_str = '[{0}] {1} {2}'
fmt_args = [self.language, self.cmd, self.args]
if self.external:
base_str = '[{0}] {1}:{2}'
fmt_args = [self.language, self.host, self.port]
return base_str.format(*fmt_args)
def __str__(self):
return self.__repr__()
def __unicode__(self):
return self.__repr__()
def load(self):
if self.language is not None:
state = self.get_option(self.language.lower())
self.__dict__.update(state)
def save(self):
if self.language is not None:
language = self.language.lower()
dict_repr = dict(self.__dict__)
dict_repr.pop('set_option')
dict_repr.pop('get_option')
dict_repr.pop('remove_option')
self.set_option(language, dict_repr,
recursive_notification=False)
def delete(self):
if self.language is not None:
language = self.language.lower()
self.remove_option(language)
| LSPServer |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 302114,
"end": 302963
} | class ____(Request):
"""
Gets task information
:param task: Task ID
:type task: str
"""
_service = "tasks"
_action = "get_by_id"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"task": {"description": "Task ID", "type": "string"}},
"required": ["task"],
"type": "object",
}
def __init__(self, task: str, **kwargs: Any) -> None:
super(GetByIdRequest, self).__init__(**kwargs)
self.task = task
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| GetByIdRequest |