language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/2700-2799/2739.Total Distance Traveled/Solution.py | {
"start": 0,
"end": 339
} | class ____:
def distanceTraveled(self, mainTank: int, additionalTank: int) -> int:
ans = cur = 0
while mainTank:
cur += 1
ans += 10
mainTank -= 1
if cur % 5 == 0 and additionalTank:
additionalTank -= 1
mainTank += 1
return ans
| Solution |
python | wandb__wandb | wandb/vendor/pygments/lexers/fantom.py | {
"start": 499,
"end": 9982
} | class ____(RegexLexer):
"""
For Fantom source code.
.. versionadded:: 1.5
"""
name = 'Fantom'
aliases = ['fan']
filenames = ['*.fan']
mimetypes = ['application/x-fantom']
# often used regexes
def s(str):
return Template(str).substitute(
dict(
pod=r'[\"\w\.]+',
eos=r'\n|;',
id=r'[a-zA-Z_]\w*',
# all chars which can be part of type definition. Starts with
# either letter, or [ (maps), or | (funcs)
type=r'(?:\[|[a-zA-Z_]|\|)[:\w\[\]|\->?]*?',
)
)
tokens = {
'comments': [
(r'(?s)/\*.*?\*/', Comment.Multiline), # Multiline
(r'//.*?\n', Comment.Single), # Single line
# TODO: highlight references in fandocs
(r'\*\*.*?\n', Comment.Special), # Fandoc
(r'#.*\n', Comment.Single) # Shell-style
],
'literals': [
(r'\b-?[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration
(r'\b-?[\d_]*\.[\d_]+(ns|ms|sec|min|hr|day)', Number), # Duration with dot
(r'\b-?(\d+)?\.\d+(f|F|d|D)?', Number.Float), # Float/Decimal
(r'\b-?0x[0-9a-fA-F_]+', Number.Hex), # Hex
(r'\b-?[\d_]+', Number.Integer), # Int
(r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), # Char
(r'"', Punctuation, 'insideStr'), # Opening quote
(r'`', Punctuation, 'insideUri'), # Opening accent
(r'\b(true|false|null)\b', Keyword.Constant), # Bool & null
(r'(?:(\w+)(::))?(\w+)(<\|)(.*?)(\|>)', # DSL
bygroups(Name.Namespace, Punctuation, Name.Class,
Punctuation, String, Punctuation)),
(r'(?:(\w+)(::))?(\w+)?(#)(\w+)?', # Type/slot literal
bygroups(Name.Namespace, Punctuation, Name.Class,
Punctuation, Name.Function)),
(r'\[,\]', Literal), # Empty list
(s(r'($type)(\[,\])'), # Typed empty list
bygroups(using(this, state='inType'), Literal)),
(r'\[:\]', Literal), # Empty Map
(s(r'($type)(\[:\])'),
bygroups(using(this, state='inType'), Literal)),
],
'insideStr': [
(r'\\\\', String.Escape), # Escaped backslash
(r'\\"', String.Escape), # Escaped "
(r'\\`', String.Escape), # Escaped `
(r'\$\w+', String.Interpol), # Subst var
(r'\$\{.*?\}', String.Interpol), # Subst expr
(r'"', Punctuation, '#pop'), # Closing quot
(r'.', String) # String content
],
'insideUri': [ # TODO: remove copy/paste str/uri
(r'\\\\', String.Escape), # Escaped backslash
(r'\\"', String.Escape), # Escaped "
(r'\\`', String.Escape), # Escaped `
(r'\$\w+', String.Interpol), # Subst var
(r'\$\{.*?\}', String.Interpol), # Subst expr
(r'`', Punctuation, '#pop'), # Closing tick
(r'.', String.Backtick) # URI content
],
'protectionKeywords': [
(r'\b(public|protected|private|internal)\b', Keyword),
],
'typeKeywords': [
(r'\b(abstract|final|const|native|facet|enum)\b', Keyword),
],
'methodKeywords': [
(r'\b(abstract|native|once|override|static|virtual|final)\b',
Keyword),
],
'fieldKeywords': [
(r'\b(abstract|const|final|native|override|static|virtual|'
r'readonly)\b', Keyword)
],
'otherKeywords': [
(words((
'try', 'catch', 'throw', 'finally', 'for', 'if', 'else', 'while',
'as', 'is', 'isnot', 'switch', 'case', 'default', 'continue',
'break', 'do', 'return', 'get', 'set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'\b(it|this|super)\b', Name.Builtin.Pseudo),
],
'operators': [
(r'\+\+|\-\-|\+|\-|\*|/|\|\||&&|<=>|<=|<|>=|>|=|!|\[|\]', Operator)
],
'inType': [
(r'[\[\]|\->:?]', Punctuation),
(s(r'$id'), Name.Class),
default('#pop'),
],
'root': [
include('comments'),
include('protectionKeywords'),
include('typeKeywords'),
include('methodKeywords'),
include('fieldKeywords'),
include('literals'),
include('otherKeywords'),
include('operators'),
(r'using\b', Keyword.Namespace, 'using'), # Using stmt
(r'@\w+', Name.Decorator, 'facet'), # Symbol
(r'(class|mixin)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class),
'inheritance'), # Inheritance list
# Type var := val
(s(r'($type)([ \t]+)($id)(\s*)(:=)'),
bygroups(using(this, state='inType'), Text,
Name.Variable, Text, Operator)),
# var := val
(s(r'($id)(\s*)(:=)'),
bygroups(Name.Variable, Text, Operator)),
# .someId( or ->someId( ###
(s(r'(\.|(?:\->))($id)(\s*)(\()'),
bygroups(Operator, Name.Function, Text, Punctuation),
'insideParen'),
# .someId or ->someId
(s(r'(\.|(?:\->))($id)'),
bygroups(Operator, Name.Function)),
# new makeXXX (
(r'(new)(\s+)(make\w*)(\s*)(\()',
bygroups(Keyword, Text, Name.Function, Text, Punctuation),
'insideMethodDeclArgs'),
# Type name (
(s(r'($type)([ \t]+)' # Return type and whitespace
r'($id)(\s*)(\()'), # method name + open brace
bygroups(using(this, state='inType'), Text,
Name.Function, Text, Punctuation),
'insideMethodDeclArgs'),
# ArgType argName,
(s(r'($type)(\s+)($id)(\s*)(,)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation)),
# ArgType argName)
# Covered in 'insideParen' state
# ArgType argName -> ArgType|
(s(r'($type)(\s+)($id)(\s*)(\->)(\s*)($type)(\|)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation, Text, using(this, state='inType'),
Punctuation)),
# ArgType argName|
(s(r'($type)(\s+)($id)(\s*)(\|)'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation)),
# Type var
(s(r'($type)([ \t]+)($id)'),
bygroups(using(this, state='inType'), Text,
Name.Variable)),
(r'\(', Punctuation, 'insideParen'),
(r'\{', Punctuation, 'insideBrace'),
(r'.', Text)
],
'insideParen': [
(r'\)', Punctuation, '#pop'),
include('root'),
],
'insideMethodDeclArgs': [
(r'\)', Punctuation, '#pop'),
(s(r'($type)(\s+)($id)(\s*)(\))'),
bygroups(using(this, state='inType'), Text, Name.Variable,
Text, Punctuation), '#pop'),
include('root'),
],
'insideBrace': [
(r'\}', Punctuation, '#pop'),
include('root'),
],
'inheritance': [
(r'\s+', Text), # Whitespace
(r':|,', Punctuation),
(r'(?:(\w+)(::))?(\w+)',
bygroups(Name.Namespace, Punctuation, Name.Class)),
(r'\{', Punctuation, '#pop')
],
'using': [
(r'[ \t]+', Text), # consume whitespaces
(r'(\[)(\w+)(\])',
bygroups(Punctuation, Comment.Special, Punctuation)), # ffi
(r'(\")?([\w.]+)(\")?',
bygroups(Punctuation, Name.Namespace, Punctuation)), # podname
(r'::', Punctuation, 'usingClass'),
default('#pop')
],
'usingClass': [
(r'[ \t]+', Text), # consume whitespaces
(r'(as)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Name.Class), '#pop:2'),
(r'[\w$]+', Name.Class),
default('#pop:2') # jump out to root state
],
'facet': [
(r'\s+', Text),
(r'\{', Punctuation, 'facetFields'),
default('#pop')
],
'facetFields': [
include('comments'),
include('literals'),
include('operators'),
(r'\s+', Text),
(r'(\s*)(\w+)(\s*)(=)', bygroups(Text, Name, Text, Operator)),
(r'\}', Punctuation, '#pop'),
(r'.', Text)
],
}
| FantomLexer |
python | doocs__leetcode | solution/1500-1599/1552.Magnetic Force Between Two Balls/Solution.py | {
"start": 0,
"end": 434
} | class ____:
def maxDistance(self, position: List[int], m: int) -> int:
def check(f: int) -> bool:
prev = -inf
cnt = 0
for curr in position:
if curr - prev >= f:
prev = curr
cnt += 1
return cnt < m
position.sort()
l, r = 1, position[-1]
return bisect_left(range(l, r + 1), True, key=check)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/mro2.py | {
"start": 243,
"end": 307
} | class ____(A):
def foo(self, v1: float):
return None
| B |
python | pypa__warehouse | tests/unit/packaging/test_services.py | {
"start": 35488,
"end": 35697
} | class ____:
def test_notimplementederror(self):
with pytest.raises(NotImplementedError):
GenericLocalBlobStorage.create_service(pretend.stub(), pretend.stub())
| TestGenericLocalBlobStorage |
python | getsentry__sentry | src/sentry/analytics/events/codeowners_created.py | {
"start": 75,
"end": 256
} | class ____(analytics.Event):
user_id: int | None = None
organization_id: int
project_id: int
codeowners_id: int
analytics.register(CodeOwnersCreated)
| CodeOwnersCreated |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/handle.py | {
"start": 1704,
"end": 2406
} | class ____(NamedTuple("_UnresolvedStepHandle", [("node_handle", NodeHandle)])):
"""A reference to an UnresolvedMappedExecutionStep in an execution."""
def __new__(cls, node_handle: NodeHandle):
return super().__new__(
cls,
node_handle=check.inst_param(node_handle, "node_handle", NodeHandle),
)
def to_key(self) -> str:
return f"{self.node_handle}[?]"
def resolve(self, map_key) -> "ResolvedFromDynamicStepHandle":
return ResolvedFromDynamicStepHandle(self.node_handle, map_key)
# Serialize node_handle -> solid_handle for backcompat
@whitelist_for_serdes(storage_field_names={"node_handle": "solid_handle"})
| UnresolvedStepHandle |
python | google__python-fire | fire/testutils.py | {
"start": 767,
"end": 3450
} | class ____(unittest.TestCase):
"""Shared test case for Python Fire tests."""
@contextlib.contextmanager
def assertOutputMatches(self, stdout='.*', stderr='.*', capture=True):
"""Asserts that the context generates stdout and stderr matching regexps.
Note: If wrapped code raises an exception, stdout and stderr will not be
checked.
Args:
stdout: (str) regexp to match against stdout (None will check no stdout)
stderr: (str) regexp to match against stderr (None will check no stderr)
capture: (bool, default True) do not bubble up stdout or stderr
Yields:
Yields to the wrapped context.
"""
stdout_fp = io.StringIO()
stderr_fp = io.StringIO()
try:
with mock.patch.object(sys, 'stdout', stdout_fp):
with mock.patch.object(sys, 'stderr', stderr_fp):
yield
finally:
if not capture:
sys.stdout.write(stdout_fp.getvalue())
sys.stderr.write(stderr_fp.getvalue())
for name, regexp, fp in [('stdout', stdout, stdout_fp),
('stderr', stderr, stderr_fp)]:
value = fp.getvalue()
if regexp is None:
if value:
raise AssertionError('%s: Expected no output. Got: %r' %
(name, value))
else:
if not re.search(regexp, value, re.DOTALL | re.MULTILINE):
raise AssertionError('%s: Expected %r to match %r' %
(name, value, regexp))
@contextlib.contextmanager
def assertRaisesFireExit(self, code, regexp='.*'):
"""Asserts that a FireExit error is raised in the context.
Allows tests to check that Fire's wrapper around SystemExit is raised
and that a regexp is matched in the output.
Args:
code: The status code that the FireExit should contain.
regexp: stdout must match this regex.
Yields:
Yields to the wrapped context.
"""
with self.assertOutputMatches(stderr=regexp):
with self.assertRaises(core.FireExit):
try:
yield
except core.FireExit as exc:
if exc.code != code:
raise AssertionError('Incorrect exit code: %r != %r' %
(exc.code, code))
self.assertIsInstance(exc.trace, trace.FireTrace)
raise
@contextlib.contextmanager
def ChangeDirectory(directory):
"""Context manager to mock a directory change and revert on exit."""
cwdir = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(cwdir)
# pylint: disable=invalid-name
main = unittest.main
skip = unittest.skip
skipIf = unittest.skipIf
# pylint: enable=invalid-name
| BaseTestCase |
python | falconry__falcon | tests/test_error.py | {
"start": 8916,
"end": 10887
} | class ____:
def test_no_header(self, err, header_name, kw_name, args, res, kw_required):
if not kw_required:
value = err()
if value.headers:
assert header_name not in value.headers
def test_other_header(self, err, header_name, kw_name, args, res, kw_required):
headers = {'foo bar': 'baz'}
kw = {kw_name: args}
value = err(**kw, headers=headers)
assert value.headers['foo bar'] == 'baz'
assert header_name in value.headers
assert value.headers[header_name] == res
def test_override_header(self, err, header_name, kw_name, args, res, kw_required):
headers = {'foo bar': 'baz', header_name: 'other'}
kw = {kw_name: args}
value = err(**kw, headers=headers)
assert value.headers['foo bar'] == 'baz'
assert header_name in value.headers
assert value.headers[header_name] == res
def test_other_header_list(self, err, header_name, kw_name, args, res, kw_required):
headers = [('foo bar', 'baz')]
kw = {kw_name: args}
value = err(**kw, headers=headers)
assert value.headers['foo bar'] == 'baz'
assert header_name in value.headers
assert isinstance(value.headers, dict)
assert value.headers[header_name] == res
def test_override_header_list(
self, err, header_name, kw_name, args, res, kw_required
):
headers = [('foo bar', 'baz'), (header_name, 'other')]
kw = {kw_name: args}
value = err(**kw, headers=headers)
assert value.headers['foo bar'] == 'baz'
assert header_name in value.headers
assert isinstance(value.headers, dict)
assert value.headers[header_name] == res
def test_http_payload_too_large_deprecation():
with pytest.warns(match='HTTPContentTooLarge'):
err = errors.HTTPPayloadTooLarge()
assert err.title == '413 Content Too Large'
| TestErrorsWithHeadersKW |
python | google__jax | tests/memories_test.py | {
"start": 65238,
"end": 72224
} | class ____(jtu.JaxTestCase):
def setUp(self):
if not jtu.test_device_matches(["tpu", "gpu"]):
self.skipTest("Memories do not work on CPU backend.")
super().setUp()
def test_remat_jaxpr_offloadable(self):
mesh = jtu.create_mesh((2,), ("x",))
inp = jax.device_put(np.arange(16.), NamedSharding(mesh, P("x")))
def policy(prim, *avals, **params):
return Offloadable(src="device", dst="pinned_host")
@functools.partial(remat, policy=policy)
def f(x):
x = jnp.sin(x)
x = jnp.sin(x)
x = jnp.sin(x)
return jnp.sum(x)
fwd_jaxpr, bwd_jaxpr = jtu.fwd_bwd_jaxprs(f, inp)
self.assertLen(fwd_jaxpr.out_avals, 4) # 1 output, 3 offloaded residuals
fwd_mem_kind_count = str(fwd_jaxpr).count("MemorySpace.Host")
self.assertEqual(fwd_mem_kind_count, 3)
self.assertLen(bwd_jaxpr.in_avals, 4) # 3 offloaded residuals, 1 input
bwd_mem_kind_count = str(bwd_jaxpr).count("MemorySpace.Device")
self.assertEqual(bwd_mem_kind_count, 3)
# Execution test.
f = jax.jit(jax.grad(f))
f(inp) # doesn't crash
compiled_f = f.lower(inp).compile()
compiled_text = compiled_f.as_text()
if compiled_text is not None:
self.assertIn('S(5)', compiled_text)
self.assertRegex(compiled_text, r"copy-start.*S\(5\)")
self.assertRegex(compiled_text, r"copy-done.*S\(5\)")
compiled_stats = compiled_f.memory_analysis()
if compiled_stats is not None:
if jtu.pjrt_c_api_version_at_least(0, 43):
self.assertGreater(compiled_stats.host_temp_size_in_bytes, 0)
def test_remat_scan_jaxpr_offloadable(self):
mesh = jtu.create_mesh((2,), ("x",))
shape = (256, 128)
np_inp = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
s = NamedSharding(mesh, P("x"))
inp = jax.device_put(np_inp, s)
with self.assertRaisesRegex(
ValueError, "The names should be exclusive and should not intersect"):
jax.checkpoint_policies.save_and_offload_only_these_names(
names_which_can_be_saved=["y"], names_which_can_be_offloaded=["y", "w"],
offload_src="device", offload_dst="pinned_host")
policy = jax.checkpoint_policies.save_and_offload_only_these_names(
names_which_can_be_saved=["y"], names_which_can_be_offloaded=["z", "w"],
offload_src='device', offload_dst='pinned_host')
@functools.partial(remat, policy=policy)
def f(x):
def g(ys, _):
y, _ = ys
y = checkpoint_name(jnp.sin(y), "y")
z = checkpoint_name(jnp.sin(y), "z")
z = jax.lax.with_sharding_constraint(z, s)
w = checkpoint_name(jnp.sin(z), "w")
return (w, jnp.sum(w)), None
_, scan_out = jax.lax.scan(g, (x, np.array(1, dtype=np.float32)), [np_inp])[0]
return scan_out
fwd_jaxpr, bwd_jaxpr = jtu.fwd_bwd_jaxprs(f, inp)
self.assertLen(fwd_jaxpr.out_avals, 5) # 2 output, 3 offloaded residuals
fwd_mem_kind_count = str(fwd_jaxpr).count("MemorySpace.Host")
self.assertEqual(fwd_mem_kind_count, 2)
self.assertLen(bwd_jaxpr.in_avals, 5) # 3 offloaded residuals, 2 input
bwd_mem_kind_count = str(bwd_jaxpr).count("MemorySpace.Device")
self.assertEqual(bwd_mem_kind_count, 2)
f = jax.jit(jax.grad(f))
f(inp) # doesn't crash
compiled_f = f.lower(inp).compile()
compiled_text = compiled_f.as_text()
if compiled_text is not None:
self.assertIn('S(5)', compiled_text)
compiled_stats = compiled_f.memory_analysis()
if compiled_stats is not None:
self.assertGreater(compiled_stats.host_temp_size_in_bytes, 0)
def test_remat_scan_layout_change_offloadable(self):
mesh = jtu.create_mesh((2,), ("x",))
shape = (256, 128)
np_inp = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
s = NamedSharding(mesh, P("x"))
inp = jax.device_put(np_inp, s)
policy = jax.checkpoint_policies.save_and_offload_only_these_names(
names_which_can_be_saved=["y"], names_which_can_be_offloaded=["z", "w"],
offload_src='device', offload_dst='pinned_host')
@functools.partial(remat, policy=policy)
def f(x):
def g(ys, _):
y, _ = ys
y = checkpoint_name(jnp.sin(y), "y")
z = checkpoint_name(jnp.sin(y), "z")
z = jax.lax.with_sharding_constraint(z, s)
z = z.T
w = checkpoint_name(jnp.sin(z), "w")
return (w.T, jnp.sum(w)), None
_, scan_out = jax.lax.scan(g, (x, np.array(1, dtype=np.float32)), [np_inp])[0]
return scan_out
f = jax.jit(jax.grad(f))
f(inp) # doesn't crash
compiled_f = f.lower(inp).compile()
compiled_text = compiled_f.as_text()
if compiled_text is not None:
self.assertIn('S(5)', compiled_text)
self.assertRegex(compiled_text, r"dynamic-update-slice-start.*S\(5\)")
self.assertRegex(compiled_text, r"dynamic-update-slice-done.*S\(5\)")
self.assertRegex(compiled_text, r"dynamic-slice-start.*S\(5\)")
self.assertIn("dynamic-slice-start", compiled_text)
compiled_stats = compiled_f.memory_analysis()
if compiled_stats is not None:
self.assertGreater(compiled_stats.host_temp_size_in_bytes, 0)
def test_remat_checkpoint_dots_with_no_batch_dims(self):
policy = jax.checkpoint_policies.offload_dot_with_no_batch_dims(
"device", "pinned_host")
@functools.partial(new_checkpoint, policy=policy)
def f(x):
x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
x = jnp.sin(x)
x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
x = jnp.sin(x)
x = jnp.einsum('ij,jk->ik', x, x, precision=lax.Precision.HIGHEST)
x = jnp.sin(x)
x = jnp.sum(x)
return x
inp = jnp.ones((2, 2))
f = jax.jit(jax.grad(f))
f(inp) # doesn't crash
compiled_f = f.lower(inp).compile()
compiled_text = compiled_f.as_text()
if compiled_text is not None:
self.assertIn('S(5)', compiled_text)
self.assertRegex(compiled_text, r"copy-start.*S\(5\)")
self.assertRegex(compiled_text, r"copy-done.*S\(5\)")
compiled_stats = compiled_f.memory_analysis()
if compiled_stats is not None:
self.assertGreater(compiled_stats.host_temp_size_in_bytes, 0)
def test_primitive_with_multiple_outputs(self):
# Test for https://github.com/jax-ml/jax/issues/25841
shape = (128,)
inp = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
def policy(prim, *args, **kwargs):
del args, kwargs
if prim.multiple_results:
return Offloadable("device", "pinned_host")
return Recompute
@functools.partial(remat, policy=policy)
def test_fn(x):
# Need any primitive with multiple outputs and a non-trivial grad.
x1, _ = jax.lax.approx_max_k(x, k=2)
return jnp.sum(x1)
fn = jax.grad(test_fn)
jax.jit(fn)(inp) # doesn't crash
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| ActivationOffloadingTest |
python | walkccc__LeetCode | solutions/3. Longest Substring Without Repeating Characters/3.py | {
"start": 0,
"end": 290
} | class ____:
def lengthOfLongestSubstring(self, s: str) -> int:
ans = 0
count = collections.Counter()
l = 0
for r, c in enumerate(s):
count[c] += 1
while count[c] > 1:
count[s[l]] -= 1
l += 1
ans = max(ans, r - l + 1)
return ans
| Solution |
python | pennersr__django-allauth | allauth/socialaccount/providers/apple/provider.py | {
"start": 1010,
"end": 2941
} | class ____(OAuth2Provider):
id = "apple"
name = "Apple"
account_class = AppleAccount
oauth2_adapter_class = AppleOAuth2Adapter
supports_token_authentication = True
def extract_uid(self, data):
return str(data["sub"])
def extract_common_fields(self, data):
fields = {"email": data.get("email")}
# If the name was provided
name = data.get("name")
if name:
fields["first_name"] = name.get("firstName", "")
fields["last_name"] = name.get("lastName", "")
return fields
def extract_email_addresses(self, data):
ret = []
email = data.get("email")
verified = data.get("email_verified")
if isinstance(verified, str):
verified = verified.lower() == "true"
if email:
ret.append(
EmailAddress(
email=email,
verified=verified,
primary=True,
)
)
return ret
def get_default_scope(self):
scopes = ["name"]
if QUERY_EMAIL:
scopes.append("email")
return scopes
def verify_token(self, request, token):
from allauth.socialaccount.providers.apple.views import AppleOAuth2Adapter
id_token = token.get("id_token")
if not id_token:
raise get_adapter().validation_error("invalid_token")
try:
identity_data = AppleOAuth2Adapter.get_verified_identity_data(
self, id_token
)
except (OAuth2Error, requests.RequestException) as e:
raise get_adapter().validation_error("invalid_token") from e
login = self.sociallogin_from_response(request, identity_data)
return login
def get_auds(self):
return [aud.strip() for aud in self.app.client_id.split(",")]
provider_classes = [AppleProvider]
| AppleProvider |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 5831,
"end": 6827
} | class ____:
def test_ranking(self):
x = ma.array([0,1,1,1,2,3,4,5,5,6,])
assert_almost_equal(mstats.rankdata(x),
[1,3,3,3,5,6,7,8.5,8.5,10])
x[[3,4]] = masked
assert_almost_equal(mstats.rankdata(x),
[1,2.5,2.5,0,0,4,5,6.5,6.5,8])
assert_almost_equal(mstats.rankdata(x, use_missing=True),
[1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
x = ma.array([0,1,5,1,2,4,3,5,1,6,])
assert_almost_equal(mstats.rankdata(x),
[1,3,8.5,3,5,7,6,8.5,3,10])
x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
assert_almost_equal(mstats.rankdata(x),
[[1,3,3,3,5], [6,7,8.5,8.5,10]])
assert_almost_equal(mstats.rankdata(x, axis=1),
[[1,3,3,3,5], [1,2,3.5,3.5,5]])
assert_almost_equal(mstats.rankdata(x,axis=0),
[[1,1,1,1,1], [2,2,2,2,2,]])
| TestRanking |
python | redis__redis-py | tests/test_maint_notifications.py | {
"start": 12685,
"end": 14280
} | class ____:
"""Test the NodeFailingOverNotification class."""
def test_init(self):
"""Test NodeFailingOverNotification initialization."""
with patch("time.monotonic", return_value=1000):
notification = NodeFailingOverNotification(id=1, ttl=5)
assert notification.id == 1
assert notification.ttl == 5
assert notification.creation_time == 1000
def test_repr(self):
"""Test NodeFailingOverNotification string representation."""
with patch("time.monotonic", return_value=1000):
notification = NodeFailingOverNotification(id=1, ttl=5)
with patch("time.monotonic", return_value=1002): # 2 seconds later
repr_str = repr(notification)
assert "NodeFailingOverNotification" in repr_str
assert "id=1" in repr_str
assert "ttl=5" in repr_str
assert "remaining=3.0s" in repr_str
assert "expired=False" in repr_str
def test_equality_and_hash(self):
"""Test equality and hash for NodeFailingOverNotification."""
notification1 = NodeFailingOverNotification(id=1, ttl=5)
notification2 = NodeFailingOverNotification(
id=1, ttl=10
) # Same id, different ttl
notification3 = NodeFailingOverNotification(id=2, ttl=5) # Different id
assert notification1 == notification2
assert notification1 != notification3
assert hash(notification1) == hash(notification2)
assert hash(notification1) != hash(notification3)
| TestNodeFailingOverNotification |
python | pytorch__pytorch | test/dynamo/test_sources.py | {
"start": 235,
"end": 281
} | class ____:
value = 5
| CausalLMOutputWithPast |
python | pandas-dev__pandas | pandas/io/formats/info.py | {
"start": 15828,
"end": 16381
} | class ____:
"""
Class for printing dataframe or series info.
"""
def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None:
"""Save dataframe info into buffer."""
table_builder = self._create_table_builder()
lines = table_builder.get_lines()
if buf is None: # pragma: no cover
buf = sys.stdout
fmt.buffer_put_lines(buf, lines)
@abstractmethod
def _create_table_builder(self) -> _TableBuilderAbstract:
"""Create instance of table builder."""
| _InfoPrinterAbstract |
python | getsentry__sentry-python | sentry_sdk/tracing.py | {
"start": 45108,
"end": 51380
} | class ____(Span):
def __repr__(self):
# type: () -> str
return "<%s>" % self.__class__.__name__
@property
def containing_transaction(self):
# type: () -> Optional[Transaction]
return None
def start_child(self, instrumenter=INSTRUMENTER.SENTRY, **kwargs):
# type: (str, **Any) -> NoOpSpan
return NoOpSpan()
def to_traceparent(self):
# type: () -> str
return ""
def to_baggage(self):
# type: () -> Optional[Baggage]
return None
def get_baggage(self):
# type: () -> Optional[Baggage]
return None
def iter_headers(self):
# type: () -> Iterator[Tuple[str, str]]
return iter(())
def set_tag(self, key, value):
# type: (str, Any) -> None
pass
def set_data(self, key, value):
# type: (str, Any) -> None
pass
def update_data(self, data):
# type: (Dict[str, Any]) -> None
pass
def set_status(self, value):
# type: (str) -> None
pass
def set_http_status(self, http_status):
# type: (int) -> None
pass
def is_success(self):
# type: () -> bool
return True
def to_json(self):
# type: () -> Dict[str, Any]
return {}
def get_trace_context(self):
# type: () -> Any
return {}
def get_profile_context(self):
# type: () -> Any
return {}
def finish(
self,
scope=None, # type: Optional[sentry_sdk.Scope]
end_timestamp=None, # type: Optional[Union[float, datetime]]
*,
hub=None, # type: Optional[sentry_sdk.Hub]
):
# type: (...) -> Optional[str]
"""
The `hub` parameter is deprecated. Please use the `scope` parameter, instead.
"""
pass
def set_measurement(self, name, value, unit=""):
# type: (str, float, MeasurementUnit) -> None
pass
def set_context(self, key, value):
# type: (str, dict[str, Any]) -> None
pass
def init_span_recorder(self, maxlen):
# type: (int) -> None
pass
def _set_initial_sampling_decision(self, sampling_context):
# type: (SamplingContext) -> None
pass
if TYPE_CHECKING:
@overload
def trace(
func=None, *, op=None, name=None, attributes=None, template=SPANTEMPLATE.DEFAULT
):
# type: (None, Optional[str], Optional[str], Optional[dict[str, Any]], SPANTEMPLATE) -> Callable[[Callable[P, R]], Callable[P, R]]
# Handles: @trace() and @trace(op="custom")
pass
@overload
def trace(func):
# type: (Callable[P, R]) -> Callable[P, R]
# Handles: @trace
pass
def trace(
func=None, *, op=None, name=None, attributes=None, template=SPANTEMPLATE.DEFAULT
):
# type: (Optional[Callable[P, R]], Optional[str], Optional[str], Optional[dict[str, Any]], SPANTEMPLATE) -> Union[Callable[P, R], Callable[[Callable[P, R]], Callable[P, R]]]
"""
Decorator to start a child span around a function call.
This decorator automatically creates a new span when the decorated function
is called, and finishes the span when the function returns or raises an exception.
:param func: The function to trace. When used as a decorator without parentheses,
this is the function being decorated. When used with parameters (e.g.,
``@trace(op="custom")``, this should be None.
:type func: Callable or None
:param op: The operation name for the span. This is a high-level description
of what the span represents (e.g., "http.client", "db.query").
You can use predefined constants from :py:class:`sentry_sdk.consts.OP`
or provide your own string. If not provided, a default operation will
be assigned based on the template.
:type op: str or None
:param name: The human-readable name/description for the span. If not provided,
defaults to the function name. This provides more specific details about
what the span represents (e.g., "GET /api/users", "process_user_data").
:type name: str or None
:param attributes: A dictionary of key-value pairs to add as attributes to the span.
Attribute values must be strings, integers, floats, or booleans. These
attributes provide additional context about the span's execution.
:type attributes: dict[str, Any] or None
:param template: The type of span to create. This determines what kind of
span instrumentation and data collection will be applied. Use predefined
constants from :py:class:`sentry_sdk.consts.SPANTEMPLATE`.
The default is `SPANTEMPLATE.DEFAULT` which is the right choice for most
use cases.
:type template: :py:class:`sentry_sdk.consts.SPANTEMPLATE`
:returns: When used as ``@trace``, returns the decorated function. When used as
``@trace(...)`` with parameters, returns a decorator function.
:rtype: Callable or decorator function
Example::
import sentry_sdk
from sentry_sdk.consts import OP, SPANTEMPLATE
# Simple usage with default values
@sentry_sdk.trace
def process_data():
# Function implementation
pass
# With custom parameters
@sentry_sdk.trace(
op=OP.DB_QUERY,
name="Get user data",
attributes={"postgres": True}
)
def make_db_query(sql):
# Function implementation
pass
# With a custom template
@sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL)
def calculate_interest_rate(amount, rate, years):
# Function implementation
pass
"""
from sentry_sdk.tracing_utils import create_span_decorator
decorator = create_span_decorator(
op=op,
name=name,
attributes=attributes,
template=template,
)
if func:
return decorator(func)
else:
return decorator
# Circular imports
from sentry_sdk.tracing_utils import (
Baggage,
EnvironHeaders,
extract_sentrytrace_data,
_generate_sample_rand,
has_tracing_enabled,
maybe_create_breadcrumbs_from_span,
)
| NoOpSpan |
python | getsentry__sentry | tests/sentry/utils/test_glob.py | {
"start": 95,
"end": 2086
} | class ____(NamedTuple):
value: str | None
pat: str
kwargs: dict[str, bool]
@classmethod
def make(cls, value: str | None, pat: str, **kwargs: bool) -> Self:
return cls(value=value, pat=pat, kwargs=kwargs)
def __call__(self):
return glob_match(self.value, self.pat, **self.kwargs)
@pytest.mark.parametrize(
"glob_input,expect",
[
[GlobInput.make("hello.py", "*.py"), True],
[GlobInput.make("hello.py", "*.js"), False],
[GlobInput.make(None, "*.js"), False],
[GlobInput.make(None, "*"), True],
[GlobInput.make("foo/hello.py", "*.py"), True],
[GlobInput.make("foo/hello.py", "*.py", doublestar=True), False],
[GlobInput.make("foo/hello.py", "**/*.py", doublestar=True), True],
[GlobInput.make("foo/hello.PY", "**/*.py"), False],
[GlobInput.make("foo/hello.PY", "**/*.py", doublestar=True), False],
[GlobInput.make("foo/hello.PY", "**/*.py", ignorecase=True), True],
[GlobInput.make("foo/hello.PY", "**/*.py", doublestar=True, ignorecase=True), True],
[GlobInput.make("root\\foo\\hello.PY", "root/**/*.py", ignorecase=True), False],
[
GlobInput.make("root\\foo\\hello.PY", "root/**/*.py", doublestar=True, ignorecase=True),
False,
],
[
GlobInput.make(
"root\\foo\\hello.PY", "root/**/*.py", ignorecase=True, path_normalize=True
),
True,
],
[
GlobInput.make(
"root\\foo\\hello.PY",
"root/**/*.py",
doublestar=True,
ignorecase=True,
path_normalize=True,
),
True,
],
[GlobInput.make("foo:\nbar", "foo:*"), True],
[GlobInput.make("foo:\nbar", "foo:*", allow_newline=False), False],
],
)
def test_glob_match(glob_input: GlobInput, expect: bool) -> None:
assert glob_input() == expect
| GlobInput |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/interfaces.py | {
"start": 8018,
"end": 9952
} | class ____:
key: str
reverse: bool = False
SortersDefinition: TypeAlias = List[Union[Sorter, str, dict]]
def _is_sorter_list(
sorters: SortersDefinition,
) -> TypeGuard[list[Sorter]]:
return len(sorters) == 0 or isinstance(sorters[0], Sorter)
def _is_str_sorter_list(sorters: SortersDefinition) -> TypeGuard[list[str]]:
return len(sorters) > 0 and isinstance(sorters[0], str)
def _sorter_from_list(sorters: SortersDefinition) -> list[Sorter]:
if _is_sorter_list(sorters):
return sorters
# mypy doesn't successfully type-narrow sorters to a list[str] here, so we use
# another TypeGuard. We could cast instead which may be slightly faster.
sring_valued_sorter: str
if _is_str_sorter_list(sorters):
return [_sorter_from_str(sring_valued_sorter) for sring_valued_sorter in sorters]
# This should never be reached because of static typing but is necessary because
# mypy doesn't know of the if conditions must evaluate to True.
raise ValueError( # noqa: TRY003 # FIXME CoP
f"sorters is a not a SortersDefinition but is a {type(sorters)}"
)
def _sorter_from_str(sort_key: str) -> Sorter:
"""Convert a list of strings to Sorter objects
Args:
sort_key: A batch metadata key which will be used to sort batches on a data asset.
This can be prefixed with a + or - to indicate increasing or decreasing
sorting. If not specified, defaults to increasing order.
"""
if sort_key[0] == "-":
return Sorter(key=sort_key[1:], reverse=True)
if sort_key[0] == "+":
return Sorter(key=sort_key[1:], reverse=False)
return Sorter(key=sort_key, reverse=False)
# It would be best to bind this to ExecutionEngine, but we can't now due to circular imports
_ExecutionEngineT = TypeVar("_ExecutionEngineT")
DatasourceT = TypeVar("DatasourceT", bound="Datasource")
@public_api
| Sorter |
python | getsentry__sentry | src/sentry/models/dashboard.py | {
"start": 1351,
"end": 6940
} | class ____(BaseManager["DashboardFavoriteUser"]):
def get_last_position(self, organization: Organization, user_id: int) -> int:
"""
Returns the last position of a user's favorited dashboards in an organization.
"""
last_favorite_dashboard = (
self.filter(
organization=organization,
user_id=user_id,
position__isnull=False,
)
.order_by("-position")
.first()
)
if last_favorite_dashboard and last_favorite_dashboard.position is not None:
return last_favorite_dashboard.position
return 0
def get_favorite_dashboards(
self, organization: Organization, user_id: int
) -> QuerySet[DashboardFavoriteUser]:
"""
Returns all favorited dashboards for a user in an organization.
"""
return self.filter(organization=organization, user_id=user_id).order_by(
"position", "dashboard__title"
)
def get_favorite_dashboard(
self, organization: Organization, user_id: int, dashboard: Dashboard
) -> DashboardFavoriteUser | None:
"""
Returns the favorite dashboard if it exists, otherwise None.
"""
return self.filter(organization=organization, user_id=user_id, dashboard=dashboard).first()
def reorder_favorite_dashboards(
self, organization: Organization, user_id: int, new_dashboard_positions: list[int]
):
"""
Reorders the positions of favorited dashboards for a user in an organization.
Does NOT add or remove favorited dashboards.
Args:
organization: The organization the dashboards belong to
user_id: The ID of the user whose favorited dashboards are being reordered
new_dashboard_positions: List of dashboard IDs in their new order
Raises:
ValueError: If there's a mismatch between existing favorited dashboards and the provided list
"""
existing_favorite_dashboards = self.filter(
organization=organization,
user_id=user_id,
)
existing_dashboard_ids = {
favorite.dashboard.id for favorite in existing_favorite_dashboards
}
new_dashboard_ids = set(new_dashboard_positions)
sentry_sdk.set_context(
"reorder_favorite_dashboards",
{
"organization": organization.id,
"user_id": user_id,
"existing_dashboard_ids": existing_dashboard_ids,
"new_dashboard_positions": new_dashboard_positions,
},
)
if existing_dashboard_ids != new_dashboard_ids:
raise ValueError("Mismatch between existing and provided favorited dashboards.")
position_map = {
dashboard_id: idx for idx, dashboard_id in enumerate(new_dashboard_positions)
}
favorites_to_update = list(existing_favorite_dashboards)
for favorite in favorites_to_update:
favorite.position = position_map[favorite.dashboard.id]
with transaction.atomic(using=router.db_for_write(DashboardFavoriteUser)):
if favorites_to_update:
self.bulk_update(favorites_to_update, ["position"])
def insert_favorite_dashboard(
self,
organization: Organization,
user_id: int,
dashboard: Dashboard,
) -> bool:
"""
Inserts a new favorited dashboard at the end of the list.
Args:
organization: The organization the dashboards belong to
user_id: The ID of the user whose favorited dashboards are being updated
dashboard: The dashboard to insert
Returns:
True if the dashboard was favorited, False if the dashboard was already favorited
"""
with transaction.atomic(using=router.db_for_write(DashboardFavoriteUser)):
if self.get_favorite_dashboard(organization, user_id, dashboard):
return False
if self.count() == 0:
position = 0
else:
position = self.get_last_position(organization, user_id) + 1
self.create(
organization=organization,
user_id=user_id,
dashboard=dashboard,
position=position,
)
return True
def delete_favorite_dashboard(
self, organization: Organization, user_id: int, dashboard: Dashboard
) -> bool:
"""
Deletes a favorited dashboard from the list.
Decrements the position of all dashboards after the deletion point.
Args:
organization: The organization the dashboards belong to
user_id: The ID of the user whose favorited dashboards are being updated
dashboard: The dashboard to delete
Returns:
True if the dashboard was unfavorited, False if the dashboard was already unfavorited
"""
with transaction.atomic(using=router.db_for_write(DashboardFavoriteUser)):
if not (favorite := self.get_favorite_dashboard(organization, user_id, dashboard)):
return False
deleted_position = favorite.position
favorite.delete()
self.filter(
organization=organization, user_id=user_id, position__gt=deleted_position
).update(position=models.F("position") - 1)
return True
@region_silo_model
| DashboardFavoriteUserManager |
python | bokeh__bokeh | src/bokeh/colors/groups.py | {
"start": 8641,
"end": 9812
} | class ____(ColorGroup):
''' CSS "Yellow" Color Group as defined by https://www.w3schools.com/colors/colors_groups.asp
.. bokeh-color:: yellow
.. bokeh-color:: lightyellow
.. bokeh-color:: lemonchiffon
.. bokeh-color:: lightgoldenrodyellow
.. bokeh-color:: papayawhip
.. bokeh-color:: moccasin
.. bokeh-color:: peachpuff
.. bokeh-color:: palegoldenrod
.. bokeh-color:: khaki
.. bokeh-color:: darkkhaki
.. bokeh-color:: gold
'''
_colors = ('Yellow', 'LightYellow', 'LemonChiffon', 'LightGoldenrodYellow', 'PapayaWhip',
'Moccasin', 'PeachPuff', 'PaleGoldenrod', 'Khaki', 'DarkKhaki', 'Gold')
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| yellow |
python | HypothesisWorks__hypothesis | hypothesis-python/docs/_ext/hypothesis_redirects.py | {
"start": 2060,
"end": 7519
} | class ____:
def __init__(self, app: Sphinx) -> None:
self.app = app
self.redirects_option: dict[str, str] = getattr(app.config, OPTION_REDIRECTS)
self.template_file_option: str = getattr(app.config, OPTION_TEMPLATE_FILE)
def grab_redirects(self) -> Mapping[str, str]:
"""Inspect redirects option in conf.py and returns dict mapping
docname to target (with expanded placeholder)."""
# docname-target dict
to_be_redirected = {}
# For each source-target redirect pair in conf.py
for source, target in self.redirects_option.items():
# no wildcard, append source as-is
if not self._contains_wildcard(source):
to_be_redirected[source] = target
continue
assert self.app.env
# wildcarded source, expand to docnames
expanded_docs = [
doc for doc in self.app.env.found_docs if fnmatch(doc, source)
]
if not expanded_docs:
logger.warning(f"No documents match to '{source}' redirect.")
continue
for doc in expanded_docs:
new_target = self._apply_placeholders(doc, target)
to_be_redirected[doc] = new_target
return to_be_redirected
def docname_out_path(self, docname: str, suffix: str) -> Sequence[str]:
"""
For a Sphinx docname (the path to a source document without suffix),
returns path to outfile that would be created by the used builder.
"""
# Return as-is, if the docname already has been passed with a suffix
if docname.endswith(suffix):
return [docname]
# Remove any trailing slashes, except for "/"" index
if len(docname) > 1 and docname.endswith(SEP):
docname = docname.rstrip(SEP)
# Figure out whether we have dirhtml builder
out_uri = self.app.builder.get_target_uri(docname=docname) # type: ignore
if not out_uri.endswith(suffix):
# If dirhtml builder is used, need to append "index"
return [out_uri, "index"]
# Otherwise, convert e.g. 'source' to 'source.html'
return [out_uri]
def create_redirects(self, to_be_redirected: Mapping[str, str]) -> None:
"""Create actual redirect file for each pair in passed mapping of
docnames to targets."""
# Corresponds to value of `html_file_suffix`, but takes into account
# modifications done by the builder class
try:
suffix = self.app.builder.out_suffix # type: ignore
except Exception:
suffix = ".html"
for docname, target in to_be_redirected.items():
out = self.docname_out_path(docname, suffix)
redirect_file_abs = Path(self.app.outdir).joinpath(*out).with_suffix(suffix)
redirect_file_rel = redirect_file_abs.relative_to(self.app.outdir)
if redirect_file_abs.exists():
logger.info(
f"Overwriting '{redirect_file_rel}' with redirect to '{target}'."
)
else:
logger.info(f"Creating redirect '{redirect_file_rel}' to '{target}'.")
self._create_redirect_file(redirect_file_abs, target)
@staticmethod
def _contains_wildcard(text: str) -> bool:
"""Tells whether passed argument contains wildcard characters."""
return bool(wildcard_pattern.search(text))
@staticmethod
def _apply_placeholders(source: str, target: str) -> str:
"""Expand "source" placeholder in target and return it"""
return Template(target).substitute({"source": source})
def _create_redirect_file(self, at_path: Path, to_uri: str) -> None:
"""Actually create a redirect file according to redirect template"""
content = self._render_redirect_template(to_uri)
# create any missing parent folders
at_path.parent.mkdir(parents=True, exist_ok=True)
at_path.write_text(content, encoding="utf-8")
def _render_redirect_template(self, to_uri: str) -> str:
# HTML used as redirect file content
redirect_template = REDIRECT_FILE_DEFAULT_TEMPLATE
if self.template_file_option:
redirect_file_abs = Path(self.app.srcdir, self.template_file_option)
redirect_template = redirect_file_abs.read_text(encoding="utf-8")
return Template(redirect_template).substitute({"to_uri": to_uri})
def collect_redirects_for_linkcheck(app):
# Ignore when not invoked with linkcheck builder
if not isinstance(app.builder, CheckExternalLinksBuilder):
return
redirects = Reredirects(app).grab_redirects()
for docname, target in redirects.items():
# Give a Sphinx or extensions change to modify original target URL
if new_target := app.emit_firstresult("linkcheck-process-uri", target):
target = new_target
if urlparse(target).scheme not in ("http", "https"):
# Checking redirects to other pages of the same documentation is not
# supported for now.
continue
# Add target external URL to hyperlinks which linkcheck builder will check
docpath = app.env.doc2path(docname)
hyperlink = Hyperlink(uri=target, docname=docname, docpath=docpath, lineno=-1)
app.builder.hyperlinks[target] = hyperlink
| Reredirects |
python | django__django | tests/template_tests/filter_tests/test_linebreaks.py | {
"start": 1087,
"end": 2344
} | class ____(SimpleTestCase):
def test_line(self):
self.assertEqual(linebreaks_filter("line 1"), "<p>line 1</p>")
def test_newline(self):
self.assertEqual(linebreaks_filter("line 1\nline 2"), "<p>line 1<br>line 2</p>")
def test_carriage(self):
self.assertEqual(linebreaks_filter("line 1\rline 2"), "<p>line 1<br>line 2</p>")
def test_carriage_newline(self):
self.assertEqual(
linebreaks_filter("line 1\r\nline 2"), "<p>line 1<br>line 2</p>"
)
def test_non_string_input(self):
self.assertEqual(linebreaks_filter(123), "<p>123</p>")
def test_autoescape(self):
self.assertEqual(
linebreaks_filter("foo\n<a>bar</a>\nbuz"),
"<p>foo<br><a>bar</a><br>buz</p>",
)
def test_autoescape_off(self):
self.assertEqual(
linebreaks_filter("foo\n<a>bar</a>\nbuz", autoescape=False),
"<p>foo<br><a>bar</a><br>buz</p>",
)
def test_lazy_string_input(self):
add_header = lazy(lambda string: "Header\n\n" + string, str)
self.assertEqual(
linebreaks_filter(add_header("line 1\r\nline2")),
"<p>Header</p>\n\n<p>line 1<br>line2</p>",
)
| FunctionTests |
python | pytorch__pytorch | torchgen/api/lazy.py | {
"start": 8936,
"end": 11456
} | class ____:
"""Collection of properties for an IR node
The property groups are listed below. Each group is mutually
exclusive, meaning that only one property from each group can be True
at any one time. The properties can be accessed as if they were normal
attributes. The mutual exclusivity is automatically handled.
"""
Properties: tuple[tuple[str, ...], ...] = (
(
"ShapePrecompute", # Assume shape has been precomputed
"ShapeCompute", # Need to compute the shape on construction
"ShapeCache", # Utilize the shape cache to defer computation
),
(
"Lower", # Codegen full lower function
"LowerDeclOnly", # Codegen only lower function declaration
),
(
"CanBeReused", # Codegen full reuse function
"CanBeReusedDeclOnly", # Codegen only reuse function declaration
),
(
"CreateFn", # Codegen full create function
"CreateFnDeclOnly", # Codegen only create function declaration
),
(
"TreatScalarsAsConstants", # Treat Scalars as constants instead of handling like values
),
)
def __init__(self, *default_properties: str) -> None:
properties: dict[tuple[str, ...], str | None] = dict.fromkeys(
LazyIrProperties.Properties
)
self.__dict__["properties"] = properties
for p in default_properties:
setattr(self, p, True)
def __getattr__(self, key: str) -> Any:
properties = self.__dict__["properties"]
for values in LazyIrProperties.Properties:
if key in values:
return properties[values] == key
return self.__getattribute__(key)
def __setattr__(self, key: str, value: Any) -> Any:
properties = self.__dict__["properties"]
for values in LazyIrProperties.Properties:
if key in values:
properties[values] = key if value else None
return value
raise KeyError(f"Invalid property: {key}")
# Inspired by a FunctionSchema object, a LazyIrSchema holds the schema of a Lazy IR node.
# Unlike a FunctionSchema, it has no round-trippable string form (relating to the YAML),
# but carries type information from a native FunctionSchema modified for use with IR nodes,
# and preserving original argument names.
#
# TODO: This is not idiomatic with how other torchgen APIs transform on schema.
| LazyIrProperties |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 5581,
"end": 5859
} | class ____(graphene.ObjectType):
metrics = graphene.List(graphene.NonNull('mlflow.server.graphql.graphql_schema_extensions.MlflowMetricExtension'))
params = graphene.List(graphene.NonNull(MlflowParam))
tags = graphene.List(graphene.NonNull(MlflowRunTag))
| MlflowRunData |
python | getsentry__sentry | tests/sentry/snuba/metrics/fields/test_base.py | {
"start": 17618,
"end": 25091
} | class ____(TestCase):
def setUp(self) -> None:
self.sessions_errored = MOCKED_DERIVED_METRICS[SessionMRI.ERRORED.value]
def test_get_entity(self) -> None:
"""
Test that ensures that the even when generating the component entities dict of instances
of SingleEntityDerivedMetric, we are still validating that they exist
"""
assert self.sessions_errored.get_entity(
projects=[self.project], use_case_id=UseCaseID.SESSIONS
) == {
"metrics_counters": [
SessionMRI.ERRORED_PREAGGREGATED.value,
SessionMRI.CRASHED_AND_ABNORMAL.value,
],
"metrics_sets": [SessionMRI.ERRORED_SET.value],
}
@mock.patch(
"sentry.snuba.metrics.fields.base._get_entity_of_metric_mri", get_entity_of_metric_mocked
)
def test_get_entity_and_validate_dependency_tree_of_single_entity_constituents(self) -> None:
use_case_id = UseCaseID.SESSIONS
assert self.sessions_errored.get_entity(
projects=[self.project], use_case_id=use_case_id
) == {
"metrics_counters": [
SessionMRI.ERRORED_PREAGGREGATED.value,
SessionMRI.CRASHED_AND_ABNORMAL.value,
],
"metrics_sets": [SessionMRI.ERRORED_SET.value],
}
component_entities = DERIVED_METRICS[SessionMRI.HEALTHY.value].get_entity(
projects=[self.project], use_case_id=use_case_id
)
assert isinstance(component_entities, dict)
assert sorted(component_entities["metrics_counters"]) == [
SessionMRI.ALL.value,
SessionMRI.ERRORED_PREAGGREGATED.value,
]
assert sorted(component_entities["metrics_sets"]) == [SessionMRI.ERRORED_SET.value]
def test_generate_metric_ids(self) -> None:
with pytest.raises(NotSupportedOverCompositeEntityException):
self.sessions_errored.generate_metric_ids(
projects=[self.project], use_case_id=UseCaseID.SESSIONS
)
def test_generate_select_snql_of_derived_metric(self) -> None:
with pytest.raises(NotSupportedOverCompositeEntityException):
self.sessions_errored.generate_select_statements(
projects=[self.project],
use_case_id=UseCaseID.SESSIONS,
alias="test",
)
def test_generate_orderby_clause(self) -> None:
with pytest.raises(NotSupportedOverCompositeEntityException):
self.sessions_errored.generate_orderby_clause(
direction=Direction.ASC,
projects=[self.project],
use_case_id=UseCaseID.SESSIONS,
alias="test",
)
def test_generate_default_value(self) -> None:
assert self.sessions_errored.generate_default_null_values() == 0
@patch("sentry.snuba.metrics.fields.base.DERIVED_METRICS", MOCKED_DERIVED_METRICS)
def test_generate_bottom_up_derived_metrics_dependencies(self) -> None:
alias = "sessions_errored"
assert list(
self.sessions_errored.generate_bottom_up_derived_metrics_dependencies(alias)
) == [
(
None,
SessionMRI.ERRORED_SET.value,
f"{SessionMRI.ERRORED_SET.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(
None,
SessionMRI.ERRORED_PREAGGREGATED.value,
f"{SessionMRI.ERRORED_PREAGGREGATED.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(
None,
SessionMRI.CRASHED_AND_ABNORMAL.value,
f"{SessionMRI.CRASHED_AND_ABNORMAL.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(
None,
SessionMRI.ERRORED_ALL.value,
f"{SessionMRI.ERRORED_ALL.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(None, SessionMRI.ERRORED.value, alias),
]
alias = "random_composite"
assert list(
MOCKED_DERIVED_METRICS[
"random_composite"
].generate_bottom_up_derived_metrics_dependencies(alias)
) == [
(
None,
SessionMRI.ERRORED_SET.value,
f"{SessionMRI.ERRORED_SET.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(
None,
SessionMRI.ERRORED_PREAGGREGATED.value,
f"{SessionMRI.ERRORED_PREAGGREGATED.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(
None,
SessionMRI.CRASHED_AND_ABNORMAL.value,
f"{SessionMRI.CRASHED_AND_ABNORMAL.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(
None,
SessionMRI.ERRORED_ALL.value,
f"{SessionMRI.ERRORED_ALL.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(
None,
SessionMRI.ERRORED.value,
f"{SessionMRI.ERRORED.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}",
),
(None, "random_composite", alias),
]
def test_run_post_query_function(self) -> None:
alias = "sessions_errored"
totals = {
f"{SessionMRI.ERRORED_SET.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": 3,
f"{SessionMRI.ERRORED_PREAGGREGATED.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": 4.0,
f"{SessionMRI.CRASHED_AND_ABNORMAL.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": 0,
f"{SessionMRI.ERRORED.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": 0,
f"{SessionMRI.ERRORED_ALL.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": 7,
}
series = {
f"{SessionMRI.ERRORED_SET.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": [
0,
0,
0,
0,
3,
0,
],
f"{SessionMRI.ERRORED.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": [
0,
0,
0,
0,
0,
0,
],
f"{SessionMRI.ERRORED_PREAGGREGATED.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": [
4.0,
0,
0,
0,
0,
0,
],
f"{SessionMRI.CRASHED_AND_ABNORMAL.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": [
0,
0,
0,
0,
0,
0,
],
f"{SessionMRI.ERRORED_ALL.value}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{alias}": [
4.0,
0,
0,
0,
3,
0,
],
}
assert (
self.sessions_errored.run_post_query_function(
totals,
alias=alias,
)
== 7
)
assert self.sessions_errored.run_post_query_function(series, alias=alias, idx=0) == 4
assert self.sessions_errored.run_post_query_function(series, alias=alias, idx=4) == 3
| CompositeEntityDerivedMetricTestCase |
python | doocs__leetcode | solution/2200-2299/2207.Maximize Number of Subsequences in a String/Solution.py | {
"start": 0,
"end": 313
} | class ____:
def maximumSubsequenceCount(self, text: str, pattern: str) -> int:
ans = x = y = 0
for c in text:
if c == pattern[1]:
y += 1
ans += x
if c == pattern[0]:
x += 1
ans += max(x, y)
return ans
| Solution |
python | kubernetes-client__python | kubernetes/base/dynamic/exceptions.py | {
"start": 2600,
"end": 2693
} | class ____(Exception):
""" Resource was not found in available APIs """
| ResourceNotFoundError |
python | getsentry__sentry | tests/sentry_plugins/github/test_plugin.py | {
"start": 466,
"end": 3988
} | class ____(PluginTestCase):
@cached_property
def plugin(self) -> GitHubPlugin:
return GitHubPlugin()
@cached_property
def request(self) -> RequestFactory:
return RequestFactory()
def test_get_issue_label(self) -> None:
group = self.create_group(message="Hello world", culprit="foo.bar")
assert self.plugin.get_issue_label(group, "1") == "GH-1"
def test_get_issue_url(self) -> None:
self.plugin.set_option("repo", "getsentry/sentry", self.project)
group = self.create_group(message="Hello world", culprit="foo.bar")
assert (
self.plugin.get_issue_url(group, "1") == "https://github.com/getsentry/sentry/issues/1"
)
def test_is_configured(self) -> None:
assert self.plugin.is_configured(self.project) is False
self.plugin.set_option("repo", "getsentry/sentry", self.project)
assert self.plugin.is_configured(self.project) is True
@responses.activate
def test_create_issue(self) -> None:
responses.add(
responses.POST,
"https://api.github.com/repos/getsentry/sentry/issues",
json={"number": 1, "title": "Hello world"},
)
self.plugin.set_option("repo", "getsentry/sentry", self.project)
group = self.create_group(message="Hello world", culprit="foo.bar")
request = drf_request_from_request(self.request.get("/"))
request.user = AnonymousUser()
form_data = {"title": "Hello", "description": "Fix this."}
with pytest.raises(PluginError):
self.plugin.create_issue(request, group, form_data)
request.user = self.user
self.login_as(self.user)
self.create_usersocialauth(
user=self.user, provider=self.plugin.auth_provider, extra_data={"access_token": "foo"}
)
assert self.plugin.create_issue(request, group, form_data) == 1
request = responses.calls[0].request
assert request.headers["Authorization"] == "Bearer foo"
payload = orjson.loads(request.body)
assert payload == {"title": "Hello", "body": "Fix this.", "assignee": None}
@responses.activate
def test_link_issue(self) -> None:
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/sentry/issues/1",
json={"number": 1, "title": "Hello world"},
)
responses.add(
responses.POST,
"https://api.github.com/repos/getsentry/sentry/issues/1/comments",
json={"body": "Hello"},
)
self.plugin.set_option("repo", "getsentry/sentry", self.project)
group = self.create_group(message="Hello world", culprit="foo.bar")
request = drf_request_from_request(self.request.get("/"))
request.user = AnonymousUser()
form_data = {"comment": "Hello", "issue_id": "1"}
with pytest.raises(PluginError):
self.plugin.link_issue(request, group, form_data)
request.user = self.user
self.login_as(self.user)
self.create_usersocialauth(
user=self.user, provider=self.plugin.auth_provider, extra_data={"access_token": "foo"}
)
assert self.plugin.link_issue(request, group, form_data) == {"title": "Hello world"}
request = responses.calls[-1].request
assert request.headers["Authorization"] == "Bearer foo"
payload = orjson.loads(request.body)
assert payload == {"body": "Hello"}
| GitHubPluginTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox33.py | {
"start": 315,
"end": 864
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox33.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"text_rotation": 270})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | joke2k__faker | tests/providers/__init__.py | {
"start": 120,
"end": 10143
} | class ____:
"""Test base provider methods"""
def test_locale(self, faker, num_samples):
locales = [
f"{language}_{region}"
for language, regions in BaseProvider.language_locale_codes.items()
for region in regions
]
for _ in range(num_samples):
locale = faker.locale()
assert locale in locales
def test_language_code(self, faker, num_samples):
language_codes = list(BaseProvider.language_locale_codes)
for _ in range(num_samples):
language_code = faker.language_code()
assert language_code in language_codes
def test_random_digit(self, faker, num_samples):
samples = [faker.random_digit() for _ in range(num_samples * 10)]
assert set(samples) == set(range(10))
def test_random_digit_not_null(self, faker, num_samples):
samples = [faker.random_digit_not_null() for _ in range(num_samples * 10)]
assert set(samples) == set(range(1, 10))
def test_random_digit_above_two(self, faker, num_samples):
samples = [faker.random_digit_above_two() for _ in range(num_samples * 10)]
assert set(samples) == set(range(2, 10))
def test_random_digit_or_empty(self, faker, num_samples):
expected = set(range(10))
expected.add("")
samples = [faker.random_digit_or_empty() for _ in range(num_samples * 10)]
assert set(samples) == expected
def test_random_digit_not_null_or_empty(self, faker, num_samples):
expected = set(range(1, 10))
expected.add("")
samples = [faker.random_digit_not_null_or_empty() for _ in range(num_samples * 10)]
assert set(samples) == expected
def test_random_number(self, faker):
number = faker.random_number(10, True)
assert len(str(number)) == 10
# Digits parameter < 0
with pytest.raises(ValueError):
number = faker.random_number(-1, True)
# Digits parameter < 1 with fix_len=True
with pytest.raises(ValueError):
number = faker.random_number(0, True)
@pytest.mark.parametrize(
"text,pattern",
[
("", r""),
("abcd", r"abcd"),
("#" * 100, r"[0-9]{100}"),
("%" * 100, r"[1-9]{100}"),
("!" * 100, r"[0-9]{,100}"),
("@" * 100, r"[0-9]{,100}"),
(
"##!abc %%@def##!" * 100,
r"(?:[0-9]{2,3}abc [1-9]{2,3}def[0-9]{2,3}){100}",
),
(
"#@@#^?あ5漢!!%%@" * 100,
r"(?:\d[1-9]{,2}\d\^\?あ5漢\d{,2}[1-9]{2}[1-9]*){100}",
),
],
ids=[
"empty_string",
"no_valid_placeholders",
"only_number_signs",
"only_percent_signs",
"only_exclamation_marks",
"only_at_symbols",
"with_ascii_characters",
"with_other_symbols_and_non_ascii",
],
)
def test_numerify(self, faker, num_samples, text, pattern):
for _ in range(num_samples):
numerified = faker.numerify(text)
assert re.fullmatch(pattern, numerified)
@pytest.mark.parametrize(
"text,letters,pattern",
[
("", string.ascii_letters, r""),
("abcd", string.ascii_letters, r"abcd"),
("???", string.ascii_letters, r"[0-9a-zA-Z]{3}"),
("???", "aBcDeFgHiJ12345", r"[1-5aBcDeFgHiJ]{3}"),
("??Xr^#7p??", "AbCdخあ5漢7Я", r"[AbCdخあ5漢7Я]{2}Xr\^#7p[AbCdخあ5漢7Я]{2}"),
],
ids=[
"empty_string",
"no_valid_placeholders",
"letters_using_whole_ascii",
"letters_using_ascii_subset",
"pattern_with_other_symbols_and_letters_using_non_ascii",
],
)
def test_lexify(self, faker, num_samples, text, letters, pattern):
for _ in range(num_samples):
lexified = faker.lexify(text, letters=letters)
assert re.fullmatch(pattern, lexified)
@pytest.mark.parametrize(
"text,letters,pattern",
[
("", string.ascii_letters, r""),
("abcd", string.ascii_letters, r"abcd"),
("???", string.ascii_letters, r"[0-9a-zA-Z]{3}"),
("???", "aBcDeFgHiJ12345", r"[1-5aBcDeFgHiJ]{3}"),
("#%!@???", string.ascii_letters, r"\d[1-9]\d*[1-9]*[0-9a-zA-Z]{3}"),
("#%!@???", "aBcDeFgHiJ12345", r"\d[1-9]\d*[1-9]*[1-5aBcDeFgHiJ]{3}"),
(
"#%!@??Xr7p??",
"AbCdخあ5漢7Я",
r"\d[1-9]\d*[1-9]*[AbCdخあ5漢7Я]{2}Xr7p[AbCdخあ5漢7Я]{2}",
),
],
ids=[
"empty_string",
"no_valid_placeholders",
"simple_pattern_and_letters_using_whole_ascii",
"simple_pattern_and_letters_using_ascii_subset",
"more_complex_pattern_and_letters_using_whole_ascii",
"more_complex_pattern_and_letters_using_ascii_subset",
"more_complex_pattern_with_other_symbols_and_letters_using_non_ascii",
],
)
def test_bothify(self, faker, num_samples, text, letters, pattern):
for _ in range(num_samples):
bothified = faker.bothify(text, letters=letters)
assert re.fullmatch(pattern, bothified)
@pytest.mark.parametrize(
"text,upper,pattern",
[
("", False, r""),
("", True, r""),
("abcd", False, r"abcd"),
("abcd", True, r"abcd"),
("^^^^", False, r"[0-9a-f]{4}"),
("^^^^", True, r"[0-9A-F]{4}"),
(
"Abc ^^^ %^^^?あ5漢!#^^",
False,
r"Abc [0-9a-f]{3} %[0-9a-f]{3}\?あ5漢!#[0-9a-f]{2}",
),
(
"Abc ^^^ %^^^?あ5漢!#^^",
True,
r"Abc [0-9A-F]{3} %[0-9A-F]{3}\?あ5漢!#[0-9A-F]{2}",
),
],
ids=[
"empty_string_lowercase",
"empty_string_uppercase",
"no_circumflex_lowercase",
"no_circumflex_uppercase",
"simple_pattern_lowercase",
"simple_pattern_uppercase",
"complex_pattern_lowercase",
"complex_pattern_uppercase",
],
)
def test_hexify(self, faker, num_samples, text, upper, pattern):
for _ in range(num_samples):
hexified = faker.hexify(text, upper=upper)
assert re.fullmatch(pattern, hexified)
def test_random_letter(self, faker, num_samples):
for _ in range(num_samples):
letter = faker.random_letter()
assert letter.isalpha()
@pytest.mark.parametrize(
"length",
[0, 1, 2],
ids=[
"empty_list",
"list_with_one_element",
"list_with_two_elements",
],
)
def test_random_letters(self, faker, length):
letters = faker.random_letters(length=length)
assert len(letters) == length
assert isinstance(letters, list)
for letter in letters:
assert letter.isalpha()
def test_random_lowercase_letter(self, faker, num_samples):
for _ in range(num_samples):
letter = faker.random_lowercase_letter()
assert letter.isalpha() and letter.lower() == letter
def test_random_uppercase_letter(self, faker, num_samples):
for _ in range(num_samples):
letter = faker.random_uppercase_letter()
assert letter.isalpha() and letter.upper() == letter
def test_random_element(self, faker, num_samples):
# dicts not allowed because they introduce dependency on PYTHONHASHSEED
with pytest.raises(ValueError):
faker.random_element({})
choices = ("a", "b", "c", "d")
for _ in range(num_samples):
assert faker.random_element(choices) in choices
choices = OrderedDict([("a", 5), ("b", 2), ("c", 2), ("d", 1)])
for _ in range(num_samples):
assert faker.random_element(choices) in choices
choices = OrderedDict([("a", 0.5), ("b", 0.2), ("c", 0.2), ("d", 0.1)])
for _ in range(num_samples):
assert faker.random_element(choices) in choices
def test_random_sample(self, faker):
# Too many items requested
with pytest.raises(ValueError):
faker.random_sample("abcde", 6)
# Same length
sample = faker.random_sample("abcd", 4)
assert sorted(sample) == list("abcd")
sample = faker.random_sample("abcde", 5)
assert sorted(sample) == list("abcde")
# Length = 3
sample = faker.random_sample("abcde", 3)
assert len(sample) == 3
assert set(sample).issubset(set("abcde"))
# Length = 1
sample = faker.random_sample("abcde", 1)
assert len(sample) == 1
assert set(sample).issubset(set("abcde"))
# Length = 0
sample = faker.random_sample("abcde", 0)
assert sample == []
def test_randomize_nb_elements(self, faker, num_samples):
assert faker.randomize_nb_elements(number=1, le=True, ge=True) == 1
assert faker.randomize_nb_elements(le=True, ge=True) == 10
assert faker.randomize_nb_elements(min=42) == 42
assert faker.randomize_nb_elements(max=1) == 1
number = 9999
lower_bound = int(number * 0.6)
upper_bound = int(number * 1.4)
for _ in range(num_samples):
res = faker.randomize_nb_elements(number=number, le=True)
assert res >= lower_bound
assert res <= number, f"{res!r} is not <= than {number!r}"
for _ in range(num_samples):
res = faker.randomize_nb_elements(number=number, ge=True)
assert number <= res <= upper_bound
for _ in range(num_samples):
res = faker.randomize_nb_elements(number=number)
assert lower_bound <= res <= upper_bound
| TestBaseProvider |
python | django__django | tests/custom_managers/models.py | {
"start": 6075,
"end": 6353
} | class ____(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.OneToOneField(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
| OneToOneRestrictedModel |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_metadata.py | {
"start": 1965,
"end": 5020
} | class ____:
def test_fail_when_no_language_tags(self, mocker):
# Arrange
connector = mocker.MagicMock(metadata={"tags": []})
# Act
result = metadata.CheckConnectorLanguageTag()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert result.message == "Language tag is missing in the metadata file"
def test_fail_when_multiple_language_tags(self, mocker):
# Arrange
connector = mocker.MagicMock(metadata={"tags": ["language:python", "language:java"]})
# Act
result = metadata.CheckConnectorLanguageTag()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert result.message == "Multiple language tags found in the metadata file: ['language:python', 'language:java']"
def test_fail_when_java_tag_on_python_connector(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(metadata={"tags": ["language:java"]}, code_directory=tmp_path)
code_directory = tmp_path
(code_directory / consts.PYPROJECT_FILE_NAME).touch()
# Act
result = metadata.CheckConnectorLanguageTag()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "Expected language tag 'language:python'" in result.message
assert "but found 'language:java'" in result.message
def test_fail_when_python_tag_on_java_connector(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(metadata={"tags": ["language:python"]}, code_directory=tmp_path)
code_directory = tmp_path
(code_directory / consts.GRADLE_FILE_NAME).touch()
# Act
result = metadata.CheckConnectorLanguageTag()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "Expected language tag 'language:java'" in result.message
assert "but found 'language:python'" in result.message
def test_pass_when_python(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(metadata={"tags": ["language:python"]}, code_directory=tmp_path)
code_directory = tmp_path
(code_directory / consts.PYPROJECT_FILE_NAME).touch()
# Act
result = metadata.CheckConnectorLanguageTag()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "Language tag language:python is present in the metadata file"
def test_pass_when_java(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(metadata={"tags": ["language:java"]}, code_directory=tmp_path)
code_directory = tmp_path
(code_directory / consts.GRADLE_FILE_NAME).touch()
# Act
result = metadata.CheckConnectorLanguageTag()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "Language tag language:java is present in the metadata file"
| TestCheckConnectorLanguageTag |
python | Pylons__pyramid | tests/test_security.py | {
"start": 16772,
"end": 16863
} | class ____:
def __init__(self, *arg, **kw):
self.__dict__.update(kw)
| DummyContext |
python | paramiko__paramiko | paramiko/server.py | {
"start": 26562,
"end": 30457
} | class ____(threading.Thread):
"""
Handler for a subsystem in server mode. If you create a subclass of this
class and pass it to `.Transport.set_subsystem_handler`, an object of this
class will be created for each request for this subsystem. Each new object
will be executed within its own new thread by calling `start_subsystem`.
When that method completes, the channel is closed.
For example, if you made a subclass ``MP3Handler`` and registered it as the
handler for subsystem ``"mp3"``, then whenever a client has successfully
authenticated and requests subsystem ``"mp3"``, an object of class
``MP3Handler`` will be created, and `start_subsystem` will be called on
it from a new thread.
"""
def __init__(self, channel, name, server):
"""
Create a new handler for a channel. This is used by `.ServerInterface`
to start up a new handler when a channel requests this subsystem. You
don't need to override this method, but if you do, be sure to pass the
``channel`` and ``name`` parameters through to the original
``__init__`` method here.
:param .Channel channel: the channel associated with this
subsystem request.
:param str name: name of the requested subsystem.
:param .ServerInterface server:
the server object for the session that started this subsystem
"""
threading.Thread.__init__(self, target=self._run)
self.__channel = channel
self.__transport = channel.get_transport()
self.__name = name
self.__server = server
def get_server(self):
"""
Return the `.ServerInterface` object associated with this channel and
subsystem.
"""
return self.__server
def _run(self):
try:
self.__transport._log(
DEBUG, "Starting handler for subsystem {}".format(self.__name)
)
self.start_subsystem(self.__name, self.__transport, self.__channel)
except Exception as e:
self.__transport._log(
ERROR,
'Exception in subsystem handler for "{}": {}'.format(
self.__name, e
),
)
self.__transport._log(ERROR, util.tb_strings())
try:
self.finish_subsystem()
except:
pass
def start_subsystem(self, name, transport, channel):
"""
Process an ssh subsystem in server mode. This method is called on a
new object (and in a new thread) for each subsystem request. It is
assumed that all subsystem logic will take place here, and when the
subsystem is finished, this method will return. After this method
returns, the channel is closed.
The combination of ``transport`` and ``channel`` are unique; this
handler corresponds to exactly one `.Channel` on one `.Transport`.
.. note::
It is the responsibility of this method to exit if the underlying
`.Transport` is closed. This can be done by checking
`.Transport.is_active` or noticing an EOF on the `.Channel`. If
this method loops forever without checking for this case, your
Python interpreter may refuse to exit because this thread will
still be running.
:param str name: name of the requested subsystem.
:param .Transport transport: the server-mode `.Transport`.
:param .Channel channel: the channel associated with this subsystem
request.
"""
pass
def finish_subsystem(self):
"""
Perform any cleanup at the end of a subsystem. The default
implementation just closes the channel.
.. versionadded:: 1.1
"""
self.__channel.close()
| SubsystemHandler |
python | ray-project__ray | python/ray/tune/examples/tf_mnist_example.py | {
"start": 1212,
"end": 5060
} | class ____(tune.Trainable):
def setup(self, config):
# IMPORTANT: See the above note.
import tensorflow as tf
# Use FileLock to avoid race conditions.
with FileLock(os.path.expanduser("~/.tune.lock")):
(x_train, y_train), (x_test, y_test) = load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channels dimension
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
self.train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
self.train_ds = self.train_ds.shuffle(10000).batch(config.get("batch", 32))
self.test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
self.model = MyModel(hiddens=config.get("hiddens", 128))
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
self.optimizer = tf.keras.optimizers.Adam()
self.train_loss = tf.keras.metrics.Mean(name="train_loss")
self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name="train_accuracy"
)
self.test_loss = tf.keras.metrics.Mean(name="test_loss")
self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
name="test_accuracy"
)
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = self.model(images)
loss = self.loss_object(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(
zip(gradients, self.model.trainable_variables)
)
self.train_loss(loss)
self.train_accuracy(labels, predictions)
@tf.function
def test_step(images, labels):
predictions = self.model(images)
t_loss = self.loss_object(labels, predictions)
self.test_loss(t_loss)
self.test_accuracy(labels, predictions)
self.tf_train_step = train_step
self.tf_test_step = test_step
def save_checkpoint(self, checkpoint_dir: str):
return None
def load_checkpoint(self, checkpoint):
return None
def step(self):
self.train_loss.reset_states()
self.train_accuracy.reset_states()
self.test_loss.reset_states()
self.test_accuracy.reset_states()
for idx, (images, labels) in enumerate(self.train_ds):
if idx > MAX_TRAIN_BATCH: # This is optional and can be removed.
break
self.tf_train_step(images, labels)
for test_images, test_labels in self.test_ds:
self.tf_test_step(test_images, test_labels)
# It is important to return tf.Tensors as numpy objects.
return {
"epoch": self.iteration,
"loss": self.train_loss.result().numpy(),
"accuracy": self.train_accuracy.result().numpy() * 100,
"test_loss": self.test_loss.result().numpy(),
"mean_accuracy": self.test_accuracy.result().numpy() * 100,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
args, _ = parser.parse_known_args()
tuner = tune.Tuner(
MNISTTrainable,
tune_config=tune.TuneConfig(
metric="test_loss",
mode="min",
),
run_config=tune.RunConfig(
stop={"training_iteration": 5 if args.smoke_test else 50},
verbose=1,
),
param_space={"hiddens": tune.grid_search([32, 64, 128])},
)
results = tuner.fit()
print("Best hyperparameters found were: ", results.get_best_result().config)
| MNISTTrainable |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/dependency.py | {
"start": 1738,
"end": 3998
} | class ____(
NamedTuple(
"Node",
[
("name", PublicAttr[str]),
("alias", PublicAttr[Optional[str]]),
("tags", PublicAttr[Mapping[str, Any]]),
("hook_defs", PublicAttr[AbstractSet[HookDefinition]]),
("retry_policy", PublicAttr[Optional[RetryPolicy]]),
],
)
):
"""Identifies an instance of a node in a graph dependency structure.
Args:
name (str): Name of the node of which this is an instance.
alias (Optional[str]): Name specific to this instance of the node. Necessary when there are
multiple instances of the same node.
tags (Optional[Dict[str, Any]]): Optional tags values to extend or override those
set on the node definition.
hook_defs (Optional[AbstractSet[HookDefinition]]): A set of hook definitions applied to the
node instance.
Examples:
In general, users should prefer not to construct this class directly or use the
:py:class:`JobDefinition` API that requires instances of this class. Instead, use the
:py:func:`@job <job>` API:
.. code-block:: python
from dagster import job
@job
def my_job():
other_name = some_op.alias('other_name')
some_graph(other_name(some_op))
"""
def __new__(
cls,
name: str,
alias: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
retry_policy: Optional[RetryPolicy] = None,
):
return super().__new__(
cls,
name=check.str_param(name, "name"),
alias=check.opt_str_param(alias, "alias"),
tags=check.opt_mapping_param(tags, "tags", value_type=str, key_type=str),
hook_defs=check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition),
retry_policy=check.opt_inst_param(retry_policy, "retry_policy", RetryPolicy),
)
# Needs to be hashable because this class is used as a key in dependencies dicts
def __hash__(self) -> int:
if not hasattr(self, "_hash"):
self._hash = hash_collection(self)
return self._hash
| NodeInvocation |
python | google__pytype | pytype/module_utils_test.py | {
"start": 1907,
"end": 3813
} | class ____(unittest.TestCase):
"""Test module_utils.infer_module."""
def assert_module_equal(self, module, path, target, name, kind="Local"):
self.assertEqual(
module.path.rstrip(path_utils.sep), path.rstrip(path_utils.sep)
)
self.assertEqual(module.target, target)
self.assertEqual(module.name, name)
self.assertEqual(module.kind, kind)
def test_simple_name(self):
mod = module_utils.infer_module(
expand(file_utils.replace_separator("foo/bar.py")), [expand("foo")]
)
self.assert_module_equal(mod, expand("foo"), "bar.py", "bar")
def test_name_in_package(self):
mod = module_utils.infer_module(
expand(file_utils.replace_separator("foo/bar/baz.py")), [expand("foo")]
)
self.assert_module_equal(
mod,
expand("foo"),
file_utils.replace_separator("bar/baz.py"),
"bar.baz",
)
def test_multiple_paths(self):
pythonpath = [
expand("foo"),
expand(file_utils.replace_separator("bar/baz")),
expand("bar"),
]
mod = module_utils.infer_module(
expand(file_utils.replace_separator("bar/baz/qux.py")), pythonpath
)
self.assert_module_equal(
mod, expand(file_utils.replace_separator("bar/baz")), "qux.py", "qux"
)
mod = module_utils.infer_module(
expand(file_utils.replace_separator("bar/qux.py")), pythonpath
)
self.assert_module_equal(mod, expand("bar"), "qux.py", "qux")
def test_not_found(self):
mod = module_utils.infer_module(
expand(file_utils.replace_separator("bar/baz.py")), ["foo"]
)
expected_target = expand(file_utils.replace_separator("bar/baz.py"))
expected_name, _ = path_utils.splitext(
expected_target.replace(path_utils.sep, ".")
)
self.assert_module_equal(mod, "", expected_target, expected_name)
if __name__ == "__main__":
unittest.main()
| TestInferModule |
python | ansible__ansible | lib/ansible/_internal/_errors/_error_utils.py | {
"start": 1131,
"end": 1353
} | class ____(_ambient_context.AmbientContextBase):
"""When active, this context will redact annotated source lines, showing only the origin."""
@dataclasses.dataclass(kw_only=True, frozen=True)
| RedactAnnotatedSourceContext |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 27327,
"end": 28053
} | class ____(object):
def __init__(self, ftype, asm, constraint, side_effect=False):
self.type = ftype.return_type
self.function_type = ftype
self.asm = asm
self.constraint = constraint
self.side_effect = side_effect
def descr(self, buf):
sideeffect = 'sideeffect' if self.side_effect else ''
fmt = 'asm {sideeffect} "{asm}", "{constraint}"'
buf.append(fmt.format(sideeffect=sideeffect, asm=self.asm,
constraint=self.constraint))
def get_reference(self):
buf = []
self.descr(buf)
return "".join(buf)
def __str__(self):
return "{0} {1}".format(self.type, self.get_reference())
| InlineAsm |
python | ray-project__ray | python/ray/train/_internal/dl_predictor.py | {
"start": 403,
"end": 3570
} | class ____(Predictor):
@abc.abstractmethod
def _arrays_to_tensors(
self,
numpy_arrays: Union[np.ndarray, Dict[str, np.ndarray]],
dtype: Optional[Union[TensorDtype, Dict[str, TensorDtype]]],
) -> Union[TensorType, Dict[str, TensorType]]:
"""Converts a NumPy ndarray batch to the tensor type for the DL framework.
Args:
numpy_array: The numpy array to convert to a tensor.
dtype: The tensor dtype to use when creating the DL tensor.
ndarray: A (dict of) NumPy ndarray(s) that we wish to convert to a (dict of)
tensor(s).
dtype: A (dict of) tensor dtype(s) to use when creating the DL tensor; if
None, the dtype will be inferred from the NumPy ndarray data.
Returns:
A deep learning framework specific tensor.
"""
raise NotImplementedError
@abc.abstractmethod
def _tensor_to_array(self, tensor: TensorType) -> np.ndarray:
"""Converts tensor framework specific tensor to a numpy array.
Args:
tensor: A framework specific tensor.
Returns:
A numpy array representing the input tensor.
"""
raise NotImplementedError
@abc.abstractmethod
@DeveloperAPI
def call_model(
self, inputs: Union[TensorType, Dict[str, TensorType]]
) -> Union[TensorType, Dict[str, TensorType]]:
"""Inputs the tensor to the model for this Predictor and returns the result.
Args:
inputs: The tensor to input to the model.
Returns:
A tensor or dictionary of tensors containing the model output.
"""
raise NotImplementedError
@classmethod
@DeveloperAPI
def preferred_batch_format(cls) -> BatchFormat:
return BatchFormat.NUMPY
def _predict_pandas(
self,
data: pd.DataFrame,
dtype: Optional[Union[TensorDtype, Dict[str, TensorDtype]]],
) -> pd.DataFrame:
numpy_input = _convert_pandas_to_batch_type(
data,
BatchFormat.NUMPY,
self._cast_tensor_columns,
)
numpy_output = self._predict_numpy(numpy_input, dtype)
return _convert_batch_type_to_pandas(numpy_output)
def _predict_numpy(
self,
data: Union[np.ndarray, Dict[str, np.ndarray]],
dtype: Optional[Union[TensorDtype, Dict[str, TensorDtype]]],
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
# Single column selection return numpy array so preprocessors can be
# reused in both training and prediction
if isinstance(data, dict) and len(data) == 1:
data = next(iter(data.values()))
model_input = self._arrays_to_tensors(data, dtype)
model_output = self.call_model(model_input)
# TODO (jiaodong): Investigate perf implication of this.
# Move DL Tensor to CPU and convert to numpy.
if isinstance(model_output, dict):
return {k: self._tensor_to_array(v) for k, v in model_output.items()}
else:
return {"predictions": self._tensor_to_array(model_output)}
| DLPredictor |
python | RaRe-Technologies__gensim | gensim/test/test_parsing.py | {
"start": 1630,
"end": 4059
} | class ____(unittest.TestCase):
def test_strip_numeric(self):
self.assertEqual(strip_numeric("salut les amis du 59"), "salut les amis du ")
def test_strip_short(self):
self.assertEqual(strip_short("salut les amis du 59", 3), "salut les amis")
def test_strip_tags(self):
self.assertEqual(strip_tags("<i>Hello</i> <b>World</b>!"), "Hello World!")
def test_strip_multiple_whitespaces(self):
self.assertEqual(strip_multiple_whitespaces("salut les\r\nloulous!"), "salut les loulous!")
def test_strip_non_alphanum(self):
self.assertEqual(strip_non_alphanum("toto nf-kappa titi"), "toto nf kappa titi")
def test_split_alphanum(self):
self.assertEqual(split_alphanum("toto diet1 titi"), "toto diet 1 titi")
self.assertEqual(split_alphanum("toto 1diet titi"), "toto 1 diet titi")
def test_strip_stopwords(self):
self.assertEqual(remove_stopwords("the world is square"), "world square")
# confirm redifining the global `STOPWORDS` working
with mock.patch('gensim.parsing.preprocessing.STOPWORDS', frozenset(["the"])):
self.assertEqual(remove_stopwords("the world is square"), "world is square")
def test_strip_stopword_tokens(self):
self.assertEqual(remove_stopword_tokens(["the", "world", "is", "sphere"]), ["world", "sphere"])
# confirm redifining the global `STOPWORDS` working
with mock.patch('gensim.parsing.preprocessing.STOPWORDS', frozenset(["the"])):
self.assertEqual(
remove_stopword_tokens(["the", "world", "is", "sphere"]),
["world", "is", "sphere"]
)
def test_strip_short_tokens(self):
self.assertEqual(remove_short_tokens(["salut", "les", "amis", "du", "59"], 3), ["salut", "les", "amis"])
def test_split_on_space(self):
self.assertEqual(split_on_space(" salut les amis du 59 "), ["salut", "les", "amis", "du", "59"])
def test_stem_text(self):
target = \
"while it is quit us to be abl to search a larg " + \
"collect of document almost instantli for a joint occurr " + \
"of a collect of exact words, for mani search purposes, " + \
"a littl fuzzi would help."
self.assertEqual(stem_text(doc5), target)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
unittest.main()
| TestPreprocessing |
python | getsentry__sentry | src/sentry/replays/usecases/query/__init__.py | {
"start": 7589,
"end": 7663
} | class ____:
limit: int
offset: int
@dataclasses.dataclass
| Paginators |
python | run-llama__llama_index | llama-index-core/llama_index/core/response_synthesizers/accumulate.py | {
"start": 695,
"end": 5162
} | class ____(BaseSynthesizer):
"""Accumulate responses from multiple text chunks."""
def __init__(
self,
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
prompt_helper: Optional[PromptHelper] = None,
text_qa_template: Optional[BasePromptTemplate] = None,
output_cls: Optional[Type[BaseModel]] = None,
streaming: bool = False,
use_async: bool = False,
) -> None:
super().__init__(
llm=llm,
callback_manager=callback_manager,
prompt_helper=prompt_helper,
streaming=streaming,
)
self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT_SEL
self._use_async = use_async
self._output_cls = output_cls
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"text_qa_template": self._text_qa_template}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "text_qa_template" in prompts:
self._text_qa_template = prompts["text_qa_template"]
def flatten_list(self, md_array: List[List[Any]]) -> List[Any]:
return [item for sublist in md_array for item in sublist]
def _format_response(self, outputs: List[Any], separator: str) -> str:
responses: List[str] = []
for response in outputs:
responses.append(response or "Empty Response")
return separator.join(
[f"Response {index + 1}: {item}" for index, item in enumerate(responses)]
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
separator: str = "\n---------------------\n",
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Apply the same prompt to text chunks and return async responses."""
if self._streaming:
raise ValueError("Unable to stream in Accumulate response mode")
tasks = [
self._give_responses(
query_str, text_chunk, use_async=True, **response_kwargs
)
for text_chunk in text_chunks
]
flattened_tasks = self.flatten_list(tasks)
outputs = await asyncio.gather(*flattened_tasks)
return self._format_response(outputs, separator)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
separator: str = "\n---------------------\n",
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Apply the same prompt to text chunks and return responses."""
if self._streaming:
raise ValueError("Unable to stream in Accumulate response mode")
tasks = [
self._give_responses(
query_str, text_chunk, use_async=self._use_async, **response_kwargs
)
for text_chunk in text_chunks
]
outputs = self.flatten_list(tasks)
if self._use_async:
outputs = run_async_tasks(outputs)
return self._format_response(outputs, separator)
def _give_responses(
self,
query_str: str,
text_chunk: str,
use_async: bool = False,
**response_kwargs: Any,
) -> List[Any]:
"""Give responses given a query and a corresponding text chunk."""
text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
text_chunks = self._prompt_helper.repack(
text_qa_template, [text_chunk], llm=self._llm
)
predictor: Callable
if self._output_cls is None:
predictor = self._llm.apredict if use_async else self._llm.predict
return [
predictor(
text_qa_template,
context_str=cur_text_chunk,
**response_kwargs,
)
for cur_text_chunk in text_chunks
]
else:
predictor = (
self._llm.astructured_predict
if use_async
else self._llm.structured_predict
)
return [
predictor(
self._output_cls,
text_qa_template,
context_str=cur_text_chunk,
**response_kwargs,
)
for cur_text_chunk in text_chunks
]
| Accumulate |
python | getsentry__sentry | src/sentry/incidents/endpoints/serializers/workflow_engine_incident.py | {
"start": 1282,
"end": 7211
} | class ____(Serializer):
def __init__(self, expand=None):
self.expand = expand or []
priority_to_incident_status: ClassVar[dict[int, int]] = {
PriorityLevel.HIGH.value: IncidentStatus.CRITICAL.value,
PriorityLevel.MEDIUM.value: IncidentStatus.WARNING.value,
}
def get_incident_status(self, priority: int | None, date_ended: datetime | None) -> int:
if priority is None:
raise ValueError("Priority is required to get an incident status")
if date_ended:
return IncidentStatus.CLOSED.value
return self.priority_to_incident_status[priority]
def get_attrs(
self,
item_list: Sequence[GroupOpenPeriod],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> defaultdict[GroupOpenPeriod, dict[str, Any]]:
from sentry.incidents.endpoints.serializers.workflow_engine_detector import (
WorkflowEngineDetectorSerializer,
)
results: DefaultDict[GroupOpenPeriod, dict[str, Any]] = defaultdict()
open_periods_to_detectors = self.get_open_periods_to_detectors(item_list)
alert_rules = {
alert_rule["id"]: alert_rule # we are serializing detectors to look like alert rules
for alert_rule in serialize(
list(open_periods_to_detectors.values()),
user,
WorkflowEngineDetectorSerializer(expand=self.expand),
)
}
alert_rule_detectors = AlertRuleDetector.objects.filter(
detector__in=list(open_periods_to_detectors.values())
).values_list("alert_rule_id", "detector_id")
detector_ids_to_alert_rule_ids = {}
for alert_rule_id, detector_id in alert_rule_detectors:
detector_ids_to_alert_rule_ids[detector_id] = alert_rule_id
for open_period in item_list:
detector_id = open_periods_to_detectors[open_period].id
if detector_id in detector_ids_to_alert_rule_ids:
alert_rule_id = detector_ids_to_alert_rule_ids[detector_id]
else:
alert_rule_id = get_fake_id_from_object_id(detector_id)
results[open_period] = {"projects": [open_period.project.slug]}
results[open_period]["alert_rule"] = alert_rules.get(str(alert_rule_id))
if "activities" in self.expand:
gopas = list(
GroupOpenPeriodActivity.objects.filter(group_open_period__in=item_list)[:1000]
)
open_period_activities = defaultdict(list)
# XXX: the incident endpoint is undocumented, so we aren' on the hook for supporting
# any specific payloads. Since this isn't used on the Sentry side for notification charts,
# I've opted to just use the GroupOpenPeriodActivity serializer.
for gopa, serialized_activity in zip(
gopas,
serialize(gopas, user=user, serializer=GroupOpenPeriodActivitySerializer()),
):
open_period_activities[gopa.group_open_period_id].append(serialized_activity)
for open_period in item_list:
results[open_period]["activities"] = open_period_activities[open_period.id]
return results
def get_open_periods_to_detectors(
self, open_periods: Sequence[GroupOpenPeriod]
) -> dict[GroupOpenPeriod, Detector]:
# open period -> group -> detector via detectorgroup
groups = [op.group for op in open_periods]
group_to_open_periods = defaultdict(list)
for op in open_periods:
group_to_open_periods[op.group].append(op)
detector_groups = DetectorGroup.objects.filter(group__in=groups).select_related(
"group", "detector"
)
groups_to_detectors = {}
for dg in detector_groups:
if dg.detector is not None:
groups_to_detectors[dg.group] = dg.detector
open_periods_to_detectors = {}
for group in group_to_open_periods:
for op in group_to_open_periods[group]:
open_periods_to_detectors[op] = groups_to_detectors[group]
return open_periods_to_detectors
def serialize(
self,
obj: GroupOpenPeriod,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs,
) -> IncidentSerializerResponse:
"""
Temporary serializer to take a GroupOpenPeriod and serialize it for the old incident endpoint
"""
try:
igop = IncidentGroupOpenPeriod.objects.get(group_open_period=obj)
incident_id = igop.incident_id
incident_identifier = igop.incident_identifier
except IncidentGroupOpenPeriod.DoesNotExist:
incident_id = get_fake_id_from_object_id(obj.id)
incident_identifier = incident_id
date_closed = obj.date_ended.replace(second=0, microsecond=0) if obj.date_ended else None
return {
"id": str(incident_id),
"identifier": str(incident_identifier),
"organizationId": str(obj.project.organization.id),
"projects": attrs["projects"],
"alertRule": attrs["alert_rule"],
"activities": attrs["activities"] if "activities" in self.expand else None,
"status": self.get_incident_status(obj.group.priority, obj.date_ended),
"statusMethod": (
IncidentStatusMethod.RULE_TRIGGERED.value
if not date_closed
else IncidentStatusMethod.RULE_UPDATED.value
),
"type": IncidentType.ALERT_TRIGGERED.value,
"title": obj.group.title,
"dateStarted": obj.date_started,
"dateDetected": obj.date_started,
"dateCreated": obj.date_added,
"dateClosed": date_closed,
}
| WorkflowEngineIncidentSerializer |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/scatter_nd_ops_test.py | {
"start": 19203,
"end": 20027
} | class ____(StatefulScatterNdTest):
def setUp(self):
super().setUp()
config.enable_op_determinism()
def tearDown(self):
super().tearDown()
config.disable_op_determinism()
def testDeterminism(self):
ref = variables.Variable(array_ops.zeros([1]))
indices = array_ops.zeros([100000, 1], dtypes.int32)
values = np.random.randn(100000)
self.evaluate(variables.global_variables_initializer())
val = self.evaluate(state_ops.scatter_nd_update(ref, indices, values))
for _ in range(5):
ref2 = variables.Variable(array_ops.zeros([1]))
self.evaluate(variables.global_variables_initializer())
val2 = self.evaluate(state_ops.scatter_nd_update(ref2, indices, values))
self.assertAllEqual(val, val2)
@test_util.with_eager_op_as_function
| StatefulScatterNdDeterminismTest |
python | sqlalchemy__sqlalchemy | test/perf/compiled_extensions/result.py | {
"start": 8746,
"end": 9003
} | class ____:
_echo = False
def __init__(self, dialect):
self.dialect = dialect
def _safe_close_cursor(self, cursor):
cursor.close()
def _handle_dbapi_exception(self, e, *args, **kw):
raise e
@dataclass
| _MockConnection |
python | getsentry__sentry-python | sentry_sdk/envelope.py | {
"start": 664,
"end": 4925
} | class ____:
"""
Represents a Sentry Envelope. The calling code is responsible for adhering to the constraints
documented in the Sentry docs: https://develop.sentry.dev/sdk/envelopes/#data-model. In particular,
each envelope may have at most one Item with type "event" or "transaction" (but not both).
"""
def __init__(
self,
headers=None, # type: Optional[Dict[str, Any]]
items=None, # type: Optional[List[Item]]
):
# type: (...) -> None
if headers is not None:
headers = dict(headers)
self.headers = headers or {}
if items is None:
items = []
else:
items = list(items)
self.items = items
@property
def description(self):
# type: (...) -> str
return "envelope with %s items (%s)" % (
len(self.items),
", ".join(x.data_category for x in self.items),
)
def add_event(
self,
event, # type: Event
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=event), type="event"))
def add_transaction(
self,
transaction, # type: Event
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=transaction), type="transaction"))
def add_profile(
self,
profile, # type: Any
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=profile), type="profile"))
def add_profile_chunk(
self,
profile_chunk, # type: Any
):
# type: (...) -> None
self.add_item(
Item(
payload=PayloadRef(json=profile_chunk),
type="profile_chunk",
headers={"platform": profile_chunk.get("platform", "python")},
)
)
def add_checkin(
self,
checkin, # type: Any
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=checkin), type="check_in"))
def add_session(
self,
session, # type: Union[Session, Any]
):
# type: (...) -> None
if isinstance(session, Session):
session = session.to_json()
self.add_item(Item(payload=PayloadRef(json=session), type="session"))
def add_sessions(
self,
sessions, # type: Any
):
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=sessions), type="sessions"))
def add_item(
self,
item, # type: Item
):
# type: (...) -> None
self.items.append(item)
def get_event(self):
# type: (...) -> Optional[Event]
for items in self.items:
event = items.get_event()
if event is not None:
return event
return None
def get_transaction_event(self):
# type: (...) -> Optional[Event]
for item in self.items:
event = item.get_transaction_event()
if event is not None:
return event
return None
def __iter__(self):
# type: (...) -> Iterator[Item]
return iter(self.items)
def serialize_into(
self,
f, # type: Any
):
# type: (...) -> None
f.write(json_dumps(self.headers))
f.write(b"\n")
for item in self.items:
item.serialize_into(f)
def serialize(self):
# type: (...) -> bytes
out = io.BytesIO()
self.serialize_into(out)
return out.getvalue()
@classmethod
def deserialize_from(
cls,
f, # type: Any
):
# type: (...) -> Envelope
headers = parse_json(f.readline())
items = []
while 1:
item = Item.deserialize_from(f)
if item is None:
break
items.append(item)
return cls(headers=headers, items=items)
@classmethod
def deserialize(
cls,
bytes, # type: bytes
):
# type: (...) -> Envelope
return cls.deserialize_from(io.BytesIO(bytes))
def __repr__(self):
# type: (...) -> str
return "<Envelope headers=%r items=%r>" % (self.headers, self.items)
| Envelope |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/teams/tutorial001_py310.py | {
"start": 1008,
"end": 4809
} | class ____(SQLModel):
name: str | None = None
secret_name: str | None = None
age: int | None = None
team_id: int | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_session():
with Session(engine) as session:
yield session
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(*, session: Session = Depends(get_session), hero: HeroCreate):
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes(
*,
session: Session = Depends(get_session),
offset: int = 0,
limit: int = Query(default=100, le=100),
):
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(*, session: Session = Depends(get_session), hero_id: int):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(
*, session: Session = Depends(get_session), hero_id: int, hero: HeroUpdate
):
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
db_hero.sqlmodel_update(hero_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.delete("/heroes/{hero_id}")
def delete_hero(*, session: Session = Depends(get_session), hero_id: int):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
@app.post("/teams/", response_model=TeamPublic)
def create_team(*, session: Session = Depends(get_session), team: TeamCreate):
db_team = Team.model_validate(team)
session.add(db_team)
session.commit()
session.refresh(db_team)
return db_team
@app.get("/teams/", response_model=list[TeamPublic])
def read_teams(
*,
session: Session = Depends(get_session),
offset: int = 0,
limit: int = Query(default=100, le=100),
):
teams = session.exec(select(Team).offset(offset).limit(limit)).all()
return teams
@app.get("/teams/{team_id}", response_model=TeamPublic)
def read_team(*, team_id: int, session: Session = Depends(get_session)):
team = session.get(Team, team_id)
if not team:
raise HTTPException(status_code=404, detail="Team not found")
return team
@app.patch("/teams/{team_id}", response_model=TeamPublic)
def update_team(
*,
session: Session = Depends(get_session),
team_id: int,
team: TeamUpdate,
):
db_team = session.get(Team, team_id)
if not db_team:
raise HTTPException(status_code=404, detail="Team not found")
team_data = team.model_dump(exclude_unset=True)
db_team.sqlmodel_update(team_data)
session.add(db_team)
session.commit()
session.refresh(db_team)
return db_team
@app.delete("/teams/{team_id}")
def delete_team(*, session: Session = Depends(get_session), team_id: int):
team = session.get(Team, team_id)
if not team:
raise HTTPException(status_code=404, detail="Team not found")
session.delete(team)
session.commit()
return {"ok": True}
| HeroUpdate |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 77072,
"end": 77863
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"branch",
"file_changes",
"message",
"expected_head_oid",
"client_mutation_id",
)
branch = sgqlc.types.Field(
sgqlc.types.non_null(CommittableBranch), graphql_name="branch"
)
file_changes = sgqlc.types.Field("FileChanges", graphql_name="fileChanges")
message = sgqlc.types.Field(
sgqlc.types.non_null(CommitMessage), graphql_name="message"
)
expected_head_oid = sgqlc.types.Field(
sgqlc.types.non_null(GitObjectID), graphql_name="expectedHeadOid"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| CreateCommitOnBranchInput |
python | urllib3__urllib3 | test/test_fields.py | {
"start": 250,
"end": 4409
} | class ____:
@pytest.mark.parametrize(
"filename, content_types",
[
("image.jpg", ["image/jpeg", "image/pjpeg"]),
("notsure", ["application/octet-stream"]),
(None, ["application/octet-stream"]),
],
)
def test_guess_content_type(
self, filename: str | None, content_types: list[str]
) -> None:
assert guess_content_type(filename) in content_types
def test_create(self) -> None:
simple_field = RequestField("somename", "data")
assert simple_field.render_headers() == "\r\n"
filename_field = RequestField("somename", "data", filename="somefile.txt")
assert filename_field.render_headers() == "\r\n"
headers_field = RequestField(
"somename", "data", headers={"Content-Length": "4"}
)
assert headers_field.render_headers() == "Content-Length: 4\r\n\r\n"
def test_make_multipart(self) -> None:
field = RequestField("somename", "data")
field.make_multipart(content_type="image/jpg", content_location="/test")
assert (
field.render_headers()
== 'Content-Disposition: form-data; name="somename"\r\n'
"Content-Type: image/jpg\r\n"
"Content-Location: /test\r\n"
"\r\n"
)
def test_make_multipart_empty_filename(self) -> None:
field = RequestField("somename", "data", "")
field.make_multipart(content_type="application/octet-stream")
assert (
field.render_headers()
== 'Content-Disposition: form-data; name="somename"; filename=""\r\n'
"Content-Type: application/octet-stream\r\n"
"\r\n"
)
def test_render_parts(self) -> None:
field = RequestField("somename", "data")
parts = field._render_parts({"name": "value", "filename": "value"})
assert 'name="value"' in parts
assert 'filename="value"' in parts
parts = field._render_parts([("name", "value"), ("filename", "value")])
assert parts == 'name="value"; filename="value"'
@pytest.mark.parametrize(
("value", "expect"),
[("näme", "filename*=utf-8''n%C3%A4me"), (b"name", 'filename="name"')],
)
def test_format_header_param_rfc2231_deprecated(
self, value: bytes | str, expect: str
) -> None:
with pytest.deprecated_call(match=r"urllib3 v2\.1\.0"):
param = format_header_param_rfc2231("filename", value)
assert param == expect
def test_format_header_param_html5_deprecated(self) -> None:
with pytest.deprecated_call(match=r"urllib3 v2\.1\.0"):
param2 = format_header_param_html5("filename", "name")
with pytest.deprecated_call(match=r"urllib3 v2\.1\.0"):
param1 = format_header_param("filename", "name")
assert param1 == param2
@pytest.mark.parametrize(
("value", "expect"),
[
("name", "name"),
("näme", "näme"),
(b"n\xc3\xa4me", "näme"),
("ski ⛷.txt", "ski ⛷.txt"),
("control \x1a\x1b\x1c", "control \x1a\x1b\x1c"),
("backslash \\", "backslash \\"),
("quotes '\"", "quotes '%22"),
("newline \n\r", "newline %0A%0D"),
],
)
def test_format_multipart_header_param(
self, value: bytes | str, expect: str
) -> None:
param = format_multipart_header_param("filename", value)
assert param == f'filename="{expect}"'
def test_from_tuples(self) -> None:
field = RequestField.from_tuples("file", ("スキー旅行.txt", "data"))
cd = field.headers["Content-Disposition"]
assert cd == 'form-data; name="file"; filename="スキー旅行.txt"'
def test_from_tuples_rfc2231(self) -> None:
with pytest.deprecated_call(match=r"urllib3 v2\.1\.0"):
field = RequestField.from_tuples(
"file", ("näme", "data"), header_formatter=format_header_param_rfc2231
)
cd = field.headers["Content-Disposition"]
assert cd == "form-data; name=\"file\"; filename*=utf-8''n%C3%A4me"
| TestRequestField |
python | celery__celery | celery/exceptions.py | {
"start": 4504,
"end": 4595
} | class ____(CeleryError):
"""Base class for task-related semi-predicates."""
| TaskPredicate |
python | jupyterlab__jupyterlab | jupyterlab/tests/test_app.py | {
"start": 4016,
"end": 6446
} | class ____(ProcessApp):
"""A process app for running tests, includes a mock contents directory."""
allow_origin = "*"
def initialize_templates(self):
self.static_paths = [_create_static_dir()]
self.template_paths = [_create_template_dir()]
def initialize_settings(self):
self.env_patch = TestEnv()
self.env_patch.start()
ProcessApp.__init__(self)
self.settings["allow_origin"] = ProcessTestApp.allow_origin
self.static_dir = self.static_paths[0]
self.template_dir = self.template_paths[0]
self.schemas_dir = _create_schemas_dir()
self.user_settings_dir = _create_user_settings_dir()
self.workspaces_dir = _create_workspaces_dir()
self._install_default_kernels()
self.settings["kernel_manager"].default_kernel_name = "echo"
super().initialize_settings()
def _install_kernel(self, kernel_name, kernel_spec):
"""Install a kernel spec to the data directory.
Parameters
----------
kernel_name: str
Name of the kernel.
kernel_spec: dict
The kernel spec for the kernel
"""
paths = jupyter_core.paths
kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", kernel_name)
os.makedirs(kernel_dir)
with open(pjoin(kernel_dir, "kernel.json"), "w") as f:
f.write(json.dumps(kernel_spec))
def _install_default_kernels(self):
# Install echo and ipython kernels - should be done after env patch
self._install_kernel(
kernel_name="echo",
kernel_spec={
"argv": [
sys.executable,
"-m",
"jupyterlab.tests.echo_kernel",
"-f",
"{connection_file}",
],
"display_name": "Echo Kernel",
"language": "echo",
},
)
paths = jupyter_core.paths
ipykernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", "ipython")
write_kernel_spec(ipykernel_dir)
def _process_finished(self, future):
self.serverapp.http_server.stop()
self.serverapp.io_loop.stop()
self.env_patch.stop()
try:
os._exit(future.result())
except Exception as e:
self.log.error(str(e))
os._exit(1)
| ProcessTestApp |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 24855,
"end": 25682
} | class ____:
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, np.prod, a)
assert_raises(ArithmeticError, np.prod, a2, 1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
| TestProd |
python | modin-project__modin | versioneer.py | {
"start": 67910,
"end": 86836
} | class ____(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose: bool = False) -> Dict[str, Any]:
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or bool(cfg.verbose) # `bool()` used to avoid `None`
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version() -> str:
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass(cmdclass: Optional[Dict[str, Any]] = None):
"""Get the custom setuptools subclasses used by Versioneer.
If the package uses a different cmdclass (e.g. one from numpy), it
should be provide as an argument.
"""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to it's pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/python-versioneer/python-versioneer/issues/52
cmds = {} if cmdclass is None else cmdclass.copy()
# we add "version" to setuptools
from setuptools import Command
class cmd_version(Command):
description = "report generated version string"
user_options: List[Tuple[str, str, str]] = []
boolean_options: List[str] = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# pip install -e . and setuptool/editable_wheel will invoke build_py
# but the build_py command is not expected to copy any files.
# we override different "build_py" commands for both environments
if "build_py" in cmds:
_build_py: Any = cmds["build_py"]
else:
from setuptools.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self) -> None:
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
if getattr(self, "editable_mode", False):
# During editable installs `.py` and data files are
# not copied to build_lib
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "build_ext" in cmds:
_build_ext: Any = cmds["build_ext"]
else:
from setuptools.command.build_ext import build_ext as _build_ext
class cmd_build_ext(_build_ext):
def run(self) -> None:
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_ext.run(self)
if self.inplace:
# build_ext --inplace will only build extensions in
# build/lib<..> dir with no _version.py to write to.
# As in place builds will already have a _version.py
# in the module dir, we do not need to write one.
return
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if not cfg.versionfile_build:
return
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
if not os.path.exists(target_versionfile):
print(
f"Warning: {target_versionfile} does not exist, skipping "
"version update. This can happen if you are running build_ext "
"without first running build_py."
)
return
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_ext"] = cmd_build_ext
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe # type: ignore
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self) -> None:
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
try:
from py2exe.setuptools_buildexe import py2exe as _py2exe # type: ignore
except ImportError:
from py2exe.distutils_buildexe import py2exe as _py2exe # type: ignore
class cmd_py2exe(_py2exe):
def run(self) -> None:
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# sdist farms its file list building out to egg_info
if "egg_info" in cmds:
_egg_info: Any = cmds["egg_info"]
else:
from setuptools.command.egg_info import egg_info as _egg_info
class cmd_egg_info(_egg_info):
def find_sources(self) -> None:
# egg_info.find_sources builds the manifest list and writes it
# in one shot
super().find_sources()
# Modify the filelist and normalize it
root = get_root()
cfg = get_config_from_root(root)
self.filelist.append("versioneer.py")
if cfg.versionfile_source:
# There are rare cases where versionfile_source might not be
# included by default, so we must be explicit
self.filelist.append(cfg.versionfile_source)
self.filelist.sort()
self.filelist.remove_duplicates()
# The write method is hidden in the manifest_maker instance that
# generated the filelist and was thrown away
# We will instead replicate their final normalization (to unicode,
# and POSIX-style paths)
from setuptools import unicode_utils
normalized = [
unicode_utils.filesys_decode(f).replace(os.sep, "/")
for f in self.filelist.files
]
manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
with open(manifest_filename, "w") as fobj:
fobj.write("\n".join(normalized))
cmds["egg_info"] = cmd_egg_info
# we override different "sdist" commands for both environments
if "sdist" in cmds:
_sdist: Any = cmds["sdist"]
else:
from setuptools.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self) -> None:
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir: str, files: List[str]) -> None:
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
OLD_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
INIT_PY_SNIPPET = """
from . import {0}
__version__ = {0}.get_versions()['version']
"""
def do_setup() -> int:
"""Do main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
if isinstance(e, (OSError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
maybe_ipy: Optional[str] = ipy
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except OSError:
old = ""
module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
snippet = INIT_PY_SNIPPET.format(module)
if OLD_SNIPPET in old:
print(" replacing boilerplate in %s" % ipy)
with open(ipy, "w") as f:
f.write(old.replace(OLD_SNIPPET, snippet))
elif snippet not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(snippet)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
maybe_ipy = None
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(cfg.versionfile_source, maybe_ipy)
return 0
def scan_setup_py() -> int:
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
def setup_command() -> NoReturn:
"""Set up Versioneer and exit with appropriate error code."""
errors = do_setup()
errors += scan_setup_py()
sys.exit(1 if errors else 0)
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
setup_command()
| VersioneerBadRootError |
python | django__django | tests/select_related/models.py | {
"start": 1107,
"end": 1233
} | class ____(models.Model):
name = models.CharField(max_length=50)
order = models.ForeignKey(Order, models.CASCADE)
| Family |
python | sympy__sympy | sympy/ntheory/generate.py | {
"start": 693,
"end": 33379
} | class ____:
"""A list of prime numbers, implemented as a dynamically
growing sieve of Eratosthenes. When a lookup is requested involving
an odd number that has not been sieved, the sieve is automatically
extended up to that number. Implementation details limit the number of
primes to ``2^32-1``.
Examples
========
>>> from sympy import sieve
>>> sieve._reset() # this line for doctest only
>>> 25 in sieve
False
>>> sieve._list
array('L', [2, 3, 5, 7, 11, 13, 17, 19, 23])
"""
# data shared (and updated) by all Sieve instances
def __init__(self, sieve_interval=1_000_000):
""" Initial parameters for the Sieve class.
Parameters
==========
sieve_interval (int): Amount of memory to be used
Raises
======
ValueError
If ``sieve_interval`` is not positive.
"""
self._n = 6
self._list = _array('L', [2, 3, 5, 7, 11, 13]) # primes
self._tlist = _array('L', [0, 1, 1, 2, 2, 4]) # totient
self._mlist = _array('i', [0, 1, -1, -1, 0, -1]) # mobius
if sieve_interval <= 0:
raise ValueError("sieve_interval should be a positive integer")
self.sieve_interval = sieve_interval
assert all(len(i) == self._n for i in (self._list, self._tlist, self._mlist))
def __repr__(self):
return ("<%s sieve (%i): %i, %i, %i, ... %i, %i\n"
"%s sieve (%i): %i, %i, %i, ... %i, %i\n"
"%s sieve (%i): %i, %i, %i, ... %i, %i>") % (
'prime', len(self._list),
self._list[0], self._list[1], self._list[2],
self._list[-2], self._list[-1],
'totient', len(self._tlist),
self._tlist[0], self._tlist[1],
self._tlist[2], self._tlist[-2], self._tlist[-1],
'mobius', len(self._mlist),
self._mlist[0], self._mlist[1],
self._mlist[2], self._mlist[-2], self._mlist[-1])
def _reset(self, prime=None, totient=None, mobius=None):
"""Reset all caches (default). To reset one or more set the
desired keyword to True."""
if all(i is None for i in (prime, totient, mobius)):
prime = totient = mobius = True
if prime:
self._list = self._list[:self._n]
if totient:
self._tlist = self._tlist[:self._n]
if mobius:
self._mlist = self._mlist[:self._n]
def extend(self, n):
"""Grow the sieve to cover all primes <= n.
Examples
========
>>> from sympy import sieve
>>> sieve._reset() # this line for doctest only
>>> sieve.extend(30)
>>> sieve[10] == 29
True
"""
n = int(n)
# `num` is even at any point in the function.
# This satisfies the condition required by `self._primerange`.
num = self._list[-1] + 1
if n < num:
return
num2 = num**2
while num2 <= n:
self._list += _array('L', self._primerange(num, num2))
num, num2 = num2, num2**2
# Merge the sieves
self._list += _array('L', self._primerange(num, n + 1))
def _primerange(self, a, b):
""" Generate all prime numbers in the range (a, b).
Parameters
==========
a, b : positive integers assuming the following conditions
* a is an even number
* 2 < self._list[-1] < a < b < nextprime(self._list[-1])**2
Yields
======
p (int): prime numbers such that ``a < p < b``
Examples
========
>>> from sympy.ntheory.generate import Sieve
>>> s = Sieve()
>>> s._list[-1]
13
>>> list(s._primerange(18, 31))
[19, 23, 29]
"""
if b % 2:
b -= 1
while a < b:
block_size = min(self.sieve_interval, (b - a) // 2)
# Create the list such that block[x] iff (a + 2x + 1) is prime.
# Note that even numbers are not considered here.
block = [True] * block_size
for p in self._list[1:bisect(self._list, sqrt(a + 2 * block_size + 1))]:
for t in range((-(a + 1 + p) // 2) % p, block_size, p):
block[t] = False
for idx, p in enumerate(block):
if p:
yield a + 2 * idx + 1
a += 2 * block_size
def extend_to_no(self, i):
"""Extend to include the ith prime number.
Parameters
==========
i : integer
Examples
========
>>> from sympy import sieve
>>> sieve._reset() # this line for doctest only
>>> sieve.extend_to_no(9)
>>> sieve._list
array('L', [2, 3, 5, 7, 11, 13, 17, 19, 23])
Notes
=====
The list is extended by 50% if it is too short, so it is
likely that it will be longer than requested.
"""
i = as_int(i)
while len(self._list) < i:
self.extend(int(self._list[-1] * 1.5))
def primerange(self, a, b=None):
"""Generate all prime numbers in the range [2, a) or [a, b).
Examples
========
>>> from sympy import sieve, prime
All primes less than 19:
>>> print([i for i in sieve.primerange(19)])
[2, 3, 5, 7, 11, 13, 17]
All primes greater than or equal to 7 and less than 19:
>>> print([i for i in sieve.primerange(7, 19)])
[7, 11, 13, 17]
All primes through the 10th prime
>>> list(sieve.primerange(prime(10) + 1))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
"""
if b is None:
b = _as_int_ceiling(a)
a = 2
else:
a = max(2, _as_int_ceiling(a))
b = _as_int_ceiling(b)
if a >= b:
return
self.extend(b)
yield from self._list[bisect_left(self._list, a):
bisect_left(self._list, b)]
def totientrange(self, a, b):
"""Generate all totient numbers for the range [a, b).
Examples
========
>>> from sympy import sieve
>>> print([i for i in sieve.totientrange(7, 18)])
[6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16]
"""
a = max(1, _as_int_ceiling(a))
b = _as_int_ceiling(b)
n = len(self._tlist)
if a >= b:
return
elif b <= n:
for i in range(a, b):
yield self._tlist[i]
else:
self._tlist += _array('L', range(n, b))
for i in range(1, n):
ti = self._tlist[i]
if ti == i - 1:
startindex = (n + i - 1) // i * i
for j in range(startindex, b, i):
self._tlist[j] -= self._tlist[j] // i
if i >= a:
yield ti
for i in range(n, b):
ti = self._tlist[i]
if ti == i:
for j in range(i, b, i):
self._tlist[j] -= self._tlist[j] // i
if i >= a:
yield self._tlist[i]
def mobiusrange(self, a, b):
"""Generate all mobius numbers for the range [a, b).
Parameters
==========
a : integer
First number in range
b : integer
First number outside of range
Examples
========
>>> from sympy import sieve
>>> print([i for i in sieve.mobiusrange(7, 18)])
[-1, 0, 0, 1, -1, 0, -1, 1, 1, 0, -1]
"""
a = max(1, _as_int_ceiling(a))
b = _as_int_ceiling(b)
n = len(self._mlist)
if a >= b:
return
elif b <= n:
for i in range(a, b):
yield self._mlist[i]
else:
self._mlist += _array('i', [0]*(b - n))
for i in range(1, n):
mi = self._mlist[i]
startindex = (n + i - 1) // i * i
for j in range(startindex, b, i):
self._mlist[j] -= mi
if i >= a:
yield mi
for i in range(n, b):
mi = self._mlist[i]
for j in range(2 * i, b, i):
self._mlist[j] -= mi
if i >= a:
yield mi
def search(self, n):
"""Return the indices i, j of the primes that bound n.
If n is prime then i == j.
Although n can be an expression, if ceiling cannot convert
it to an integer then an n error will be raised.
Examples
========
>>> from sympy import sieve
>>> sieve.search(25)
(9, 10)
>>> sieve.search(23)
(9, 9)
"""
test = _as_int_ceiling(n)
n = as_int(n)
if n < 2:
raise ValueError(f"n should be >= 2 but got: {n}")
if n > self._list[-1]:
self.extend(n)
b = bisect(self._list, n)
if self._list[b - 1] == test:
return b, b
else:
return b, b + 1
def __contains__(self, n):
    """Return True if ``n`` is a prime (extending the sieve if needed).

    Non-integer values and values below 2 return False rather than
    raising.
    """
    try:
        n = as_int(n)
    except ValueError:
        return False
    # Explicit range check instead of ``assert n >= 2``: asserts are
    # stripped under ``python -O``, which previously let n < 2 reach
    # search() and raise an uncaught ValueError.
    if n < 2:
        return False
    if n % 2 == 0:
        # 2 is the only even prime
        return n == 2
    lo, hi = self.search(n)
    # search() returns equal bounds exactly when n itself is prime
    return lo == hi
def __iter__(self):
    """Yield the primes 2, 3, 5, ... without end."""
    k = 1
    while True:
        yield self[k]
        k += 1
def __getitem__(self, n):
    """Return the nth prime number (1-based); slices are supported."""
    if isinstance(n, slice):
        self.extend_to_no(n.stop)
        first = 0 if n.start is None else n.start
        if first < 1:
            # sieve[:5] would otherwise start at index -1; refuse
            # zero/negative starts explicitly instead.
            raise IndexError("Sieve indices start at 1.")
        return self._list[first - 1:n.stop - 1:n.step]
    if n < 1:
        # indexing is 1-based, so sieve[0] is rejected rather than
        # surprisingly returning the last cached prime
        raise IndexError("Sieve indices start at 1.")
    n = as_int(n)
    self.extend_to_no(n)
    return self._list[n - 1]
# Generate a global object for repeated use in trial division etc
# (module-level singleton: the helpers below all share and grow this cache)
sieve = Sieve()
def prime(nth):
    r"""
    Return the nth prime number, where primes are indexed starting from 1:
    prime(1) = 2, prime(2) = 3, etc.

    Parameters
    ==========

    nth : int
        The position of the prime number to return (must be a positive integer).

    Returns
    =======

    int
        The nth prime number.

    Examples
    ========

    >>> from sympy import prime
    >>> prime(10)
    29
    >>> prime(1)
    2
    >>> prime(100000)
    1299709

    See Also
    ========

    sympy.ntheory.primetest.isprime : Test if a number is prime.
    primerange : Generate all primes in a given range.
    primepi : Return the number of primes less than or equal to a given number.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Prime_number_theorem
    .. [2] https://en.wikipedia.org/wiki/Logarithmic_integral_function
    .. [3] https://en.wikipedia.org/wiki/Skewes%27_number
    """
    n = as_int(nth)
    if n < 1:
        raise ValueError("nth must be a positive integer; prime(1) == 2")
    # Check if n is within the sieve range
    if n <= len(sieve._list):
        return sieve[n]
    # Local imports of symbolic functions; presumably kept function-local to
    # avoid import cycles at module load -- confirm before hoisting.
    from sympy.functions.elementary.exponential import log
    from sympy.functions.special.error_functions import li
    if n < 1000:
        # Extend sieve up to 8*n as this is empirically sufficient
        sieve.extend(8 * n)
        return sieve[n]
    a = 2
    # Estimate an upper bound for the nth prime using the prime number theorem
    b = int(n * (log(n).evalf() + log(log(n)).evalf()))
    # Binary search for the least m such that li(m) > n
    # (li approximates the prime-counting function)
    while a < b:
        mid = (a + b) >> 1
        if li(mid).evalf() > n:
            b = mid
        else:
            a = mid + 1
    # a - 1 is near the answer; step forward by the exact remaining count
    return nextprime(a - 1, n - _primepi(a - 1))
@deprecated("""\
The `sympy.ntheory.generate.primepi` has been moved to `sympy.functions.combinatorial.numbers.primepi`.""",
            deprecated_since_version="1.13",
            active_deprecations_target='deprecated-ntheory-symbolic-functions')
def primepi(n):
    r""" Represents the prime counting function pi(n) = the number
    of prime numbers less than or equal to n.

    .. deprecated:: 1.13

        The ``primepi`` function is deprecated. Use :class:`sympy.functions.combinatorial.numbers.primepi`
        instead. See its documentation for more information. See
        :ref:`deprecated-ntheory-symbolic-functions` for details.

    Algorithm Description:

    In sieve method, we remove all multiples of prime p
    except p itself.

    Let phi(i,j) be the number of integers 2 <= k <= i
    which remain after sieving from primes less than
    or equal to j.
    Clearly, pi(n) = phi(n, sqrt(n))

    If j is not a prime,
    phi(i,j) = phi(i, j - 1)

    if j is a prime,
    We remove all numbers(except j) whose
    smallest prime factor is j.

    Let $x= j \times a$ be such a number, where $2 \le a \le i / j$
    Now, after sieving from primes $\le j - 1$,
    a must remain
    (because x, and hence a has no prime factor $\le j - 1$)
    Clearly, there are phi(i / j, j - 1) such a
    which remain on sieving from primes $\le j - 1$

    Now, if a is a prime less than equal to j - 1,
    $x= j \times a$ has smallest prime factor = a, and
    has already been removed(by sieving from a).
    So, we do not need to remove it again.
    (Note: there will be pi(j - 1) such x)

    Thus, number of x, that will be removed are:
    phi(i / j, j - 1) - phi(j - 1, j - 1)
    (Note that pi(j - 1) = phi(j - 1, j - 1))

    $\Rightarrow$ phi(i,j) = phi(i, j - 1) - phi(i / j, j - 1) + phi(j - 1, j - 1)

    So,following recursion is used and implemented as dp:

    phi(a, b) = phi(a, b - 1), if b is not a prime
    phi(a, b) = phi(a, b-1)-phi(a / b, b-1) + phi(b-1, b-1), if b is prime

    Clearly a is always of the form floor(n / k),
    which can take at most $2\sqrt{n}$ values.
    Two arrays arr1,arr2 are maintained
    arr1[i] = phi(i, j),
    arr2[i] = phi(n // i, j)

    Finally the answer is arr2[1]

    Examples
    ========

    >>> from sympy import primepi, prime, prevprime, isprime
    >>> primepi(25)
    9

    So there are 9 primes less than or equal to 25. Is 25 prime?

    >>> isprime(25)
    False

    It is not. So the first prime less than 25 must be the
    9th prime:

    >>> prevprime(25) == prime(9)
    True

    See Also
    ========

    sympy.ntheory.primetest.isprime : Test if n is prime
    primerange : Generate all primes in a given range
    prime : Return the nth prime
    """
    # Thin backward-compatibility shim: the real implementation now lives
    # in sympy.functions.combinatorial.numbers (import is deferred so the
    # deprecated name does not pull in the functions package at load time).
    from sympy.functions.combinatorial.numbers import primepi as func_primepi
    return func_primepi(n)
def _primepi(n: int) -> int:
    r""" Represents the prime counting function pi(n) = the number
    of prime numbers less than or equal to n.

    Explanation
    ===========

    In sieve method, we remove all multiples of prime p
    except p itself.

    Let phi(i,j) be the number of integers 2 <= k <= i
    which remain after sieving from primes less than
    or equal to j.
    Clearly, pi(n) = phi(n, sqrt(n))

    If j is not a prime,
    phi(i,j) = phi(i, j - 1)

    if j is a prime,
    We remove all numbers(except j) whose
    smallest prime factor is j.

    Let $x= j \times a$ be such a number, where $2 \le a \le i / j$
    Now, after sieving from primes $\le j - 1$,
    a must remain
    (because x, and hence a has no prime factor $\le j - 1$)
    Clearly, there are phi(i / j, j - 1) such a
    which remain on sieving from primes $\le j - 1$

    Now, if a is a prime less than equal to j - 1,
    $x= j \times a$ has smallest prime factor = a, and
    has already been removed(by sieving from a).
    So, we do not need to remove it again.
    (Note: there will be pi(j - 1) such x)

    Thus, number of x, that will be removed are:
    phi(i / j, j - 1) - phi(j - 1, j - 1)
    (Note that pi(j - 1) = phi(j - 1, j - 1))

    $\Rightarrow$ phi(i,j) = phi(i, j - 1) - phi(i / j, j - 1) + phi(j - 1, j - 1)

    So,following recursion is used and implemented as dp:

    phi(a, b) = phi(a, b - 1), if b is not a prime
    phi(a, b) = phi(a, b-1)-phi(a / b, b-1) + phi(b-1, b-1), if b is prime

    Clearly a is always of the form floor(n / k),
    which can take at most $2\sqrt{n}$ values.
    Two arrays arr1,arr2 are maintained
    arr1[i] = phi(i, j),
    arr2[i] = phi(n // i, j)

    Finally the answer is arr2[1]

    Parameters
    ==========

    n : int
    """
    if n < 2:
        return 0
    if n <= sieve._list[-1]:
        # n lies inside the cached sieve: its search position is pi(n)
        return sieve.search(n)[0]
    lim = int(sqrt(n))
    # Initialize for j = 2: (i + 1) >> 1 counts the odd numbers in [1, i],
    # i.e. what survives after sieving out the even numbers.
    arr1 = [(i + 1) >> 1 for i in range(lim + 1)]
    arr2 = [0] + [(n//i + 1) >> 1 for i in range(1, lim + 1)]
    skip = [False] * (lim + 1)
    for i in range(3, lim + 1, 2):
        # Presently, arr1[k]=phi(k,i - 1),
        # arr2[k] = phi(n // k,i - 1) # not all k's do this
        if skip[i]:
            # skip if i is a composite number
            continue
        # p = phi(i - 1, i - 1); by the identity in the docstring this
        # plays the role of pi(i - 1) in the recursion
        p = arr1[i - 1]
        # mark every multiple of the prime i as composite
        for j in range(i, lim + 1, i):
            skip[j] = True
        # update arr2
        # phi(n/j, i) = phi(n/j, i-1) - phi(n/(i*j), i-1) + phi(i-1, i-1)
        for j in range(1, min(n // (i * i), lim) + 1, 2):
            # No need for arr2[j] in j such that skip[j] is True to
            # compute the final required arr2[1].
            if skip[j]:
                continue
            st = i * j
            if st <= lim:
                arr2[j] -= arr2[st] - p
            else:
                # n // st is small enough that phi comes from arr1 instead
                arr2[j] -= arr1[n // st] - p
        # update arr1
        # phi(j, i) = phi(j, i-1) - phi(j/i, i-1) + phi(i-1, i-1)
        # where the range below i**2 is fixed and
        # does not need to be calculated.
        for j in range(lim, min(lim, i*i - 1), -1):
            arr1[j] -= arr1[j // i] - p
    # arr2[1] = phi(n, sqrt(n)) = pi(n)
    return arr2[1]
def nextprime(n, ith=1):
    """ Return the ith prime greater than n.

    Parameters
    ==========

    n : integer
    ith : positive integer

    Returns
    =======

    int : Return the ith prime greater than n

    Raises
    ======

    ValueError
        If ``ith <= 0``.
        If ``n`` or ``ith`` is not an integer.

    Notes
    =====

    Potential primes are located at 6*j +/- 1. This
    property is used during searching.

    >>> from sympy import nextprime
    >>> [(i, nextprime(i)) for i in range(10, 15)]
    [(10, 11), (11, 13), (12, 13), (13, 17), (14, 17)]
    >>> nextprime(2, ith=2) # the 2nd prime after 2
    5

    See Also
    ========

    prevprime : Return the largest prime smaller than n
    primerange : Generate all primes in a given range
    """
    n = int(n)
    i = as_int(ith)
    if i <= 0:
        raise ValueError("ith should be positive")
    if n < 2:
        # 2 is the first prime; count it and continue searching beyond it
        n = 2
        i -= 1
    if n <= sieve._list[-2]:
        # Serve as much as possible from the cached sieve.  The penultimate
        # entry is used as the cutoff, presumably so search() leaves room to
        # look one slot ahead inside the cache -- confirm before changing.
        l, _ = sieve.search(n)
        if l + i - 1 < len(sieve._list):
            return sieve._list[l + i - 1]
        # not enough cached primes: resume the manual scan from the last
        # cached prime, with the count of primes still needed
        n = sieve._list[-1]
        i += l - len(sieve._list)
    # Scan candidates of the form 6*k +/- 1 (all primes > 3 have this form).
    nn = 6*(n//6)
    if nn == n:
        # n is a multiple of 6: the next candidate is n + 1 (== 6k + 1)
        n += 1
        if isprime(n):
            i -= 1
            if not i:
                return n
        n += 4
    elif n - nn == 5:
        # n == 6k + 5: test n + 2 (== 6(k+1) + 1) first
        n += 2
        if isprime(n):
            i -= 1
            if not i:
                return n
        n += 4
    else:
        # jump straight to the next 6k + 5 candidate
        n = nn + 5
    while 1:
        # invariant: n == 6k + 5 at the top of each iteration
        if isprime(n):
            i -= 1
            if not i:
                return n
        n += 2
        # now n == 6k + 7 == 6(k+1) + 1
        if isprime(n):
            i -= 1
            if not i:
                return n
        n += 4
def prevprime(n):
    """ Return the largest prime smaller than n.

    Notes
    =====

    Potential primes are located at 6*j +/- 1. This
    property is used during searching.

    >>> from sympy import prevprime
    >>> [(i, prevprime(i)) for i in range(10, 15)]
    [(10, 7), (11, 7), (12, 11), (13, 11), (14, 13)]

    See Also
    ========

    nextprime : Return the ith prime greater than n
    primerange : Generates all primes in a given range
    """
    n = _as_int_ceiling(n)
    if n < 3:
        raise ValueError("no preceding primes")
    if n < 8:
        # small inputs are answered from a lookup table
        return {3: 2, 4: 3, 5: 3, 6: 5, 7: 5}[n]
    if n <= sieve._list[-1]:
        # answer lies within the cached sieve
        l, u = sieve.search(n)
        if l == u:
            # n itself is prime; return the prime before it
            return sieve[l-1]
        else:
            return sieve[l]
    # Scan downward over candidates of the form 6*k +/- 1.
    nn = 6*(n//6)
    if n - nn <= 1:
        # n is 6k or 6k + 1: first candidate below is 6k - 1
        n = nn - 1
        if isprime(n):
            return n
        n -= 4
    else:
        # first candidate at or below n is 6k + 1
        n = nn + 1
    while 1:
        # invariant: n == 6k + 1 at the top of each iteration
        if isprime(n):
            return n
        n -= 2
        # now n == 6k - 1 == 6(k-1) + 5
        if isprime(n):
            return n
        n -= 4
def primerange(a, b=None):
    """ Generate a list of all prime numbers in the range [2, a),
    or [a, b).

    If the range exists in the default sieve, the values will
    be returned from there; otherwise values will be returned
    but will not modify the sieve.

    Examples
    ========

    >>> from sympy import primerange, prime

    All primes less than 19:

    >>> list(primerange(19))
    [2, 3, 5, 7, 11, 13, 17]

    All primes greater than or equal to 7 and less than 19:

    >>> list(primerange(7, 19))
    [7, 11, 13, 17]

    All primes through the 10th prime

    >>> list(primerange(prime(10) + 1))
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]

    The Sieve method, primerange, is generally faster but it will
    occupy more memory as the sieve stores values. The default
    instance of Sieve, named sieve, can be used:

    >>> from sympy import sieve
    >>> list(sieve.primerange(1, 30))
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]

    Notes
    =====

    Some famous conjectures about the occurrence of primes in a given
    range are [1]:

    - Twin primes: though often not, the following will give 2 primes
        an infinite number of times:
        primerange(6*n - 1, 6*n + 2)
    - Legendre's: the following always yields at least one prime
        primerange(n**2, (n+1)**2+1)
    - Bertrand's (proven): there is always a prime in the range
        primerange(n, 2*n)
    - Brocard's: there are at least four primes in the range
        primerange(prime(n)**2, prime(n+1)**2)

    The average gap between primes is log(n) [2]; the gap between
    primes can be arbitrarily large since sequences of composite
    numbers are arbitrarily large, e.g. the numbers in the sequence
    n! + 2, n! + 3 ... n! + n are all composite.

    See Also
    ========

    prime : Return the nth prime
    nextprime : Return the ith prime greater than n
    prevprime : Return the largest prime smaller than n
    randprime : Returns a random prime in a given range
    primorial : Returns the product of primes based on condition
    Sieve.primerange : return range from already computed primes
                       or extend the sieve to contain the requested
                       range.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Prime_number
    .. [2] https://primes.utm.edu/notes/gaps.html
    """
    if b is None:
        # single-argument form: primerange(b) means primerange(2, b)
        a, b = 2, a
    if a >= b:
        return
    # If we already have the range, return it.
    largest_known_prime = sieve._list[-1]
    if b <= largest_known_prime:
        yield from sieve.primerange(a, b)
        return
    # If we know some of it, return it.
    if a <= largest_known_prime:
        yield from sieve._list[bisect_left(sieve._list, a):]
        a = largest_known_prime + 1
    elif a % 2:
        # NOTE(review): _primerange below appears to expect an even lower
        # bound -- confirm against Sieve._primerange before changing.
        a -= 1
    # The cached primes can only certify primality of candidates below
    # largest_known_prime**2, which is presumably why the non-caching
    # helper is capped there.
    tail = min(b, (largest_known_prime)**2)
    if a < tail:
        yield from sieve._primerange(a, tail)
        a = tail
    if b <= a:
        return
    # otherwise compute, without storing, the desired range.
    while 1:
        a = nextprime(a)
        if a < b:
            yield a
        else:
            return
def randprime(a, b):
    """ Return a random prime number in the range [a, b).

    Bertrand's postulate guarantees that randprime(a, 2*a)
    always succeeds for a > 1.

    Note that due to implementation difficulties,
    the prime numbers chosen are not uniformly random.
    For example, there are two primes in the range [112, 128),
    ``113`` and ``127``, but ``randprime(112, 128)`` returns ``127``
    with a probability of 15/17.

    Examples
    ========

    >>> from sympy import randprime, isprime
    >>> randprime(1, 30) #doctest: +SKIP
    13
    >>> isprime(randprime(1, 30))
    True

    See Also
    ========

    primerange : Generate all primes in a given range

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bertrand's_postulate
    """
    if a >= b:
        return
    lo, hi = int(a), int(b)
    # pick a point in [lo - 1, hi] and take the first prime after it
    candidate = nextprime(randint(lo - 1, hi))
    if candidate >= hi:
        # overshot the half-open interval; fall back to the largest
        # prime below hi
        candidate = prevprime(hi)
    if candidate < lo:
        raise ValueError("no primes exist in the specified range")
    return candidate
def primorial(n, nth=True):
    """
    Return the product of the first n primes (default) or
    the primes less than or equal to n (when ``nth=False``).

    Examples
    ========

    >>> from sympy.ntheory.generate import primorial, primerange
    >>> from sympy import factorint, Mul, primefactors, sqrt
    >>> primorial(4) # the first 4 primes are 2, 3, 5, 7
    210
    >>> primorial(4, nth=False) # primes <= 4 are 2 and 3
    6
    >>> primorial(1)
    2
    >>> primorial(1, nth=False)
    1
    >>> primorial(sqrt(101), nth=False)
    210

    One can argue that the primes are infinite since if you take
    a set of primes and multiply them together (e.g. the primorial) and
    then add or subtract 1, the result cannot be divided by any of the
    original factors, hence either 1 or more new primes must divide this
    product of primes.

    In this case, the number itself is a new prime:

    >>> factorint(primorial(4) + 1)
    {211: 1}

    In this case two new primes are the factors:

    >>> factorint(primorial(4) - 1)
    {11: 1, 19: 1}

    Here, some primes smaller and larger than the primes multiplied together
    are obtained:

    >>> p = list(primerange(10, 20))
    >>> sorted(set(primefactors(Mul(*p) + 1)).difference(set(p)))
    [2, 5, 31, 149]

    See Also
    ========

    primerange : Generate all primes in a given range
    """
    # nth=True requires an exact integer; otherwise truncate to int
    n = as_int(n) if nth else int(n)
    if n < 1:
        raise ValueError("primorial argument must be >= 1")
    factors = (prime(k) for k in range(1, n + 1)) if nth else primerange(2, n + 1)
    product = 1
    for q in factors:
        product *= q
    return product
def cycle_length(f, x0, nmax=None, values=False):
    """Detect the cycle of the iteration ``x <- f(x)`` seeded with ``x0``
    using Brent's algorithm.

    A single pair ``(lam, mu)`` is generated, where ``lam`` is the length
    of the repeating cycle and ``mu`` the number of terms before the cycle
    begins; if ``values`` is True, the terms of the sequence themselves
    are generated instead.

    Note: more than the first lam + mu terms may be produced; that is the
    cost of Brent-style detection, which nevertheless generally computes
    fewer terms than would be needed to find the exact stopping point,
    e.g. with Floyd's method.

    >>> from sympy.ntheory.generate import cycle_length

    This will yield successive values of i <-- func(i):

    >>> def gen(func, i):
    ...     while 1:
    ...         yield i
    ...         i = func(i)
    ...

    A function is defined:

    >>> func = lambda i: (i**2 + 1) % 51

    and given a seed of 4 and the mu and lambda terms calculated:

    >>> next(cycle_length(func, 4))
    (6, 3)

    We can see what is meant by looking at the output:

    >>> iter = cycle_length(func, 4, values=True)
    >>> list(iter)
    [4, 17, 35, 2, 5, 26, 14, 44, 50, 2, 5, 26, 14]

    There are 6 repeating values after the first 3.

    If a sequence is suspected of being longer than you might wish, ``nmax``
    can be used to exit early (and mu will be returned as None):

    >>> next(cycle_length(func, 4, nmax = 4))
    (4, None)
    >>> list(cycle_length(func, 4, nmax = 4, values=True))
    [4, 17, 35, 2]

    Code modified from:
        https://en.wikipedia.org/wiki/Cycle_detection.
    """
    limit = int(nmax or 0)
    # Main phase of Brent's method: hold an anchor fixed while a runner
    # advances, teleporting the anchor to the runner at powers of two.
    pow2 = cycle_len = 1
    anchor, runner = x0, f(x0)
    steps = 1
    if values:
        yield anchor
    while anchor != runner:
        if limit and steps >= limit:
            break
        steps += 1
        if pow2 == cycle_len:
            # open a new power-of-two search window
            anchor = runner
            pow2 *= 2
            cycle_len = 0
        if values:
            yield runner
        runner = f(runner)
        cycle_len += 1
    if limit and steps == limit:
        # bailed out early: cycle length is unknown, mu is reported as None
        if not values:
            yield limit, None
        return
    if values:
        return
    # cycle_len now equals lam; restart from x0 with a probe lam steps
    # ahead and walk both until they meet to measure mu (the tail length).
    tail = 0
    anchor = probe = x0
    for _ in range(cycle_len):
        probe = f(probe)
    while anchor != probe:
        anchor = f(anchor)
        probe = f(probe)
        tail += 1
    yield cycle_len, tail
def composite(nth):
    """ Return the nth composite number, with the composite numbers indexed as
    composite(1) = 4, composite(2) = 6, etc....

    Examples
    ========

    >>> from sympy import composite
    >>> composite(36)
    52
    >>> composite(1)
    4
    >>> composite(17737)
    20000

    See Also
    ========

    sympy.ntheory.primetest.isprime : Test if n is prime
    primerange : Generate all primes in a given range
    primepi : Return the number of primes less than or equal to n
    prime : Return the nth prime
    compositepi : Return the number of positive composite numbers less than or equal to n
    """
    n = as_int(nth)
    if n < 1:
        raise ValueError("nth must be a positive integer; composite(1) == 4")
    # the first ten composites are tabulated
    composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18]
    if n <= 10:
        return composite_arr[n - 1]
    a, b = 4, sieve._list[-1]
    # m - _primepi(m) - 1 counts the composites <= m (the "- 1" excludes
    # the unit 1, which is neither prime nor composite)
    if n <= b - _primepi(b) - 1:
        # answer lies within the sieved range: binary search on that count
        while a < b - 1:
            mid = (a + b) >> 1
            if mid - _primepi(mid) - 1 > n:
                b = mid
            else:
                a = mid
        if isprime(a):
            a -= 1
        return a
    from sympy.functions.elementary.exponential import log
    from sympy.functions.special.error_functions import li
    a = 4 # Lower bound for binary search
    b = int(n*(log(n) + log(log(n)))) # Upper bound for the search.
    # binary search using li(mid) as a fast approximation of pi(mid)
    while a < b:
        mid = (a + b) >> 1
        if mid - li(mid) - 1 > n:
            b = mid
        else:
            a = mid + 1
    # li only approximates pi, so walk down to the exact count
    n_composites = a - _primepi(a) - 1
    while n_composites > n:
        if not isprime(a):
            n_composites -= 1
        a -= 1
    if isprime(a):
        a -= 1
    return a
def compositepi(n):
    """ Return the number of positive composite numbers less than or equal
    to n.  The first positive composite is 4, i.e. compositepi(4) = 1.

    Examples
    ========

    >>> from sympy import compositepi
    >>> compositepi(25)
    15
    >>> compositepi(1000)
    831

    See Also
    ========

    sympy.ntheory.primetest.isprime : Test if n is prime
    primerange : Generate all primes in a given range
    prime : Return the nth prime
    primepi : Return the number of primes less than or equal to n
    composite : Return the nth composite number
    """
    limit = int(n)
    if limit < 4:
        # 4 is the smallest composite
        return 0
    # every integer in [2, limit] is either prime or composite; the extra
    # "- 1" removes the unit 1 from the count
    return limit - _primepi(limit) - 1
| Sieve |
python | pytorch__pytorch | torch/_subclasses/fake_tensor.py | {
"start": 3269,
"end": 3352
} | class ____(RuntimeError):
func: OpOverload
@dataclass
| DynamicOutputShapeException |
python | pytorch__pytorch | torch/utils/data/datapipes/dataframe/structures.py | {
"start": 230,
"end": 662
} | class ____(DataChunk):
"""DataChunkDF iterating over individual items inside of DataFrame containers, to access DataFrames user `raw_iterator`."""
def __iter__(self) -> Iterator[Any]:
for df in self.items:
yield from df_wrapper.iterate(df)
def __len__(self) -> int:
total_len = 0
for df in self.items:
total_len += df_wrapper.get_len(df)
return total_len
| DataChunkDF |
python | lepture__authlib | authlib/integrations/requests_client/oauth2_session.py | {
"start": 1133,
"end": 1426
} | class ____(AuthBase, ClientAuth):
"""Attaches OAuth Client Authentication to the given Request object."""
def __call__(self, req):
req.url, req.headers, req.body = self.prepare(
req.method, req.url, req.headers, req.body
)
return req
| OAuth2ClientAuth |
python | ethereum__web3.py | ens/_normalization.py | {
"start": 2035,
"end": 2115
} | class ____(Token):
type: Literal[TokenType.EMOJI] = TokenType.EMOJI
| EmojiToken |
python | kamyu104__LeetCode-Solutions | Python/maximum-alternating-subsequence-sum.py | {
"start": 29,
"end": 299
} | class ____(object):
def maxAlternatingSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = nums[0]
for i in xrange(len(nums)-1):
result += max(nums[i+1]-nums[i], 0)
return result
| Solution |
python | keras-team__keras | keras/src/metrics/regression_metrics.py | {
"start": 3958,
"end": 5233
} | class ____(reduction_metrics.MeanMetricWrapper):
"""Computes mean squared logarithmic error between `y_true` and `y_pred`.
Formula:
```python
loss = mean(square(log(y_true + 1) - log(y_pred + 1)))
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
>>> m = keras.metrics.MeanSquaredLogarithmicError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result()
0.12011322
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result()
0.24022643
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.MeanSquaredLogarithmicError()])
```
"""
def __init__(self, name="mean_squared_logarithmic_error", dtype=None):
super().__init__(mean_squared_logarithmic_error, name, dtype=dtype)
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.RootMeanSquaredError")
| MeanSquaredLogarithmicError |
python | getsentry__sentry | tests/sentry/monitors/endpoints/test_project_monitor_environment_details.py | {
"start": 419,
"end": 584
} | class ____(BaseDeleteMonitorTest, BaseProjectMonitorTest):
endpoint = "sentry-api-0-project-monitor-environment-details"
__test__ = True
| ProjectDeleteMonitorTest |
python | pypa__warehouse | warehouse/db.py | {
"start": 2920,
"end": 7463
} | class ____(ModelBase):
__abstract__ = True
id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
primary_key=True,
server_default=func.gen_random_uuid(),
)
# Create our session class here, this will stay stateless as we'll bind the
# engine to each new state we create instead of binding it to the session
# class.
Session = sessionmaker()
def listens_for(target, identifier, *args, **kwargs):
def deco(wrapped):
def callback(scanner, _name, wrapped):
wrapped = functools.partial(wrapped, scanner.config)
event.listen(target, identifier, wrapped, *args, **kwargs)
venusian.attach(wrapped, callback, category="warehouse")
return wrapped
return deco
def _configure_alembic(config):
alembic_cfg = alembic.config.Config()
alembic_cfg.set_main_option("script_location", "warehouse:migrations")
alembic_cfg.set_main_option("url", config.registry.settings["database.url"])
alembic_cfg.set_section_option("post_write_hooks", "hooks", "black, isort")
alembic_cfg.set_section_option("post_write_hooks", "black.type", "console_scripts")
alembic_cfg.set_section_option("post_write_hooks", "black.entrypoint", "black")
alembic_cfg.set_section_option("post_write_hooks", "isort.type", "console_scripts")
alembic_cfg.set_section_option("post_write_hooks", "isort.entrypoint", "isort")
return alembic_cfg
def _create_session(request):
metrics = request.find_service(IMetricsService, context=None)
metrics.increment("warehouse.db.session.start")
# Create our connection, most likely pulling it from the pool of
# connections
try:
connection = request.registry["sqlalchemy.engine"].connect()
except OperationalError:
# When we tried to connection to PostgreSQL, our database was not available for
# some reason. We're going to log it here and then raise our error. Most likely
# this is a transient error that will go away.
logger.warning("Got an error connecting to PostgreSQL", exc_info=True)
metrics.increment("warehouse.db.session.error", tags=["error_in:connecting"])
raise DatabaseNotAvailableError()
# Now, create a session from our connection
session = Session(bind=connection)
# Register only this particular session with zope.sqlalchemy
zope.sqlalchemy.register(session, transaction_manager=request.tm)
# Setup a callback that will ensure that everything is cleaned up at the
# end of our connection.
@request.add_finished_callback
def cleanup(request):
metrics.increment("warehouse.db.session.finished")
session.close()
connection.close()
# Check if we're in read-only mode. This _cannot_ use the request.flags
# request method, as that would lead to a circular call as AdminFlag objects
# must be queried from the DB
from warehouse.admin.flags import AdminFlag, AdminFlagValue
flag = session.get(AdminFlag, AdminFlagValue.READ_ONLY.value)
if flag and flag.enabled:
request.tm.doom()
# Return our session now that it's created and registered
return session
@event.listens_for(sqlalchemy.engine.Engine, "handle_error")
def unwrap_dbapi_exceptions(context):
"""
Listens for SQLAlchemy errors and raises the original
DBAPI (e.g., psycopg) exception instead.
"""
if (
isinstance(context.sqlalchemy_exception, DBAPIError)
and context.original_exception
):
raise context.original_exception from context.sqlalchemy_exception
def includeme(config):
# Add a directive to get an alembic configuration.
config.add_directive("alembic_config", _configure_alembic)
# Create our SQLAlchemy Engine.
config.registry["sqlalchemy.engine"] = sqlalchemy.create_engine(
config.registry.settings["database.url"],
isolation_level=DEFAULT_ISOLATION,
pool_size=35,
max_overflow=65,
pool_timeout=20,
)
# Possibly override how to fetch new db sessions from config.settings
# Useful in test fixtures
db_session_factory = config.registry.settings.get(
"warehouse.db_create_session", _create_session
)
config.add_request_method(db_session_factory, name="db", reify=True)
# Set a custom JSON serializer for psycopg
renderer = JSON()
renderer_factory = renderer(None)
def serialize_as_json(obj):
return renderer_factory(obj, {})
psycopg.types.json.set_json_dumps(serialize_as_json)
| Model |
python | python-poetry__poetry | src/poetry/console/commands/self/sync.py | {
"start": 233,
"end": 1043
} | class ____(SelfInstallCommand):
name = "self sync"
description = (
"Sync Poetry's own environment according to the locked packages (incl. addons)"
" required by this Poetry installation."
)
options: ClassVar[list[Option]] = [
opt for opt in SelfInstallCommand.options if opt.name != "sync"
]
help = f"""\
The <c1>self sync</c1> command ensures all additional (and no other) packages \
specified are installed in the current runtime environment.
This is managed in the \
<comment>{SelfInstallCommand.get_default_system_pyproject_file()}</> file.
You can add more packages using the <c1>self add</c1> command and remove them using \
the <c1>self remove</c1> command.
"""
@property
def _with_synchronization(self) -> bool:
return True
| SelfSyncCommand |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/__init__.py | {
"start": 17475,
"end": 18714
} | class ____(BaseAsset):
"""A representation of asset alias which is used to create asset during the runtime."""
name: str = attrs.field(validator=_validate_non_empty_identifier)
group: str = attrs.field(kw_only=True, default="asset", validator=_validate_identifier)
def as_expression(self) -> Any:
"""
Serialize the asset alias into its scheduling expression.
:meta private:
"""
return {"alias": {"name": self.name, "group": self.group}}
def iter_assets(self) -> Iterator[tuple[AssetUniqueKey, Asset]]:
return iter(())
def iter_asset_aliases(self) -> Iterator[tuple[str, AssetAlias]]:
yield self.name, self
def iter_asset_refs(self) -> Iterator[AssetRef]:
return iter(())
def iter_dag_dependencies(self, *, source: str = "", target: str = "") -> Iterator[DagDependency]:
"""
Iterate an asset alias and its resolved assets as dag dependency.
:meta private:
"""
yield DagDependency(
source=source or "asset-alias",
target=target or "asset-alias",
label=self.name,
dependency_type="asset-alias",
dependency_id=self.name,
)
| AssetAlias |
python | scikit-learn__scikit-learn | sklearn/externals/_arff.py | {
"start": 22748,
"end": 31232
} | class ____:
'''An ARFF decoder.'''
def __init__(self):
'''Constructor.'''
self._conversors = []
self._current_line = 0
def _decode_comment(self, s):
'''(INTERNAL) Decodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded comment.
'''
res = re.sub(r'^\%( )?', '', s)
return res
def _decode_relation(self, s):
'''(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string. The string must
start with alphabetic character and must be quoted if the name includes
spaces, otherwise this method will raise a `BadRelationFormat` exception.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded relation name.
'''
_, v = s.split(' ', 1)
v = v.strip()
if not _RE_RELATION.match(v):
raise BadRelationFormat()
res = str(v.strip('"\''))
return res
def _decode_attribute(self, s):
'''(INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespaces.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
'''
_, v = s.split(' ', 1)
v = v.strip()
# Verify the general structure of declaration
m = _RE_ATTRIBUTE.match(v)
if not m:
raise BadAttributeFormat()
# Extracts the raw name and type
name, type_ = m.groups()
# Extracts the final name
name = str(name.strip('"\''))
# Extracts the final type
if type_[:1] == "{" and type_[-1:] == "}":
try:
type_ = _parse_values(type_.strip('{} '))
except Exception:
raise BadAttributeType()
if isinstance(type_, dict):
raise BadAttributeType()
else:
# If not nominal, verify the type name
type_ = str(type_).upper()
if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
raise BadAttributeType()
return (name, type_)
def _decode(self, s, encode_nominal=False, matrix_type=DENSE):
'''Do the job the ``encode``.'''
# Make sure this method is idempotent
self._current_line = 0
# If string, convert to a list of lines
if isinstance(s, str):
s = s.strip('\r\n ').replace('\r\n', '\n').split('\n')
# Create the return object
obj: ArffContainerType = {
'description': '',
'relation': '',
'attributes': [],
'data': []
}
attribute_names = {}
# Create the data helper object
data = _get_data_object_for_decoding(matrix_type)
# Read all lines
STATE = _TK_DESCRIPTION
s = iter(s)
for row in s:
self._current_line += 1
# Ignore empty lines
row = row.strip(' \r\n')
if not row: continue
u_row = row.upper()
# DESCRIPTION -----------------------------------------------------
if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION:
obj['description'] += self._decode_comment(row) + '\n'
# -----------------------------------------------------------------
# RELATION --------------------------------------------------------
elif u_row.startswith(_TK_RELATION):
if STATE != _TK_DESCRIPTION:
raise BadLayout()
STATE = _TK_RELATION
obj['relation'] = self._decode_relation(row)
# -----------------------------------------------------------------
# ATTRIBUTE -------------------------------------------------------
elif u_row.startswith(_TK_ATTRIBUTE):
if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE:
raise BadLayout()
STATE = _TK_ATTRIBUTE
attr = self._decode_attribute(row)
if attr[0] in attribute_names:
raise BadAttributeName(attr[0], attribute_names[attr[0]])
else:
attribute_names[attr[0]] = self._current_line
obj['attributes'].append(attr)
if isinstance(attr[1], (list, tuple)):
if encode_nominal:
conversor = EncodedNominalConversor(attr[1])
else:
conversor = NominalConversor(attr[1])
else:
CONVERSOR_MAP = {'STRING': str,
'INTEGER': lambda x: int(float(x)),
'NUMERIC': float,
'REAL': float}
conversor = CONVERSOR_MAP[attr[1]]
self._conversors.append(conversor)
# -----------------------------------------------------------------
# DATA ------------------------------------------------------------
elif u_row.startswith(_TK_DATA):
if STATE != _TK_ATTRIBUTE:
raise BadLayout()
break
# -----------------------------------------------------------------
# COMMENT ---------------------------------------------------------
elif u_row.startswith(_TK_COMMENT):
pass
# -----------------------------------------------------------------
else:
# Never found @DATA
raise BadLayout()
def stream():
for row in s:
self._current_line += 1
row = row.strip()
# Ignore empty lines and comment lines.
if row and not row.startswith(_TK_COMMENT):
yield row
# Alter the data object
obj['data'] = data.decode_rows(stream(), self._conversors)
if obj['description'].endswith('\n'):
obj['description'] = obj['description'][:-1]
return obj
def decode(self, s, encode_nominal=False, return_type=DENSE):
'''Returns the Python representation of a given ARFF file.
When a file object is passed as an argument, this method reads lines
iteratively, avoiding to load unnecessary information to the memory.
:param s: a string or file object with the ARFF file.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
'''
try:
return self._decode(s, encode_nominal=encode_nominal,
matrix_type=return_type)
except ArffException as e:
e.line = self._current_line
raise e
| ArffDecoder |
python | weaviate__weaviate-python-client | weaviate/collections/batch/batch_wrapper.py | {
"start": 10914,
"end": 11307
} | class ____(Generic[T, P]):
def __init__(self, current_batch: T):
self.__current_batch: T = current_batch
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.__current_batch._shutdown()
def __enter__(self) -> P:
self.__current_batch._start()
return self.__current_batch # pyright: ignore[reportReturnType]
| _ContextManagerWrapper |
python | doocs__leetcode | lcp/LCP 41. 黑白翻转棋/Solution.py | {
"start": 0,
"end": 1118
} | class ____:
def flipChess(self, chessboard: List[str]) -> int:
def bfs(i: int, j: int) -> int:
q = deque([(i, j)])
g = [list(row) for row in chessboard]
g[i][j] = "X"
cnt = 0
while q:
i, j = q.popleft()
for a, b in dirs:
x, y = i + a, j + b
while 0 <= x < m and 0 <= y < n and g[x][y] == "O":
x, y = x + a, y + b
if 0 <= x < m and 0 <= y < n and g[x][y] == "X":
x, y = x - a, y - b
cnt += max(abs(x - i), abs(y - j))
while x != i or y != j:
g[x][y] = "X"
q.append((x, y))
x, y = x - a, y - b
return cnt
m, n = len(chessboard), len(chessboard[0])
dirs = [(a, b) for a in range(-1, 2) for b in range(-1, 2) if a != 0 or b != 0]
return max(
bfs(i, j) for i in range(m) for j in range(n) if chessboard[i][j] == "."
)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver43.py | {
"start": 156,
"end": 194
} | class ____[T](Protocol):
x: T
| Proto1 |
python | pytorch__pytorch | torch/distributions/transforms.py | {
"start": 15560,
"end": 18328
} | class ____(Transform):
"""
Unit Jacobian transform to reshape the rightmost part of a tensor.
Note that ``in_shape`` and ``out_shape`` must have the same number of
elements, just as for :meth:`torch.Tensor.reshape`.
Arguments:
in_shape (torch.Size): The input event shape.
out_shape (torch.Size): The output event shape.
cache_size (int): Size of cache. If zero, no caching is done. If one,
the latest single value is cached. Only 0 and 1 are supported. (Default 0.)
"""
bijective = True
def __init__(
self,
in_shape: torch.Size,
out_shape: torch.Size,
cache_size: int = 0,
) -> None:
self.in_shape = torch.Size(in_shape)
self.out_shape = torch.Size(out_shape)
if self.in_shape.numel() != self.out_shape.numel():
raise ValueError("in_shape, out_shape have different numbers of elements")
super().__init__(cache_size=cache_size)
@constraints.dependent_property
# pyrefly: ignore [bad-override]
def domain(self):
return constraints.independent(constraints.real, len(self.in_shape))
@constraints.dependent_property
# pyrefly: ignore [bad-override]
def codomain(self):
return constraints.independent(constraints.real, len(self.out_shape))
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return ReshapeTransform(self.in_shape, self.out_shape, cache_size=cache_size)
def _call(self, x):
batch_shape = x.shape[: x.dim() - len(self.in_shape)]
return x.reshape(batch_shape + self.out_shape)
def _inverse(self, y):
batch_shape = y.shape[: y.dim() - len(self.out_shape)]
return y.reshape(batch_shape + self.in_shape)
def log_abs_det_jacobian(self, x, y):
batch_shape = x.shape[: x.dim() - len(self.in_shape)]
return x.new_zeros(batch_shape)
def forward_shape(self, shape):
if len(shape) < len(self.in_shape):
raise ValueError("Too few dimensions on input")
cut = len(shape) - len(self.in_shape)
if shape[cut:] != self.in_shape:
raise ValueError(
f"Shape mismatch: expected {shape[cut:]} but got {self.in_shape}"
)
return shape[:cut] + self.out_shape
def inverse_shape(self, shape):
if len(shape) < len(self.out_shape):
raise ValueError("Too few dimensions on input")
cut = len(shape) - len(self.out_shape)
if shape[cut:] != self.out_shape:
raise ValueError(
f"Shape mismatch: expected {shape[cut:]} but got {self.out_shape}"
)
return shape[:cut] + self.in_shape
| ReshapeTransform |
python | doocs__leetcode | solution/3700-3799/3755.Find Maximum Balanced XOR Subarray Length/Solution.py | {
"start": 0,
"end": 372
} | class ____:
def maxBalancedSubarray(self, nums: List[int]) -> int:
d = {(0, 0): -1}
a = b = 0
ans = 0
for i, x in enumerate(nums):
a ^= x
b += 1 if x % 2 == 0 else -1
if (a, b) in d:
ans = max(ans, i - d[(a, b)])
else:
d[(a, b)] = i
return ans
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/llm.py | {
"start": 3373,
"end": 3966
} | class ____(BaseEvent):
"""
LLMCompletionEndEvent.
Args:
prompt (str): The prompt to be completed.
response (CompletionResponse): Completion response.
"""
prompt: str
response: CompletionResponse
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMCompletionEndEvent"
def model_dump(self, **kwargs: Any) -> Dict[str, Any]:
if isinstance(self.response.raw, BaseModel):
self.response.raw = self.response.raw.model_dump()
return super().model_dump(**kwargs)
| LLMCompletionEndEvent |
python | pola-rs__polars | py-polars/src/polars/functions/lazy.py | {
"start": 23754,
"end": 71866
} | class ____:
def __init__(
self,
function: Callable[[Sequence[Series]], Series | Any],
*,
returns_scalar: bool,
) -> None:
self.function = function
self.returns_scalar = returns_scalar
def __call__(
self, sl: list[plr.PySeries], *args: Any, **kwargs: Any
) -> plr.PySeries:
return_dtype = kwargs["return_dtype"]
slp = [wrap_s(s) for s in sl]
# ufunc and numba don't expect return_dtype
try:
rv = self.function(slp, *args, **kwargs)
except TypeError as e:
if "unexpected keyword argument 'return_dtype'" in e.args[0]:
kwargs.pop("return_dtype")
rv = self.function(slp, *args, **kwargs)
else:
raise
if _check_for_numpy(rv) and isinstance(rv, np.ndarray):
rv = pl.Series(rv, dtype=return_dtype)
if isinstance(rv, pl.Series):
return rv._s
elif self.returns_scalar:
return pl.Series([rv], dtype=return_dtype)._s
else:
msg = f"`map` with `returns_scalar=False` must return a Series; found {qualified_type_name(rv)!r}.\n\nIf `returns_scalar` is set to `True`, a returned value can be a scalar value."
raise TypeError(msg)
def map_batches(
exprs: Sequence[str | Expr],
function: Callable[[Sequence[Series]], Series | Any],
return_dtype: PolarsDataType | pl.DataTypeExpr | None = None,
*,
is_elementwise: bool = False,
returns_scalar: bool = False,
) -> Expr:
"""
Map a custom function over multiple columns/expressions.
Produces a single Series result.
.. warning::
This method is much slower than the native expressions API.
Only use it if you cannot implement your logic otherwise.
Parameters
----------
exprs
Expression(s) representing the input Series to the function.
function
Function to apply over the input.
return_dtype
Datatype of the output Series.
It is recommended to set this whenever possible. If this is `None`, it tries
to infer the datatype by calling the function with dummy data and looking at
the output.
is_elementwise
Set to true if the operations is elementwise for better performance
and optimization.
An elementwise operations has unit or equal length for all inputs
and can be ran sequentially on slices without results being affected.
returns_scalar
If the function returns a scalar, by default it will be wrapped in
a list in the output, since the assumption is that the function
always returns something Series-like. If you want to keep the
result as a scalar, set this argument to True.
Notes
-----
A UDF passed to `map_batches` must be pure, meaning that it cannot modify
or depend on state other than its arguments. We may call the function
with arbitrary input data.
Returns
-------
Expr
Expression with the data type given by `return_dtype`.
Examples
--------
>>> def test_func(a, b, c):
... return a + b + c
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 4],
... "b": [4, 5, 6, 7],
... }
... )
>>>
>>> df.with_columns(
... (
... pl.struct(["a", "b"]).map_batches(
... lambda x: test_func(x.struct.field("a"), x.struct.field("b"), 1)
... )
... ).alias("a+b+c")
... )
shape: (4, 3)
┌─────┬─────┬───────┐
│ a ┆ b ┆ a+b+c │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═══════╡
│ 1 ┆ 4 ┆ 6 │
│ 2 ┆ 5 ┆ 8 │
│ 3 ┆ 6 ┆ 10 │
│ 4 ┆ 7 ┆ 12 │
└─────┴─────┴───────┘
"""
pyexprs = parse_into_list_of_expressions(exprs)
return_dtype_expr = (
parse_into_datatype_expr(return_dtype)._pydatatype_expr
if return_dtype is not None
else None
)
return wrap_expr(
plr.map_expr(
pyexprs,
_map_batches_wrapper(function, returns_scalar=returns_scalar),
return_dtype_expr,
is_elementwise=is_elementwise,
returns_scalar=returns_scalar,
)
)
def map_groups(
exprs: Sequence[str | Expr],
function: Callable[[Sequence[Series]], Series | Any],
return_dtype: PolarsDataType | pl.DataTypeExpr | None = None,
*,
is_elementwise: bool = False,
returns_scalar: bool = False,
) -> Expr:
"""
Apply a custom/user-defined function (UDF) in a GroupBy context.
.. warning::
This method is much slower than the native expressions API.
Only use it if you cannot implement your logic otherwise.
Parameters
----------
exprs
Expression(s) representing the input Series to the function.
function
Function to apply over the input; should be of type Callable[[Series], Series].
return_dtype
Datatype of the output Series.
It is recommended to set this whenever possible. If this is `None`, it tries
to infer the datatype by calling the function with dummy data and looking at
the output.
is_elementwise
Set to true if the operations is elementwise for better performance
and optimization.
An elementwise operations has unit or equal length for all inputs
and can be ran sequentially on slices without results being affected.
returns_scalar
If the function returns a single scalar as output.
Notes
-----
A UDF passed to `map_batches` must be pure, meaning that it cannot modify
or depend on state other than its arguments. Polars may call the function
with arbitrary input data.
Returns
-------
Expr
Expression with the data type given by `return_dtype`.
Examples
--------
>>> df = pl.DataFrame(
... {
... "group": [1, 1, 2],
... "a": [1, 3, 3],
... "b": [5, 6, 7],
... }
... )
>>> df
shape: (3, 3)
┌───────┬─────┬─────┐
│ group ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═══════╪═════╪═════╡
│ 1 ┆ 1 ┆ 5 │
│ 1 ┆ 3 ┆ 6 │
│ 2 ┆ 3 ┆ 7 │
└───────┴─────┴─────┘
>>> (
... df.group_by("group").agg(
... pl.map_groups(
... exprs=["a", "b"],
... function=lambda list_of_series: list_of_series[0]
... / list_of_series[0].sum()
... + list_of_series[1],
... return_dtype=pl.Float64,
... ).alias("my_custom_aggregation")
... )
... ).sort("group")
shape: (2, 2)
┌───────┬───────────────────────┐
│ group ┆ my_custom_aggregation │
│ --- ┆ --- │
│ i64 ┆ list[f64] │
╞═══════╪═══════════════════════╡
│ 1 ┆ [5.25, 6.75] │
│ 2 ┆ [8.0] │
└───────┴───────────────────────┘
The output for group `1` can be understood as follows:
- group `1` contains Series `'a': [1, 3]` and `'b': [5, 6]`
- applying the function to those lists of Series, one gets the output
`[1 / 4 + 5, 3 / 4 + 6]`, i.e. `[5.25, 6.75]`
"""
return map_batches(
exprs,
function,
return_dtype,
is_elementwise=is_elementwise,
returns_scalar=returns_scalar,
)
def _row_encode(
exprs: pl.Selector | pl.Expr | Sequence[str | pl.Expr],
*,
unordered: bool = False,
descending: list[bool] | None = None,
nulls_last: list[bool] | None = None,
) -> Expr:
if isinstance(exprs, pl.Selector):
exprs = [exprs.as_expr()]
elif isinstance(exprs, pl.Expr):
exprs = [exprs]
pyexprs = parse_into_list_of_expressions(exprs)
if unordered:
assert descending is None
assert nulls_last is None
result = plr.PyExpr.row_encode_unordered(pyexprs)
else:
result = plr.PyExpr.row_encode_ordered(pyexprs, descending, nulls_last)
return wrap_expr(result)
def _wrap_acc_lambda(
function: Callable[[Series, Series], Series],
) -> Callable[[tuple[plr.PySeries, plr.PySeries]], plr.PySeries]:
def wrapper(t: tuple[plr.PySeries, plr.PySeries]) -> plr.PySeries:
a, b = t
return function(wrap_s(a), wrap_s(b))._s
return wrapper
def fold(
acc: IntoExpr,
function: Callable[[Series, Series], Series],
exprs: Sequence[Expr | str] | Expr,
*,
returns_scalar: bool = False,
return_dtype: pl.DataTypeExpr | PolarsDataType | None = None,
) -> Expr:
"""
Accumulate over multiple columns horizontally/ row wise with a left fold.
Parameters
----------
acc
Accumulator Expression. This is the value that will be initialized when the fold
starts. For a sum this could for instance be lit(0).
function
Function to apply over the accumulator and the value.
Fn(acc, value) -> new_value
exprs
Expressions to aggregate over. May also be a wildcard expression.
returns_scalar
Whether or not `function` applied returns a scalar. This must be set correctly
by the user.
return_dtype
Output datatype.
If not set, the dtype will be inferred based on the dtype
of the accumulator.
Notes
-----
If you simply want the first encountered expression as accumulator,
consider using `reduce`.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": [3, 4, 5],
... "c": [5, 6, 7],
... }
... )
>>> df
shape: (3, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1 ┆ 3 ┆ 5 │
│ 2 ┆ 4 ┆ 6 │
│ 3 ┆ 5 ┆ 7 │
└─────┴─────┴─────┘
Horizontally sum over all columns and add 1.
>>> df.select(
... pl.fold(
... acc=pl.lit(1), function=lambda acc, x: acc + x, exprs=pl.col("*")
... ).alias("sum"),
... )
shape: (3, 1)
┌─────┐
│ sum │
│ --- │
│ i32 │
╞═════╡
│ 10 │
│ 13 │
│ 16 │
└─────┘
You can also apply a condition/predicate on all columns:
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": [0, 1, 2],
... }
... )
>>> df
shape: (3, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 0 │
│ 2 ┆ 1 │
│ 3 ┆ 2 │
└─────┴─────┘
>>> df.filter(
... pl.fold(
... acc=pl.lit(True),
... function=lambda acc, x: acc & x,
... exprs=pl.col("*") > 1,
... )
... )
shape: (1, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 3 ┆ 2 │
└─────┴─────┘
"""
# in case of col("*")
pyacc = parse_into_expression(acc, str_as_lit=True)
if isinstance(exprs, pl.Expr):
exprs = [exprs]
rt: plr.PyDataTypeExpr | None = None
if return_dtype is not None:
rt = parse_into_datatype_expr(return_dtype)._pydatatype_expr
pyexprs = parse_into_list_of_expressions(exprs)
return wrap_expr(
plr.fold(
pyacc,
_wrap_acc_lambda(function),
pyexprs,
returns_scalar=returns_scalar,
return_dtype=rt,
)
)
def reduce(
function: Callable[[Series, Series], Series],
exprs: Sequence[Expr | str] | Expr,
*,
returns_scalar: bool = False,
return_dtype: pl.DataTypeExpr | PolarsDataType | None = None,
) -> Expr:
"""
Accumulate over multiple columns horizontally/ row wise with a left fold.
Parameters
----------
function
Function to apply over the accumulator and the value.
Fn(acc, value) -> new_value
exprs
Expressions to aggregate over. May also be a wildcard expression.
returns_scalar
Whether or not `function` applied returns a scalar. This must be set correctly
by the user.
return_dtype
Output datatype.
If not set, the dtype will be inferred based on the dtype of the input
expressions.
Notes
-----
See `fold` for the version with an explicit accumulator.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": [0, 1, 2],
... }
... )
>>> df
shape: (3, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 0 │
│ 2 ┆ 1 │
│ 3 ┆ 2 │
└─────┴─────┘
Horizontally sum over all columns.
>>> df.select(
... pl.reduce(function=lambda acc, x: acc + x, exprs=pl.col("*")).alias("sum")
... )
shape: (3, 1)
┌─────┐
│ sum │
│ --- │
│ i64 │
╞═════╡
│ 1 │
│ 3 │
│ 5 │
└─────┘
"""
if isinstance(exprs, pl.Expr):
exprs = [exprs]
rt: plr.PyDataTypeExpr | None = None
if return_dtype is not None:
rt = parse_into_datatype_expr(return_dtype)._pydatatype_expr
pyexprs = parse_into_list_of_expressions(exprs)
return wrap_expr(
plr.reduce(
_wrap_acc_lambda(function),
pyexprs,
returns_scalar=returns_scalar,
return_dtype=rt,
)
)
def cum_fold(
acc: IntoExpr,
function: Callable[[Series, Series], Series],
exprs: Sequence[Expr | str] | Expr,
*,
returns_scalar: bool = False,
return_dtype: pl.DataTypeExpr | PolarsDataType | None = None,
include_init: bool = False,
) -> Expr:
"""
Cumulatively fold horizontally across columns with a left fold.
Every cumulative result is added as a separate field in a Struct column.
Parameters
----------
acc
Accumulator expression. This is the value that will be initialized when the fold
starts. For a sum this could for instance be lit(0).
function
Function to apply over the accumulator and the value.
Fn(acc, value) -> new_value
exprs
Expressions to aggregate over. May also be a wildcard expression.
returns_scalar
Whether or not `function` applied returns a scalar. This must be set correctly
by the user.
return_dtype
Output datatype.
If not set, the dtype will be inferred based on the dtype of the accumulator.
include_init
Include the initial accumulator state as struct field.
Notes
-----
If you simply want the first encountered expression as accumulator,
consider using :func:`cum_reduce`.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": [3, 4, 5],
... "c": [5, 6, 7],
... }
... )
>>> df.with_columns(
... pl.cum_fold(acc=pl.lit(1), function=lambda acc, x: acc + x, exprs=pl.all())
... )
shape: (3, 4)
┌─────┬─────┬─────┬───────────┐
│ a ┆ b ┆ c ┆ cum_fold │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 ┆ struct[3] │
╞═════╪═════╪═════╪═══════════╡
│ 1 ┆ 3 ┆ 5 ┆ {2,5,10} │
│ 2 ┆ 4 ┆ 6 ┆ {3,7,13} │
│ 3 ┆ 5 ┆ 7 ┆ {4,9,16} │
└─────┴─────┴─────┴───────────┘
"""
# in case of col("*")
pyacc = parse_into_expression(acc, str_as_lit=True)
if isinstance(exprs, pl.Expr):
exprs = [exprs]
rt: plr.PyDataTypeExpr | None = None
if return_dtype is not None:
rt = parse_into_datatype_expr(return_dtype)._pydatatype_expr
pyexprs = parse_into_list_of_expressions(exprs)
return wrap_expr(
plr.cum_fold(
pyacc,
_wrap_acc_lambda(function),
pyexprs,
returns_scalar=returns_scalar,
return_dtype=rt,
include_init=include_init,
).alias("cum_fold")
)
def cum_reduce(
function: Callable[[Series, Series], Series],
exprs: Sequence[Expr | str] | Expr,
*,
returns_scalar: bool = False,
return_dtype: pl.DataTypeExpr | PolarsDataType | None = None,
) -> Expr:
"""
Cumulatively reduce horizontally across columns with a left fold.
Every cumulative result is added as a separate field in a Struct column.
Parameters
----------
function
Function to apply over the accumulator and the value.
Fn(acc, value) -> new_value
exprs
Expressions to aggregate over. May also be a wildcard expression.
returns_scalar
Whether or not `function` applied returns a scalar. This must be set correctly
by the user.
return_dtype
Output datatype.
If not set, the dtype will be inferred based on the dtype of the input
expressions.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": [3, 4, 5],
... "c": [5, 6, 7],
... }
... )
>>> df.with_columns(pl.cum_reduce(function=lambda acc, x: acc + x, exprs=pl.all()))
shape: (3, 4)
┌─────┬─────┬─────┬────────────┐
│ a ┆ b ┆ c ┆ cum_reduce │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 ┆ struct[3] │
╞═════╪═════╪═════╪════════════╡
│ 1 ┆ 3 ┆ 5 ┆ {1,4,9} │
│ 2 ┆ 4 ┆ 6 ┆ {2,6,12} │
│ 3 ┆ 5 ┆ 7 ┆ {3,8,15} │
└─────┴─────┴─────┴────────────┘
"""
# in case of col("*")
if isinstance(exprs, pl.Expr):
exprs = [exprs]
rt: plr.PyDataTypeExpr | None = None
if return_dtype is not None:
rt = parse_into_datatype_expr(return_dtype)._pydatatype_expr
pyexprs = parse_into_list_of_expressions(exprs)
return wrap_expr(
plr.cum_reduce(
_wrap_acc_lambda(function),
pyexprs,
returns_scalar=returns_scalar,
return_dtype=rt,
).alias("cum_reduce")
)
def arctan2(y: str | Expr, x: str | Expr) -> Expr:
"""
Compute two argument arctan in radians.
Returns the angle (in radians) in the plane between the
positive x-axis and the ray from the origin to (x,y).
Parameters
----------
y
Column name or Expression.
x
Column name or Expression.
Examples
--------
>>> c = (2**0.5) / 2
>>> df = pl.DataFrame(
... {
... "y": [c, -c, c, -c],
... "x": [c, c, -c, -c],
... }
... )
>>> df.with_columns(pl.arctan2("y", "x").alias("atan2"))
shape: (4, 3)
┌───────────┬───────────┬───────────┐
│ y ┆ x ┆ atan2 │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ f64 │
╞═══════════╪═══════════╪═══════════╡
│ 0.707107 ┆ 0.707107 ┆ 0.785398 │
│ -0.707107 ┆ 0.707107 ┆ -0.785398 │
│ 0.707107 ┆ -0.707107 ┆ 2.356194 │
│ -0.707107 ┆ -0.707107 ┆ -2.356194 │
└───────────┴───────────┴───────────┘
"""
if isinstance(y, str):
y = F.col(y)
if isinstance(x, str):
x = F.col(x)
if not hasattr(x, "_pyexpr"):
msg = f"`arctan2` expected a `str` or `Expr` got a `{qualified_type_name(x)}`"
raise TypeError(msg)
if not hasattr(y, "_pyexpr"):
msg = f"`arctan2` expected a `str` or `Expr` got a `{qualified_type_name(y)}`"
raise TypeError(msg)
return wrap_expr(plr.arctan2(y._pyexpr, x._pyexpr))
@deprecated("`arctan2d` is deprecated; use `arctan2` followed by `.degrees()` instead.")
def arctan2d(y: str | Expr, x: str | Expr) -> Expr:
"""
Compute two argument arctan in degrees.
.. deprecated:: 1.0.0
Use `arctan2` followed by :meth:`Expr.degrees` instead.
Returns the angle (in degrees) in the plane between the positive x-axis
and the ray from the origin to (x,y).
Parameters
----------
y
Column name or Expression.
x
Column name or Expression.
Examples
--------
>>> c = (2**0.5) / 2
>>> df = pl.DataFrame(
... {
... "y": [c, -c, c, -c],
... "x": [c, c, -c, -c],
... }
... )
>>> df.select( # doctest: +SKIP
... pl.arctan2d("y", "x").alias("atan2d"),
... pl.arctan2("y", "x").alias("atan2"),
... )
shape: (4, 2)
┌────────┬───────────┐
│ atan2d ┆ atan2 │
│ --- ┆ --- │
│ f64 ┆ f64 │
╞════════╪═══════════╡
│ 45.0 ┆ 0.785398 │
│ -45.0 ┆ -0.785398 │
│ 135.0 ┆ 2.356194 │
│ -135.0 ┆ -2.356194 │
└────────┴───────────┘
"""
return arctan2(y, x).degrees()
def exclude(
columns: str | PolarsDataType | Collection[str] | Collection[PolarsDataType],
*more_columns: str | PolarsDataType,
) -> Expr:
"""
Represent all columns except for the given columns.
Syntactic sugar for `pl.all().exclude(columns)`.
Parameters
----------
columns
The name or datatype of the column(s) to exclude. Accepts regular expression
input. Regular expressions should start with `^` and end with `$`.
*more_columns
Additional names or datatypes of columns to exclude, specified as positional
arguments.
Examples
--------
Exclude by column name(s):
>>> df = pl.DataFrame(
... {
... "aa": [1, 2, 3],
... "ba": ["a", "b", None],
... "cc": [None, 2.5, 1.5],
... }
... )
>>> df.select(pl.exclude("ba"))
shape: (3, 2)
┌─────┬──────┐
│ aa ┆ cc │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪══════╡
│ 1 ┆ null │
│ 2 ┆ 2.5 │
│ 3 ┆ 1.5 │
└─────┴──────┘
Exclude by regex, e.g. removing all columns whose names end with the letter "a":
>>> df.select(pl.exclude("^.*a$"))
shape: (3, 1)
┌──────┐
│ cc │
│ --- │
│ f64 │
╞══════╡
│ null │
│ 2.5 │
│ 1.5 │
└──────┘
Exclude by dtype(s), e.g. removing all columns of type Int64 or Float64:
>>> df.select(pl.exclude([pl.Int64, pl.Float64]))
shape: (3, 1)
┌──────┐
│ ba │
│ --- │
│ str │
╞══════╡
│ a │
│ b │
│ null │
└──────┘
"""
return F.col("*").exclude(columns, *more_columns)
def groups(column: str) -> Expr:
"""
Syntactic sugar for `pl.col("foo").agg_groups()`.
.. deprecated:: 1.35
Use `df.with_row_index().group_by(...).agg(pl.col('index'))` instead.
This method will be removed in Polars 2.0.
"""
warnings.warn(
"pl.groups() is deprecated and will be removed in Polars 2.0. "
"Use df.with_row_index().group_by(...).agg(pl.col('index')) instead.",
DeprecationWarning,
stacklevel=2,
)
return F.col(column).agg_groups()
def quantile(
column: str,
quantile: float | Expr,
interpolation: QuantileMethod = "nearest",
) -> Expr:
"""
Syntactic sugar for `pl.col("foo").quantile(..)`.
Parameters
----------
column
Column name.
quantile
Quantile between 0.0 and 1.0.
interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
Interpolation method.
"""
return F.col(column).quantile(quantile, interpolation)
def arg_sort_by(
exprs: IntoExpr | Iterable[IntoExpr],
*more_exprs: IntoExpr,
descending: bool | Sequence[bool] = False,
nulls_last: bool | Sequence[bool] = False,
multithreaded: bool = True,
maintain_order: bool = False,
) -> Expr:
"""
Return the row indices that would sort the column(s).
Parameters
----------
exprs
Column(s) to arg sort by. Accepts expression input. Strings are parsed as column
names.
*more_exprs
Additional columns to arg sort by, specified as positional arguments.
descending
Sort in descending order. When sorting by multiple columns, can be specified
per column by passing a sequence of booleans.
nulls_last
Place null values last.
multithreaded
Sort using multiple threads.
maintain_order
Whether the order should be maintained if elements are equal.
See Also
--------
Expr.gather: Take values by index.
Expr.rank : Get the rank of each row.
Examples
--------
Pass a single column name to compute the arg sort by that column.
>>> df = pl.DataFrame(
... {
... "a": [0, 1, 1, 0],
... "b": [3, 2, 3, 2],
... "c": [1, 2, 3, 4],
... }
... )
>>> df.select(pl.arg_sort_by("a"))
shape: (4, 1)
┌─────┐
│ a │
│ --- │
│ u32 │
╞═════╡
│ 0 │
│ 3 │
│ 1 │
│ 2 │
└─────┘
Compute the arg sort by multiple columns by either passing a list of columns, or by
specifying each column as a positional argument.
>>> df.select(pl.arg_sort_by(["a", "b"], descending=True))
shape: (4, 1)
┌─────┐
│ a │
│ --- │
│ u32 │
╞═════╡
│ 2 │
│ 1 │
│ 0 │
│ 3 │
└─────┘
Use gather to apply the arg sort to other columns.
>>> df.select(pl.col("c").gather(pl.arg_sort_by("a")))
shape: (4, 1)
┌─────┐
│ c │
│ --- │
│ i64 │
╞═════╡
│ 1 │
│ 4 │
│ 2 │
│ 3 │
└─────┘
"""
exprs = parse_into_list_of_expressions(exprs, *more_exprs)
descending = extend_bool(descending, len(exprs), "descending", "exprs")
nulls_last = extend_bool(nulls_last, len(exprs), "nulls_last", "exprs")
return wrap_expr(
plr.arg_sort_by(exprs, descending, nulls_last, multithreaded, maintain_order)
)
@deprecate_streaming_parameter()
@forward_old_opt_flags()
def collect_all(
lazy_frames: Iterable[LazyFrame],
*,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
no_optimization: bool = False,
slice_pushdown: bool = True,
comm_subplan_elim: bool = True,
comm_subexpr_elim: bool = True,
cluster_with_columns: bool = True,
collapse_joins: bool = True,
optimizations: QueryOptFlags = DEFAULT_QUERY_OPT_FLAGS,
engine: EngineType = "auto",
) -> list[DataFrame]:
"""
Collect multiple LazyFrames at the same time.
This can run all the computation graphs in parallel or combined.
Common Subplan Elimination is applied on the combined plan, meaning
that diverging queries will run only once.
Parameters
----------
lazy_frames
A list of LazyFrames to collect.
type_coercion
Do type coercion optimization.
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
predicate_pushdown
Do predicate pushdown optimization.
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
projection_pushdown
Do projection pushdown optimization.
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
simplify_expression
Run simplify expressions optimization.
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
no_optimization
Turn off optimizations.
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
slice_pushdown
Slice pushdown optimization.
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
comm_subplan_elim
Will try to cache branching subplans that occur on self-joins or unions.
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
comm_subexpr_elim
Common subexpressions will be cached and reused.
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
cluster_with_columns
Combine sequential independent calls to with_columns
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
collapse_joins
Collapse a join and filters into a faster join
.. deprecated:: 1.30.0
Use the `optimizations` parameters.
optimizations
The optimization passes done during query optimization.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
engine
Select the engine used to process the query, optional.
At the moment, if set to `"auto"` (default), the query
is run using the polars in-memory engine. Polars will also
attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
environment variable. If it cannot run the query using the
selected engine, the query is run using the polars in-memory
engine.
.. note::
The GPU engine does not support async, or running in the
background. If either are enabled, then GPU execution is switched off.
Returns
-------
list of DataFrames
The collected DataFrames, returned in the same order as the input LazyFrames.
"""
if engine == "streaming":
issue_unstable_warning("streaming mode is considered unstable.")
lfs = [lf._ldf for lf in lazy_frames]
out = plr.collect_all(lfs, engine, optimizations._pyoptflags)
# wrap the pydataframes into dataframe
result = [wrap_df(pydf) for pydf in out]
return result
@overload
def collect_all_async(
lazy_frames: Iterable[LazyFrame],
*,
gevent: Literal[True],
engine: EngineType = "auto",
optimizations: QueryOptFlags = DEFAULT_QUERY_OPT_FLAGS,
) -> _GeventDataFrameResult[list[DataFrame]]: ...
@overload
def collect_all_async(
lazy_frames: Iterable[LazyFrame],
*,
gevent: Literal[False] = False,
engine: EngineType = "auto",
optimizations: QueryOptFlags = DEFAULT_QUERY_OPT_FLAGS,
) -> Awaitable[list[DataFrame]]: ...
@unstable()
@deprecate_streaming_parameter()
def collect_all_async(
lazy_frames: Iterable[LazyFrame],
*,
gevent: bool = False,
engine: EngineType = "auto",
optimizations: QueryOptFlags = DEFAULT_QUERY_OPT_FLAGS,
) -> Awaitable[list[DataFrame]] | _GeventDataFrameResult[list[DataFrame]]:
"""
Collect multiple LazyFrames at the same time asynchronously in thread pool.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
Collects into a list of DataFrame (like :func:`polars.collect_all`),
but instead of returning them directly, they are scheduled to be collected
inside thread pool, while this method returns almost instantly.
May be useful if you use gevent or asyncio and want to release control to other
greenlets/tasks while LazyFrames are being collected.
Parameters
----------
lazy_frames
A list of LazyFrames to collect.
gevent
Return wrapper to `gevent.event.AsyncResult` instead of Awaitable
optimizations
The optimization passes done during query optimization.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
engine
Select the engine used to process the query, optional.
At the moment, if set to `"auto"` (default), the query
is run using the polars in-memory engine. Polars will also
attempt to use the engine set by the `POLARS_ENGINE_AFFINITY`
environment variable. If it cannot run the query using the
selected engine, the query is run using the polars in-memory
engine.
.. note::
The GPU engine does not support async, or running in the
background. If either are enabled, then GPU execution is switched off.
See Also
--------
polars.collect_all : Collect multiple LazyFrames at the same time.
LazyFrame.collect_async : To collect single frame.
Notes
-----
In case of error `set_exception` is used on
`asyncio.Future`/`gevent.event.AsyncResult` and will be reraised by them.
Returns
-------
If `gevent=False` (default) then returns awaitable.
If `gevent=True` then returns wrapper that has
`.get(block=True, timeout=None)` method.
"""
if engine == "streaming":
issue_unstable_warning("streaming mode is considered unstable.")
result: (
_GeventDataFrameResult[list[DataFrame]] | _AioDataFrameResult[list[DataFrame]]
) = _GeventDataFrameResult() if gevent else _AioDataFrameResult()
lfs = [lf._ldf for lf in lazy_frames]
plr.collect_all_with_callback(
lfs, engine, optimizations._pyoptflags, result._callback_all
)
return result
@unstable()
def explain_all(
lazy_frames: Iterable[LazyFrame],
*,
optimizations: QueryOptFlags = DEFAULT_QUERY_OPT_FLAGS,
) -> str:
"""
Explain multiple LazyFrames as if passed to `collect_all`.
Common Subplan Elimination is applied on the combined plan, meaning
that diverging queries will run only once.
Parameters
----------
lazy_frames
A list of LazyFrames to collect.
optimizations
The optimization passes done during query optimization.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
Returns
-------
Explained plan.
"""
lfs = [lf._ldf for lf in lazy_frames]
return plr.explain_all(lfs, optimizations._pyoptflags)
@overload
def select(
*exprs: IntoExpr | Iterable[IntoExpr],
eager: Literal[True] = ...,
**named_exprs: IntoExpr,
) -> DataFrame: ...
@overload
def select(
*exprs: IntoExpr | Iterable[IntoExpr],
eager: Literal[False],
**named_exprs: IntoExpr,
) -> LazyFrame: ...
def select(
*exprs: IntoExpr | Iterable[IntoExpr], eager: bool = True, **named_exprs: IntoExpr
) -> DataFrame | LazyFrame:
"""
Run polars expressions without a context.
This is syntactic sugar for running `df.select` on an empty DataFrame
(or LazyFrame if eager=False).
Parameters
----------
*exprs
Column(s) to select, specified as positional arguments.
Accepts expression input. Strings are parsed as column names,
other non-expression inputs are parsed as literals.
eager
Evaluate immediately and return a `DataFrame` (default); if set to `False`,
return a `LazyFrame` instead.
**named_exprs
Additional columns to select, specified as keyword arguments.
The columns will be renamed to the keyword used.
Returns
-------
DataFrame or LazyFrame
Examples
--------
>>> foo = pl.Series("foo", [1, 2, 3])
>>> bar = pl.Series("bar", [3, 2, 1])
>>> pl.select(min=pl.min_horizontal(foo, bar))
shape: (3, 1)
┌─────┐
│ min │
│ --- │
│ i64 │
╞═════╡
│ 1 │
│ 2 │
│ 1 │
└─────┘
>>> pl.select(pl.int_range(0, 100_000, 2).alias("n"), eager=False).filter(
... pl.col("n") % 22_500 == 0
... ).collect()
shape: (5, 1)
┌───────┐
│ n │
│ --- │
│ i64 │
╞═══════╡
│ 0 │
│ 22500 │
│ 45000 │
│ 67500 │
│ 90000 │
└───────┘
"""
empty_frame = pl.DataFrame() if eager else pl.LazyFrame()
return empty_frame.select(*exprs, **named_exprs)
@overload
def arg_where(condition: Expr | Series, *, eager: Literal[False] = ...) -> Expr: ...
@overload
def arg_where(condition: Expr | Series, *, eager: Literal[True]) -> Series: ...
def arg_where(condition: Expr | Series, *, eager: bool = False) -> Expr | Series:
"""
Return indices where `condition` evaluates `True`.
Parameters
----------
condition
Boolean expression to evaluate
eager
Evaluate immediately and return a `Series`; this requires that the given
condition is itself a `Series`. If set to `False` (default), return
an expression instead.
See Also
--------
Series.arg_true : Return indices where Series is True
Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3, 4, 5]})
>>> df.select(
... [
... pl.arg_where(pl.col("a") % 2 == 0),
... ]
... ).to_series()
shape: (2,)
Series: 'a' [u32]
[
1
3
]
"""
if eager:
if not isinstance(condition, pl.Series):
msg = (
"expected Series in 'arg_where' if 'eager=True', got"
f" {type(condition).__name__!r}"
)
raise ValueError(msg)
return condition.to_frame().select(arg_where(F.col(condition.name))).to_series()
else:
condition_pyexpr = parse_into_expression(condition)
return wrap_expr(plr.arg_where(condition_pyexpr))
@overload
def coalesce(
exprs: IntoExpr | Iterable[IntoExpr],
*more_exprs: IntoExpr,
eager: Literal[False] = ...,
) -> Expr: ...
@overload
def coalesce(
exprs: IntoExpr | Iterable[IntoExpr],
*more_exprs: IntoExpr,
eager: Literal[True],
) -> Series: ...
@overload
def coalesce(
exprs: IntoExpr | Iterable[IntoExpr],
*more_exprs: IntoExpr,
eager: bool,
) -> Expr | Series: ...
def coalesce(
exprs: IntoExpr | Iterable[IntoExpr],
*more_exprs: IntoExpr,
eager: bool = False,
) -> Expr | Series:
"""
Folds the columns from left to right, keeping the first non-null value.
Parameters
----------
exprs
Columns to coalesce. Accepts expression input. Strings are parsed as column
names, other non-expression inputs are parsed as literals.
*more_exprs
Additional columns to coalesce, specified as positional arguments.
eager
Evaluate immediately and return a `Series`; this requires that at least one
of the given arguments is a `Series`. If set to `False` (default), return
an expression instead.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, None, None, None],
... "b": [1, 2, None, None],
... "c": [5, None, 3, None],
... }
... )
>>> df.with_columns(pl.coalesce("a", "b", "c", 10).alias("d"))
shape: (4, 4)
┌──────┬──────┬──────┬─────┐
│ a ┆ b ┆ c ┆ d │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 ┆ i64 │
╞══════╪══════╪══════╪═════╡
│ 1 ┆ 1 ┆ 5 ┆ 1 │
│ null ┆ 2 ┆ null ┆ 2 │
│ null ┆ null ┆ 3 ┆ 3 │
│ null ┆ null ┆ null ┆ 10 │
└──────┴──────┴──────┴─────┘
>>> df.with_columns(pl.coalesce(pl.col(["a", "b", "c"]), 10.0).alias("d"))
shape: (4, 4)
┌──────┬──────┬──────┬──────┐
│ a ┆ b ┆ c ┆ d │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 ┆ f64 │
╞══════╪══════╪══════╪══════╡
│ 1 ┆ 1 ┆ 5 ┆ 1.0 │
│ null ┆ 2 ┆ null ┆ 2.0 │
│ null ┆ null ┆ 3 ┆ 3.0 │
│ null ┆ null ┆ null ┆ 10.0 │
└──────┴──────┴──────┴──────┘
>>> s1 = pl.Series("a", [None, 2, None])
>>> s2 = pl.Series("b", [1, None, 3])
>>> pl.coalesce(s1, s2, eager=True)
shape: (3,)
Series: 'a' [i64]
[
1
2
3
]
"""
if eager:
exprs = [exprs, *more_exprs]
if not (series := [e for e in exprs if isinstance(e, pl.Series)]):
msg = "expected at least one Series in 'coalesce' if 'eager=True'"
raise ValueError(msg)
exprs = [(e.name if isinstance(e, pl.Series) else e) for e in exprs]
return pl.DataFrame(series).select(coalesce(exprs, eager=False)).to_series()
else:
exprs = parse_into_list_of_expressions(exprs, *more_exprs)
return wrap_expr(plr.coalesce(exprs))
@overload
def from_epoch(column: str | Expr, time_unit: EpochTimeUnit = ...) -> Expr: ...
@overload
def from_epoch(
column: Series | Sequence[int], time_unit: EpochTimeUnit = ...
) -> Series: ...
def from_epoch(
column: str | Expr | Series | Sequence[int], time_unit: EpochTimeUnit = "s"
) -> Expr | Series:
"""
Utility function that parses an epoch timestamp (or Unix time) to Polars Date(time).
Depending on the `time_unit` provided, this function will return a different dtype:
- time_unit="d" returns pl.Date
- time_unit="s" returns pl.Datetime["us"] (pl.Datetime's default)
- time_unit="ms" returns pl.Datetime["ms"]
- time_unit="us" returns pl.Datetime["us"]
- time_unit="ns" returns pl.Datetime["ns"]
Parameters
----------
column
Series or expression to parse integers to pl.Datetime.
time_unit
The unit of time of the timesteps since epoch time.
Examples
--------
>>> df = pl.DataFrame({"timestamp": [1666683077, 1666683099]}).lazy()
>>> df.select(pl.from_epoch(pl.col("timestamp"), time_unit="s")).collect()
shape: (2, 1)
┌─────────────────────┐
│ timestamp │
│ --- │
│ datetime[μs] │
╞═════════════════════╡
│ 2022-10-25 07:31:17 │
│ 2022-10-25 07:31:39 │
└─────────────────────┘
The function can also be used in an eager context by passing a Series.
>>> s = pl.Series([12345, 12346])
>>> pl.from_epoch(s, time_unit="d")
shape: (2,)
Series: '' [date]
[
2003-10-20
2003-10-21
]
"""
if isinstance(column, str):
column = F.col(column)
elif not isinstance(column, (pl.Series, pl.Expr)):
column = pl.Series(column) # Sequence input handled by Series constructor
if time_unit == "d":
return column.cast(Date)
elif time_unit == "s":
return (column.cast(Int64) * 1_000_000).cast(Datetime("us"))
elif time_unit in DTYPE_TEMPORAL_UNITS:
return column.cast(Datetime(time_unit))
else:
msg = f"`time_unit` must be one of {{'ns', 'us', 'ms', 's', 'd'}}, got {time_unit!r}"
raise ValueError(msg)
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_cov(
a: str | Expr,
b: str | Expr,
*,
window_size: int,
min_samples: int | None = None,
ddof: int = 1,
) -> Expr:
"""
Compute the rolling covariance between two columns/ expressions.
The window at a given row includes the row itself and the
`window_size - 1` elements before it.
.. versionchanged:: 1.21.0
The `min_periods` parameter was renamed `min_samples`.
Parameters
----------
a
Column name or Expression.
b
Column name or Expression.
window_size
The length of the window.
min_samples
The number of values in the window that should be non-null before computing
a result. If None, it will be set equal to window size.
ddof
Delta degrees of freedom. The divisor used in calculations
is `N - ddof`, where `N` represents the number of elements.
"""
if min_samples is None:
min_samples = window_size
if isinstance(a, str):
a = F.col(a)
if isinstance(b, str):
b = F.col(b)
return wrap_expr(
plr.rolling_cov(a._pyexpr, b._pyexpr, window_size, min_samples, ddof)
)
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_corr(
a: str | Expr,
b: str | Expr,
*,
window_size: int,
min_samples: int | None = None,
ddof: int = 1,
) -> Expr:
"""
Compute the rolling correlation between two columns/ expressions.
The window at a given row includes the row itself and the
`window_size - 1` elements before it.
.. versionchanged:: 1.21.0
The `min_periods` parameter was renamed `min_samples`.
Parameters
----------
a
Column name or Expression.
b
Column name or Expression.
window_size
The length of the window.
min_samples
The number of values in the window that should be non-null before computing
a result. If None, it will be set equal to window size.
ddof
Delta degrees of freedom. The divisor used in calculations
is `N - ddof`, where `N` represents the number of elements.
"""
if min_samples is None:
min_samples = window_size
if isinstance(a, str):
a = F.col(a)
if isinstance(b, str):
b = F.col(b)
return wrap_expr(
plr.rolling_corr(a._pyexpr, b._pyexpr, window_size, min_samples, ddof)
)
@overload
def sql_expr(sql: str) -> Expr: # type: ignore[overload-overlap]
...
@overload
def sql_expr(sql: Sequence[str]) -> list[Expr]: ...
def sql_expr(sql: str | Sequence[str]) -> Expr | list[Expr]:
"""
Parse one or more SQL expressions to Polars expression(s).
Parameters
----------
sql
One or more SQL expressions.
Examples
--------
Parse a single SQL expression:
>>> df = pl.DataFrame({"a": [2, 1]})
>>> expr = pl.sql_expr("MAX(a)")
>>> df.select(expr)
shape: (1, 1)
┌─────┐
│ a │
│ --- │
│ i64 │
╞═════╡
│ 2 │
└─────┘
Parse multiple SQL expressions:
>>> df.with_columns(
... *pl.sql_expr(["POWER(a,a) AS a_a", "CAST(a AS TEXT) AS a_txt"]),
... )
shape: (2, 3)
┌─────┬─────┬───────┐
│ a ┆ a_a ┆ a_txt │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═══════╡
│ 2 ┆ 4 ┆ 2 │
│ 1 ┆ 1 ┆ 1 │
└─────┴─────┴───────┘
"""
if isinstance(sql, str):
return wrap_expr(plr.sql_expr(sql))
else:
return [wrap_expr(plr.sql_expr(q)) for q in sql]
@unstable()
def row_index(name: str = "index") -> pl.Expr:
"""
Generates a sequence of integers.
The length of the returned sequence will match the context length, and the
datatype will match the one returned by `get_index_dtype()`.
.. versionadded:: 1.32.0
If you would like to generate sequences with custom offsets / length /
step size / datatypes, it is recommended to use `int_range` instead.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
Parameters
----------
name
Name of the returned column.
Returns
-------
Expr
Column of integers.
See Also
--------
int_range : Generate a range of integers.
Examples
--------
>>> df = pl.DataFrame({"x": ["A", "A", "B", "B", "B"]})
>>> df.with_columns(pl.row_index(), pl.row_index("another_index"))
shape: (5, 3)
┌─────┬───────┬───────────────┐
│ x ┆ index ┆ another_index │
│ --- ┆ --- ┆ --- │
│ str ┆ u32 ┆ u32 │
╞═════╪═══════╪═══════════════╡
│ A ┆ 0 ┆ 0 │
│ A ┆ 1 ┆ 1 │
│ B ┆ 2 ┆ 2 │
│ B ┆ 3 ┆ 3 │
│ B ┆ 4 ┆ 4 │
└─────┴───────┴───────────────┘
>>> df.group_by("x").agg(pl.row_index()).sort("x")
shape: (2, 2)
┌─────┬───────────┐
│ x ┆ index │
│ --- ┆ --- │
│ str ┆ list[u32] │
╞═════╪═══════════╡
│ A ┆ [0, 1] │
│ B ┆ [0, 1, 2] │
└─────┴───────────┘
>>> df.select(pl.row_index())
shape: (5, 1)
┌───────┐
│ index │
│ --- │
│ u32 │
╞═══════╡
│ 0 │
│ 1 │
│ 2 │
│ 3 │
│ 4 │
└───────┘
"""
# Notes
# * Dispatching to `int_range` means that we cannot accept an offset
# parameter, as unlike `DataFrame.with_row_index()`, `int_range` will simply
# truncate instead of raising an error.
return F.int_range(
F.len(),
dtype=get_index_type(),
).alias(name)
| _map_batches_wrapper |
python | doocs__leetcode | solution/0800-0899/0889.Construct Binary Tree from Preorder and Postorder Traversal/Solution.py | {
"start": 192,
"end": 847
} | class ____:
def constructFromPrePost(
self, preorder: List[int], postorder: List[int]
) -> Optional[TreeNode]:
def dfs(a: int, b: int, c: int, d: int) -> Optional[TreeNode]:
if a > b:
return None
root = TreeNode(preorder[a])
if a == b:
return root
i = pos[preorder[a + 1]]
m = i - c + 1
root.left = dfs(a + 1, a + m, c, i)
root.right = dfs(a + m + 1, b, i + 1, d - 1)
return root
pos = {x: i for i, x in enumerate(postorder)}
return dfs(0, len(preorder) - 1, 0, len(postorder) - 1)
| Solution |
python | spyder-ide__spyder | external-deps/python-lsp-server/pylsp/config/config.py | {
"start": 765,
"end": 1366
} | class ____(pluggy.PluginManager):
def _hookexec(
self,
hook_name: str,
methods: Sequence[HookImpl],
kwargs: Mapping[str, object],
firstresult: bool,
) -> Union[object, list[object]]:
# called from all hookcaller instances.
# enable_tracing will set its own wrapping function at self._inner_hookexec
try:
return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
except Exception as e:
log.warning(f"Failed to load hook {hook_name}: {e}", exc_info=True)
return []
| PluginManager |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/event_api.py | {
"start": 4782,
"end": 5130
} | class ____(NamedTuple):
"""Return value for a query fetching event records from the instance. Contains a list of event
records, a cursor string, and a boolean indicating whether there are more records to fetch.
"""
records: Sequence[EventLogRecord]
cursor: str
has_more: bool
@whitelist_for_serdes
@public
| EventRecordsResult |
python | sympy__sympy | sympy/functions/elementary/integers.py | {
"start": 3248,
"end": 9664
} | class ____(RoundFunction):
"""
Floor is a univariate function which returns the largest integer
value not greater than its argument. This implementation
generalizes floor to complex numbers by taking the floor of the
real and imaginary parts separately.
Examples
========
>>> from sympy import floor, E, I, S, Float, Rational
>>> floor(17)
17
>>> floor(Rational(23, 10))
2
>>> floor(2*E)
5
>>> floor(-Float(0.567))
-1
>>> floor(-I/2)
-I
>>> floor(S(5)/2 + 5*I/2)
2 + 2*I
See Also
========
sympy.functions.elementary.integers.ceiling
References
==========
.. [1] "Concrete mathematics" by Graham, pp. 87
.. [2] https://mathworld.wolfram.com/FloorFunction.html
"""
_dir = -1
@classmethod
def _eval_number(cls, arg):
if arg.is_Number:
return arg.floor()
if any(isinstance(i, j)
for i in (arg, -arg) for j in (floor, ceiling)):
return arg
if arg.is_NumberSymbol:
return arg.approximation_interval(Integer)[0]
@classmethod
def _eval_const_number(cls, arg):
if arg.is_real:
if arg.is_zero:
return S.Zero
if arg.is_positive:
num, den = arg.as_numer_denom()
s = den.is_negative
if s is None:
return None
if s:
num, den = -num, -den
# 0 <= num/den < 1 -> 0
if is_lt(num, den):
return S.Zero
# 1 <= num/den < 2 -> 1
if fuzzy_and([is_le(den, num), is_lt(num, 2*den)]):
return S.One
if arg.is_negative:
num, den = arg.as_numer_denom()
s = den.is_negative
if s is None:
return None
if s:
num, den = -num, -den
# -1 <= num/den < 0 -> -1
if is_le(-den, num):
return S.NegativeOne
# -2 <= num/den < -1 -> -2
if fuzzy_and([is_le(-2*den, num), is_lt(num, -den)]):
return Integer(-2)
def _eval_as_leading_term(self, x, logx, cdir):
from sympy.calculus.accumulationbounds import AccumBounds
arg = self.args[0]
arg0 = arg.subs(x, 0)
r = self.subs(x, 0)
if arg0 is S.NaN or isinstance(arg0, AccumBounds):
arg0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
r = floor(arg0)
if arg0.is_finite:
if arg0 == r:
ndir = arg.dir(x, cdir=cdir if cdir != 0 else 1)
if ndir.is_negative:
return r - 1
elif ndir.is_positive:
return r
else:
raise NotImplementedError("Not sure of sign of %s" % ndir)
else:
return r
return arg.as_leading_term(x, logx=logx, cdir=cdir)
def _eval_nseries(self, x, n, logx, cdir=0):
arg = self.args[0]
arg0 = arg.subs(x, 0)
r = self.subs(x, 0)
if arg0 is S.NaN:
arg0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
r = floor(arg0)
if arg0.is_infinite:
from sympy.calculus.accumulationbounds import AccumBounds
from sympy.series.order import Order
s = arg._eval_nseries(x, n, logx, cdir)
o = Order(1, (x, 0)) if n <= 0 else AccumBounds(-1, 0)
return s + o
if arg0 == r:
ndir = arg.dir(x, cdir=cdir if cdir != 0 else 1)
if ndir.is_negative:
return r - 1
elif ndir.is_positive:
return r
else:
raise NotImplementedError("Not sure of sign of %s" % ndir)
else:
return r
def _eval_is_negative(self):
return self.args[0].is_negative
def _eval_is_nonnegative(self):
return self.args[0].is_nonnegative
def _eval_rewrite_as_ceiling(self, arg, **kwargs):
return -ceiling(-arg)
def _eval_rewrite_as_frac(self, arg, **kwargs):
return arg - frac(arg)
def __le__(self, other):
other = S(other)
if self.args[0].is_real:
if other.is_integer:
return self.args[0] < other + 1
if other.is_number and other.is_real:
return self.args[0] < ceiling(other)
if self.args[0] == other and other.is_real:
return S.true
if other is S.Infinity and self.is_finite:
return S.true
return Le(self, other, evaluate=False)
def __ge__(self, other):
other = S(other)
if self.args[0].is_real:
if other.is_integer:
return self.args[0] >= other
if other.is_number and other.is_real:
return self.args[0] >= ceiling(other)
if self.args[0] == other and other.is_real and other.is_noninteger:
return S.false
if other is S.NegativeInfinity and self.is_finite:
return S.true
return Ge(self, other, evaluate=False)
def __gt__(self, other):
other = S(other)
if self.args[0].is_real:
if other.is_integer:
return self.args[0] >= other + 1
if other.is_number and other.is_real:
return self.args[0] >= ceiling(other)
if self.args[0] == other and other.is_real:
return S.false
if other is S.NegativeInfinity and self.is_finite:
return S.true
return Gt(self, other, evaluate=False)
def __lt__(self, other):
other = S(other)
if self.args[0].is_real:
if other.is_integer:
return self.args[0] < other
if other.is_number and other.is_real:
return self.args[0] < ceiling(other)
if self.args[0] == other and other.is_real and other.is_noninteger:
return S.true
if other is S.Infinity and self.is_finite:
return S.true
return Lt(self, other, evaluate=False)
@dispatch(floor, Expr)
def _eval_is_eq(lhs, rhs): # noqa:F811
return is_eq(lhs.rewrite(ceiling), rhs) or \
is_eq(lhs.rewrite(frac),rhs)
| floor |
python | ray-project__ray | rllib/utils/schedules/tests/test_schedules.py | {
"start": 286,
"end": 3770
} | class ____(unittest.TestCase):
"""Tests all time-step dependent Schedule classes."""
def test_constant_schedule(self):
value = 2.3
ts = [100, 0, 10, 2, 3, 4, 99, 56, 10000, 23, 234, 56]
config = {"value": value}
constant = from_config(ConstantSchedule, config, framework=None)
for t in ts:
out = constant(t)
check(out, value)
ts_as_tensors = self._get_framework_tensors(ts, None)
for t in ts_as_tensors:
out = constant(t)
check(out, value, decimals=4)
def test_linear_schedule(self):
ts = [0, 50, 10, 100, 90, 2, 1, 99, 23, 1000]
expected = [2.1 - (min(t, 100) / 100) * (2.1 - 0.6) for t in ts]
config = {"schedule_timesteps": 100, "initial_p": 2.1, "final_p": 0.6}
linear = from_config(LinearSchedule, config, framework=None)
for t, e in zip(ts, expected):
out = linear(t)
check(out, e, decimals=4)
ts_as_tensors = self._get_framework_tensors(ts, None)
for t, e in zip(ts_as_tensors, expected):
out = linear(t)
check(out, e, decimals=4)
def test_polynomial_schedule(self):
ts = [0, 5, 10, 100, 90, 2, 1, 99, 23, 1000]
expected = [0.5 + (2.0 - 0.5) * (1.0 - min(t, 100) / 100) ** 2 for t in ts]
config = dict(
type="ray.rllib.utils.schedules.polynomial_schedule.PolynomialSchedule",
schedule_timesteps=100,
initial_p=2.0,
final_p=0.5,
power=2.0,
)
polynomial = from_config(config, framework=None)
for t, e in zip(ts, expected):
out = polynomial(t)
check(out, e, decimals=4)
ts_as_tensors = self._get_framework_tensors(ts, None)
for t, e in zip(ts_as_tensors, expected):
out = polynomial(t)
check(out, e, decimals=4)
def test_exponential_schedule(self):
decay_rate = 0.2
ts = [0, 5, 10, 100, 90, 2, 1, 99, 23]
expected = [2.0 * decay_rate ** (t / 100) for t in ts]
config = dict(initial_p=2.0, decay_rate=decay_rate, schedule_timesteps=100)
exponential = from_config(ExponentialSchedule, config, framework=None)
for t, e in zip(ts, expected):
out = exponential(t)
check(out, e, decimals=4)
ts_as_tensors = self._get_framework_tensors(ts, None)
for t, e in zip(ts_as_tensors, expected):
out = exponential(t)
check(out, e, decimals=4)
def test_piecewise_schedule(self):
ts = [0, 5, 10, 100, 90, 2, 1, 99, 27]
expected = [50.0, 60.0, 70.0, 14.5, 14.5, 54.0, 52.0, 14.5, 140.0]
config = dict(
endpoints=[(0, 50.0), (25, 100.0), (30, 200.0)], outside_value=14.5
)
piecewise = from_config(PiecewiseSchedule, config, framework=None)
for t, e in zip(ts, expected):
out = piecewise(t)
check(out, e, decimals=4)
ts_as_tensors = self._get_framework_tensors(ts, None)
for t, e in zip(ts_as_tensors, expected):
out = piecewise(t)
check(out, e, decimals=4)
@staticmethod
def _get_framework_tensors(ts, fw):
if fw == "torch":
ts = [torch.tensor(t, dtype=torch.int32) for t in ts]
return ts
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestSchedules |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI012.py | {
"start": 121,
"end": 220
} | class ____:
pass # PYI012 Class body must not contain `pass`
value: int
| OneAttributeClassRev |
python | spack__spack | lib/spack/spack/spec.py | {
"start": 220431,
"end": 220558
} | class ____(spack.error.SpecError):
"""Raised when the same architecture occurs in a spec twice."""
| DuplicateArchitectureError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 118584,
"end": 120903
} | class ____(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_absolute_error(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_error = self.evaluate(error)
for _ in range(10):
self.assertEqual(initial_error, self.evaluate(error))
@test_util.run_deprecated_v1
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.mean_absolute_error(labels, predictions, weights)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertEqual(3, self.evaluate(update_op))
self.assertEqual(3, self.evaluate(error))
| MeanAbsoluteErrorTest |
python | tensorflow__tensorflow | tensorflow/python/keras/initializers/initializers_v1.py | {
"start": 1414,
"end": 1662
} | class ____(init_ops.RandomUniform):
def __init__(self, minval=-0.05, maxval=0.05, seed=None,
dtype=dtypes.float32):
super(RandomUniform, self).__init__(
minval=minval, maxval=maxval, seed=seed, dtype=dtype)
| RandomUniform |
python | apache__airflow | providers/ydb/tests/unit/ydb/operators/test_ydb.py | {
"start": 2121,
"end": 2263
} | class ____:
def __init__(self, *args):
self.table_client = FakeTableClient()
def wait(*args, **kwargs):
pass
| FakeDriver |
python | pypa__setuptools | setuptools/tests/config/test_apply_pyprojecttoml.py | {
"start": 25710,
"end": 25979
} | class ____:
def test_example_file_in_sdist(self, setuptools_sdist):
"""Meta test to ensure tests can run from sdist"""
with tarfile.open(setuptools_sdist) as tar:
assert any(name.endswith(EXAMPLES_FILE) for name in tar.getnames())
| TestMeta |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 56142,
"end": 56765
} | class ____(ObjectBaseModel):
flow_run_id: UUID = Field(description="The flow run ID associated with the input.")
key: Annotated[str, AfterValidator(raise_on_name_alphanumeric_dashes_only)] = Field(
description="The key of the input."
)
value: str = Field(description="The value of the input.")
sender: Optional[str] = Field(default=None, description="The sender of the input.")
@property
def decoded_value(self) -> Any:
"""
Decode the value of the input.
Returns:
Any: the decoded value
"""
return orjson.loads(self.value)
| FlowRunInput |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 116811,
"end": 117176
} | class ____(sgqlc.types.Enum):
"""Properties by which team discussion comment connections can be
ordered.
Enumeration Choices:
* `NUMBER`: Allows sequential ordering of team discussion comments
(which is equivalent to chronological ordering).
"""
__schema__ = github_schema
__choices__ = ("NUMBER",)
| TeamDiscussionCommentOrderField |
python | prabhupant__python-ds | data_structures/bst/average_of_levels.py | {
"start": 82,
"end": 697
} | class ____():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def mean(arr):
m = 0
for x in arr:
m += x
return m / len(arr)
def bfs(root):
if not root:
return
queue = collections.deque([root])
result = []
while queue:
next_queue = collections.deque()
for node in queue:
if node.left:
next_queue.append(node.left)
if node.right:
next_queue.append(node.right)
result.append(mean(queue))
queue = next_queue
return result
| Node |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/pyodbc.py | {
"start": 2348,
"end": 2676
} | class ____(MySQLExecutionContext):
def get_lastrowid(self) -> int:
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0] # type: ignore[index]
cursor.close()
return lastrowid # type: ignore[no-any-return]
| MySQLExecutionContext_pyodbc |
python | getsentry__sentry | src/sentry_plugins/redmine/plugin.py | {
"start": 837,
"end": 9157
} | class ____(CorePluginMixin, IssuePlugin):
author = "Sentry"
author_url = "https://github.com/getsentry/sentry"
version = sentry.VERSION
description = DESCRIPTION
slug = "redmine"
title = _("Redmine")
conf_title = "Redmine"
conf_key = "redmine"
required_field = "host"
feature_descriptions = [
FeatureDescription(
"""
Create and link Sentry issue groups directly to an Redmine issue in any of your
projects, providing a quick way to jump from a Sentry bug to tracked ticket.
""",
IntegrationFeatures.ISSUE_BASIC,
),
FeatureDescription(
"""
Link Sentry issues to existing Redmine issue.
""",
IntegrationFeatures.ISSUE_BASIC,
),
]
new_issue_form = RedmineNewIssueForm
def __init__(self):
super().__init__()
self.client_errors = []
self.fields = []
def has_project_conf(self) -> bool:
return True
def is_configured(self, project) -> bool:
return all(self.get_option(k, project) for k in ("host", "key", "project_id"))
def get_new_issue_title(self, **kwargs) -> str:
return "Create Redmine Task"
def get_initial_form_data(self, request: Request, group, event, **kwargs):
return {
"description": self._get_group_description(group, event),
"title": self._get_group_title(group, event),
}
def _get_group_description(self, group, event):
output = [absolute_uri(group.get_absolute_url())]
body = self._get_group_body(group, event)
if body:
output.extend(["", "<pre>", body, "</pre>"])
return "\n".join(output)
def get_client(self, project):
return RedmineClient(
host=self.get_option("host", project), key=self.get_option("key", project)
)
def create_issue(self, request, group, form_data):
"""
Create a Redmine issue
"""
client = self.get_client(group.project)
default_priority = self.get_option("default_priority", group.project)
if default_priority is None:
default_priority = 4
issue_dict = {
"project_id": self.get_option("project_id", group.project),
"tracker_id": self.get_option("tracker_id", group.project),
"priority_id": default_priority,
"subject": form_data["title"].encode("utf-8"),
"description": form_data["description"].encode("utf-8"),
}
extra_fields_str = self.get_option("extra_fields", group.project)
if extra_fields_str:
extra_fields = json.loads(extra_fields_str)
else:
extra_fields = {}
issue_dict.update(extra_fields)
response = client.create_issue(issue_dict)
return response["issue"]["id"]
def get_issue_url(self, group, issue_id: str) -> str:
host = self.get_option("host", group.project)
return "{}/issues/{}".format(host.rstrip("/"), issue_id)
def build_config(self, project):
host = {
"name": "host",
"label": "Host",
"type": "text",
"help": "e.g. http://bugs.redmine.org",
"required": True,
}
key = get_secret_field_config(
name="key",
label="Key",
secret=self.get_option("key", project),
help="Your API key is available on your account page after enabling the Rest API (Administration -> Settings -> Authentication)",
required=True,
)
project_id = {
"name": "project_id",
"label": "Project*",
"type": "select",
"choices": [],
"required": False,
}
tracker_id = {
"name": "tracker_id",
"label": "Tracker*",
"type": "select",
"choices": [],
"required": False,
}
default_priority = {
"name": "default_priority",
"label": "Default Priority*",
"type": "select",
"choices": [],
"required": False,
}
extra_fields = {
"name": "extra_fields",
"label": "Extra Fields",
"type": "text",
"help": "Extra attributes (custom fields, status id, etc.) in JSON format",
"required": False,
}
return [host, key, project_id, tracker_id, default_priority, extra_fields]
def add_choices(self, field_name, choices, default):
for field in self.fields:
if field_name == field["name"]:
field["choices"] = choices
field["default"] = default
return
def remove_field(self, field_name):
for field in self.fields:
if field["name"] == field_name:
self.fields.remove(field)
return
def build_initial(self, initial_args, project):
initial = {}
fields = ["host", "key", "project_id", "tracker_id", "default_priority", "extra_fields"]
for field in fields:
value = initial_args.get(field) or self.get_option(field, project)
if value is not None:
initial[field] = value
return initial
def get_config(self, project, user=None, initial=None, add_additional_fields: bool = False):
self.client_errors = []
self.fields = self.build_config(project)
initial_args = initial or {}
initial = self.build_initial(initial_args, project)
has_credentials = all(initial.get(k) for k in ("host", "key"))
client = None
if has_credentials:
client = RedmineClient(initial["host"], initial["key"])
try:
projects = client.get_projects()
except Exception:
has_credentials = False
self.client_errors.append("There was an issue authenticating with Redmine")
else:
choices_value = self.get_option("project_id", project)
project_choices = [("", "--")] if not choices_value else []
project_choices += [
(p["id"], "{} ({})".format(p["name"], p["identifier"]))
for p in projects["projects"]
]
self.add_choices("project_id", project_choices, choices_value)
if client is not None and has_credentials:
try:
trackers = client.get_trackers()
except Exception:
self.remove_field("tracker_id")
else:
choices_value = self.get_option("tracker_id", project)
tracker_choices = [("", "--")] if not choices_value else []
tracker_choices += [(p["id"], p["name"]) for p in trackers["trackers"]]
self.add_choices("tracker_id", tracker_choices, choices_value)
try:
priorities = client.get_priorities()
except Exception:
self.remove_field("default_priority")
else:
choices_value = self.get_option("default_priority", project)
tracker_choices = [("", "--")] if not choices_value else []
tracker_choices += [(p["id"], p["name"]) for p in priorities["issue_priorities"]]
self.add_choices("default_priority", tracker_choices, choices_value)
if not has_credentials:
for field_name in ["project_id", "tracker_id", "default_priority", "extra_fields"]:
self.remove_field(field_name)
return self.fields
def validate_config(self, project, config, actor=None):
super().validate_config(project, config, actor)
self.client_errors = []
for field in self.fields:
if field["name"] in ["project_id", "tracker_id", "default_priority"]:
if not config[field["name"]]:
self.logger.exception(str("{} required.".format(field["name"])))
self.client_errors.append(field["name"])
if self.client_errors:
raise PluginError(", ".join(self.client_errors) + " required.")
return config
| RedminePlugin |
python | facebook__pyre-check | tools/pysa_integration_tests/runner_lib.py | {
"start": 18050,
"end": 21889
} | class ____:
def __init__(self) -> None:
self.functions: Dict[str, Union[ast.FunctionDef, ast.AsyncFunctionDef]] = {}
def add_function(
self, name: str, function: Union[ast.FunctionDef, ast.AsyncFunctionDef]
) -> None:
self.functions[name] = function
@staticmethod
def from_ast(parsed_ast: ast.AST) -> "FunctionDefinitions":
functions = FunctionDefinitions()
functions.add_from_statements(
parsed_ast.body, # pyre-ignore: _ast.AST has no attribute body
prefix="",
)
return functions
def add_from_statements(self, statements: List[ast.stmt], prefix: str) -> None:
for statement in statements:
if isinstance(statement, (ast.FunctionDef, ast.AsyncFunctionDef)):
self.add_function(f"{prefix}{statement.name}", statement)
self.add_from_statements(
statement.body, prefix=f"{prefix}{statement.name}."
)
elif isinstance(statement, ast.ClassDef):
self.add_from_statements(
statement.body, prefix=f"{prefix}{statement.name}."
)
def parse_test_annotations_from_source(
source: str,
) -> Dict[str, FunctionTestAnnotations]:
parsed_ast = ast.parse(source)
functions = FunctionDefinitions.from_ast(parsed_ast)
annotated_functions: Dict[str, FunctionTestAnnotations] = {}
for qualified_name, function in functions.functions.items():
annotations: List[TestAnnotation] = []
for decorator_expression in function.decorator_list:
if not isinstance(decorator_expression, ast.Call):
continue
annotation = parse_test_annotation(decorator_expression, function.name)
if annotation is not None:
annotations.append(annotation)
if len(annotations) > 0:
annotated_functions[qualified_name] = FunctionTestAnnotations(
definition_line=function.lineno, annotations=annotations
)
# Sanity check that we parsed all test annotations.
number_expect_issue_substrings = source.count("@ExpectIssue(")
number_expect_no_issue_substrings = source.count("@ExpectNoIssue(")
number_parsed_annotations = sum(
len(function_annotation.annotations)
for function_annotation in annotated_functions.values()
)
if (
number_expect_issue_substrings + number_expect_no_issue_substrings
!= number_parsed_annotations
):
raise TestConfigurationException(
"Unexpected mismatch between '@ExpectIssue' and parsed annotations:\n"
+ f"Found {number_expect_issue_substrings} @ExpectIssue\n"
+ f"Found {number_expect_no_issue_substrings} @ExpectNoIssue\n"
+ f"Parsed {number_parsed_annotations} test annotations"
)
return annotated_functions
def parse_test_annotations_from_directory(
directory: Path, repository_root: Path
) -> DirectoryTestAnnotations:
LOG.info(f"Parsing test annotations in {directory}")
result = DirectoryTestAnnotations()
for path in directory.glob("**/*.py"):
if path.name == "runner_lib.py":
continue
base_module = ".".join(path.relative_to(repository_root).parts)
base_module = base_module[:-3] # Remove .py suffix
for function_name, annotations in parse_test_annotations_from_source(
path.read_text()
).items():
result.set(f"{base_module}.{function_name}", annotations)
LOG.info(f"Found {result.number_annotations()} test annotations")
if result.number_annotations() == 0:
raise TestConfigurationException(
f"Could NOT find test annotations in {directory}"
)
return result
| FunctionDefinitions |
python | sympy__sympy | sympy/solvers/diophantine/diophantine.py | {
"start": 21970,
"end": 22434
} | class ____(DiophantineEquationType):
"""
Representation of an inhomogeneous ternary quadratic.
No solver is currently implemented for this equation type.
"""
name = 'inhomogeneous_ternary_quadratic'
def matches(self):
if not (self.total_degree == 2 and self.dimension == 3):
return False
if not self.homogeneous:
return False
return not self.homogeneous_order
| InhomogeneousTernaryQuadratic |
python | pytorch__pytorch | test/test_fx.py | {
"start": 167284,
"end": 177291
} | class ____(JitTestCase):
def setUp(self):
super().setUp()
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = (
torch.fx.proxy.TracerBase.check_mutable_operations
)
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
super().tearDown()
torch.fx.proxy.TracerBase.check_mutable_operations = (
self.orig_tracer_mutable_flag
)
IGNORE_FUNCS = (
"has_torch_function",
"has_torch_function_unary",
"has_torch_function_variadic",
"handle_torch_function",
"boolean_dispatch",
)
TO_PATCH = {
"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None,
}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (
TraceError,
r"symbolically traced variables cannot be used as inputs to control flow",
)
INTERPOLATE_ARGS_CONFLICT = (
ValueError,
r"only one of size or scale_factor should be defined",
)
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"scaled_dot_product_attention": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"lp_pool2d": PROXY_ITERATED,
"lp_pool3d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"fold": PROXY_ITERATED,
"unfold": PROXY_ITERATED,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout1d": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"group_norm": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
}
# List of nn.functionals with Tensor inputs but not with type annotation
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": CONTROL_FLOW,
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith("_"):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for param in sig.parameters.values():
if isinstance(param.annotation, type) and issubclass(
param.annotation, torch.Tensor
):
has_tensor_arg = True
if not has_tensor_arg:
continue
# No signature or Object is not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if (
func_name in self.UNTRACEABLE_FUNCTIONALS_PY38
and sys.version_info < (3, 12)
):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH:
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH:
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfTorchDynamo("too slow")
@skipIfNoTorchVision
| TestFunctionalTracing |
python | plotly__plotly.py | plotly/matplotlylib/renderer.py | {
"start": 573,
"end": 34051
} | class ____(Renderer):
"""A renderer class inheriting from base for rendering mpl plots in plotly.
A renderer class to be used with an exporter for rendering matplotlib
plots in Plotly. This module defines the PlotlyRenderer class which handles
the creation of the JSON structures that get sent to plotly.
All class attributes available are defined in __init__().
Basic Usage:
# (mpl code) #
fig = gcf()
renderer = PlotlyRenderer(fig)
exporter = Exporter(renderer)
exporter.run(fig) # ... et voila
"""
def __init__(self):
"""Initialize PlotlyRenderer obj.
PlotlyRenderer obj is called on by an Exporter object to draw
matplotlib objects like figures, axes, text, etc.
All class attributes are listed here in the __init__ method.
"""
self.plotly_fig = go.Figure()
self.mpl_fig = None
self.current_mpl_ax = None
self.bar_containers = None
self.current_bars = []
self.axis_ct = 0
self.x_is_mpl_date = False
self.mpl_x_bounds = (0, 1)
self.mpl_y_bounds = (0, 1)
self.msg = "Initialized PlotlyRenderer\n"
self._processing_legend = False
self._legend_visible = False
def open_figure(self, fig, props):
"""Creates a new figure by beginning to fill out layout dict.
The 'autosize' key is set to false so that the figure will mirror
sizes set by mpl. The 'hovermode' key controls what shows up when you
mouse around a figure in plotly, it's set to show the 'closest' point.
Positional agurments:
fig -- a matplotlib.figure.Figure object.
props.keys(): [
'figwidth',
'figheight',
'dpi'
]
"""
self.msg += "Opening figure\n"
self.mpl_fig = fig
self.plotly_fig["layout"] = go.Layout(
width=int(props["figwidth"] * props["dpi"]),
height=int(props["figheight"] * props["dpi"]),
autosize=False,
hovermode="closest",
)
self.mpl_x_bounds, self.mpl_y_bounds = mpltools.get_axes_bounds(fig)
margin = go.layout.Margin(
l=int(self.mpl_x_bounds[0] * self.plotly_fig["layout"]["width"]),
r=int((1 - self.mpl_x_bounds[1]) * self.plotly_fig["layout"]["width"]),
t=int((1 - self.mpl_y_bounds[1]) * self.plotly_fig["layout"]["height"]),
b=int(self.mpl_y_bounds[0] * self.plotly_fig["layout"]["height"]),
pad=0,
)
self.plotly_fig["layout"]["margin"] = margin
def close_figure(self, fig):
"""Closes figure by cleaning up data and layout dictionaries.
The PlotlyRenderer's job is to create an appropriate set of data and
layout dictionaries. When the figure is closed, some cleanup and
repair is necessary. This method removes inappropriate dictionary
entries, freeing up Plotly to use defaults and best judgements to
complete the entries. This method is called by an Exporter object.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
"""
self.msg += "Closing figure\n"
def open_axes(self, ax, props):
"""Setup a new axes object (subplot in plotly).
Plotly stores information about subplots in different 'xaxis' and
'yaxis' objects which are numbered. These are just dictionaries
included in the layout dictionary. This function takes information
from the Exporter, fills in appropriate dictionary entries,
and updates the layout dictionary. PlotlyRenderer keeps track of the
number of plots by incrementing the axis_ct attribute.
Setting the proper plot domain in plotly is a bit tricky. Refer to
the documentation for mpltools.convert_x_domain and
mpltools.convert_y_domain.
Positional arguments:
ax -- an mpl axes object. This will become a subplot in plotly.
props.keys() -- [
'axesbg', (background color for axes obj)
'axesbgalpha', (alpha, or opacity for background)
'bounds', ((x0, y0, width, height) for axes)
'dynamic', (zoom/pan-able?)
'axes', (list: [xaxis, yaxis])
'xscale', (log, linear, or date)
'yscale',
'xlim', (range limits for x)
'ylim',
'xdomain' (xdomain=xlim, unless it's a date)
'ydomain'
]
"""
self.msg += " Opening axes\n"
self.current_mpl_ax = ax
self.bar_containers = [
c
for c in ax.containers # empty is OK
if c.__class__.__name__ == "BarContainer"
]
self.current_bars = []
self.axis_ct += 1
# set defaults in axes
xaxis = go.layout.XAxis(
anchor="y{0}".format(self.axis_ct), zeroline=False, ticks="inside"
)
yaxis = go.layout.YAxis(
anchor="x{0}".format(self.axis_ct), zeroline=False, ticks="inside"
)
# update defaults with things set in mpl
mpl_xaxis, mpl_yaxis = mpltools.prep_xy_axis(
ax=ax, props=props, x_bounds=self.mpl_x_bounds, y_bounds=self.mpl_y_bounds
)
xaxis.update(mpl_xaxis)
yaxis.update(mpl_yaxis)
bottom_spine = mpltools.get_spine_visible(ax, "bottom")
top_spine = mpltools.get_spine_visible(ax, "top")
left_spine = mpltools.get_spine_visible(ax, "left")
right_spine = mpltools.get_spine_visible(ax, "right")
xaxis["mirror"] = mpltools.get_axis_mirror(bottom_spine, top_spine)
yaxis["mirror"] = mpltools.get_axis_mirror(left_spine, right_spine)
xaxis["showline"] = bottom_spine
yaxis["showline"] = top_spine
# put axes in our figure
self.plotly_fig["layout"]["xaxis{0}".format(self.axis_ct)] = xaxis
self.plotly_fig["layout"]["yaxis{0}".format(self.axis_ct)] = yaxis
# let all subsequent dates be handled properly if required
if "type" in dir(xaxis) and xaxis["type"] == "date":
self.x_is_mpl_date = True
def close_axes(self, ax):
"""Close the axes object and clean up.
Bars from bar charts are given to PlotlyRenderer one-by-one,
thus they need to be taken care of at the close of each axes object.
The self.current_bars variable should be empty unless a bar
chart has been created.
Positional arguments:
ax -- an mpl axes object, not required at this time.
"""
self.draw_bars(self.current_bars)
self.msg += " Closing axes\n"
self.x_is_mpl_date = False
def open_legend(self, legend, props):
"""Enable Plotly's native legend when matplotlib legend is detected.
This method is called when a matplotlib legend is found. It enables
Plotly's showlegend only if the matplotlib legend is visible.
Positional arguments:
legend -- matplotlib.legend.Legend object
props -- legend properties dictionary
"""
self.msg += " Opening legend\n"
self._processing_legend = True
self._legend_visible = props.get("visible", True)
if self._legend_visible:
self.msg += (
" Enabling native plotly legend (matplotlib legend is visible)\n"
)
self.plotly_fig["layout"]["showlegend"] = True
else:
self.msg += " Not enabling legend (matplotlib legend is not visible)\n"
def close_legend(self, legend):
"""Finalize legend processing.
Positional arguments:
legend -- matplotlib.legend.Legend object
"""
self.msg += " Closing legend\n"
self._processing_legend = False
self._legend_visible = False
def draw_bars(self, bars):
# sort bars according to bar containers
mpl_traces = []
for container in self.bar_containers:
mpl_traces.append(
[
bar_props
for bar_props in self.current_bars
if bar_props["mplobj"] in container
]
)
for trace in mpl_traces:
self.draw_bar(trace)
def draw_bar(self, coll):
"""Draw a collection of similar patches as a bar chart.
After bars are sorted, an appropriate data dictionary must be created
to tell plotly about this data. Just like draw_line or draw_markers,
draw_bar translates patch/path information into something plotly
understands.
Positional arguments:
patch_coll -- a collection of patches to be drawn as a bar chart.
"""
tol = 1e-10
trace = [mpltools.make_bar(**bar_props) for bar_props in coll]
widths = [bar_props["x1"] - bar_props["x0"] for bar_props in trace]
heights = [bar_props["y1"] - bar_props["y0"] for bar_props in trace]
vertical = abs(sum(widths[0] - widths[iii] for iii in range(len(widths)))) < tol
horizontal = (
abs(sum(heights[0] - heights[iii] for iii in range(len(heights)))) < tol
)
if vertical and horizontal:
# Check for monotonic x. Can't both be true!
x_zeros = [bar_props["x0"] for bar_props in trace]
if all(
(x_zeros[iii + 1] > x_zeros[iii] for iii in range(len(x_zeros[:-1])))
):
orientation = "v"
else:
orientation = "h"
elif vertical:
orientation = "v"
else:
orientation = "h"
if orientation == "v":
self.msg += " Attempting to draw a vertical bar chart\n"
old_heights = [bar_props["y1"] for bar_props in trace]
for bar in trace:
bar["y0"], bar["y1"] = 0, bar["y1"] - bar["y0"]
new_heights = [bar_props["y1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_heights, new_heights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "x"
x = [bar["x0"] + (bar["x1"] - bar["x0"]) / 2 for bar in trace]
y = [bar["y1"] for bar in trace]
bar_gap = mpltools.get_bar_gap(
[bar["x0"] for bar in trace], [bar["x1"] for bar in trace]
)
if self.x_is_mpl_date:
x = [bar["x0"] for bar in trace]
formatter = (
self.current_mpl_ax.get_xaxis()
.get_major_formatter()
.__class__.__name__
)
x = mpltools.mpl_dates_to_datestrings(x, formatter)
else:
self.msg += " Attempting to draw a horizontal bar chart\n"
old_rights = [bar_props["x1"] for bar_props in trace]
for bar in trace:
bar["x0"], bar["x1"] = 0, bar["x1"] - bar["x0"]
new_rights = [bar_props["x1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_rights, new_rights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "y"
x = [bar["x1"] for bar in trace]
y = [bar["y0"] + (bar["y1"] - bar["y0"]) / 2 for bar in trace]
bar_gap = mpltools.get_bar_gap(
[bar["y0"] for bar in trace], [bar["y1"] for bar in trace]
)
bar = go.Bar(
orientation=orientation,
x=x,
y=y,
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
opacity=trace[0]["alpha"], # TODO: get all alphas if array?
marker=go.bar.Marker(
color=trace[0]["facecolor"], # TODO: get all
line=dict(width=trace[0]["edgewidth"]),
),
) # TODO ditto
if len(bar["x"]) > 1:
self.msg += " Heck yeah, I drew that bar chart\n"
self.plotly_fig.add_trace(bar)
if bar_gap is not None:
self.plotly_fig["layout"]["bargap"] = bar_gap
else:
self.msg += " Bar chart not drawn\n"
warnings.warn(
"found box chart data with length <= 1, "
"assuming data redundancy, not plotting."
)
def draw_marked_line(self, **props):
"""Create a data dict for a line obj.
This will draw 'lines', 'markers', or 'lines+markers'. For legend elements,
this will use layout.shapes, so they can be positioned with paper refs.
props.keys() -- [
'coordinates', ('data', 'axes', 'figure', or 'display')
'data', (a list of xy pairs)
'mplobj', (the matplotlib.lines.Line2D obj being rendered)
'label', (the name of the Line2D obj being rendered)
'linestyle', (linestyle dict, can be None, see below)
'markerstyle', (markerstyle dict, can be None, see below)
]
props['linestyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'color', (color of the line if it exists, not the marker)
'linewidth',
'dasharray', (code for linestyle, see DASH_MAP in mpltools.py)
'zorder', (viewing precedence when stacked with other objects)
]
props['markerstyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'marker', (the mpl marker symbol, see SYMBOL_MAP in mpltools.py)
'facecolor', (color of the marker face)
'edgecolor', (color of the marker edge)
'edgewidth', (width of marker edge)
'markerpath', (an SVG path for drawing the specified marker)
'zorder', (viewing precedence when stacked with other objects)
]
"""
self.msg += " Attempting to draw a line "
line, marker, shape = {}, {}, {}
if props["linestyle"] and props["markerstyle"]:
self.msg += "... with both lines+markers\n"
mode = "lines+markers"
elif props["linestyle"]:
self.msg += "... with just lines\n"
mode = "lines"
elif props["markerstyle"]:
self.msg += "... with just markers\n"
mode = "markers"
if props["linestyle"]:
color = mpltools.merge_color_and_opacity(
props["linestyle"]["color"], props["linestyle"]["alpha"]
)
if props["coordinates"] == "data":
line = go.scatter.Line(
color=color,
width=props["linestyle"]["linewidth"],
dash=mpltools.convert_dash(props["linestyle"]["dasharray"]),
)
else:
shape = dict(
line=dict(
color=color,
width=props["linestyle"]["linewidth"],
dash=mpltools.convert_dash(props["linestyle"]["dasharray"]),
)
)
if props["markerstyle"]:
if props["coordinates"] == "data":
marker = go.scatter.Marker(
opacity=props["markerstyle"]["alpha"],
color=props["markerstyle"]["facecolor"],
symbol=mpltools.convert_symbol(props["markerstyle"]["marker"]),
size=props["markerstyle"]["markersize"],
line=dict(
color=props["markerstyle"]["edgecolor"],
width=props["markerstyle"]["edgewidth"],
),
)
else:
shape = dict(
opacity=props["markerstyle"]["alpha"],
fillcolor=props["markerstyle"]["facecolor"],
symbol=mpltools.convert_symbol(props["markerstyle"]["marker"]),
size=props["markerstyle"]["markersize"],
line=dict(
color=props["markerstyle"]["edgecolor"],
width=props["markerstyle"]["edgewidth"],
),
)
if props["coordinates"] == "data":
marked_line = go.Scatter(
mode=mode,
name=(
str(props["label"])
if isinstance(props["label"], str)
else props["label"]
),
x=[xy_pair[0] for xy_pair in props["data"]],
y=[xy_pair[1] for xy_pair in props["data"]],
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
line=line,
marker=marker,
)
if self.x_is_mpl_date:
formatter = (
self.current_mpl_ax.get_xaxis()
.get_major_formatter()
.__class__.__name__
)
marked_line["x"] = mpltools.mpl_dates_to_datestrings(
marked_line["x"], formatter
)
self.plotly_fig.add_trace(marked_line)
self.msg += " Heck yeah, I drew that line\n"
elif props["coordinates"] == "axes":
# dealing with legend graphical elements
self.msg += " Using native legend\n"
else:
self.msg += " Line didn't have 'data' coordinates, not drawing\n"
warnings.warn(
"Bummer! Plotly can currently only draw Line2D "
"objects from matplotlib that are in 'data' "
"coordinates!"
)
def draw_image(self, **props):
"""Draw image.
Not implemented yet!
"""
self.msg += " Attempting to draw image\n"
self.msg += " Not drawing image\n"
warnings.warn(
"Aw. Snap! You're gonna have to hold off on "
"the selfies for now. Plotly can't import "
"images from matplotlib yet!"
)
def draw_path_collection(self, **props):
"""Add a path collection to data list as a scatter plot.
Current implementation defaults such collections as scatter plots.
Matplotlib supports collections that have many of the same parameters
in common like color, size, path, etc. However, they needn't all be
the same. Plotly does not currently support such functionality and
therefore, the style for the first object is taken and used to define
the remaining paths in the collection.
props.keys() -- [
'paths', (structure: [vertices, path_code])
'path_coordinates', ('data', 'axes', 'figure', or 'display')
'path_transforms', (mpl transform, including Affine2D matrix)
'offsets', (offset from axes, helpful if in 'data')
'offset_coordinates', ('data', 'axes', 'figure', or 'display')
'offset_order',
'styles', (style dict, see below)
'mplobj' (the collection obj being drawn)
]
props['styles'].keys() -- [
'linewidth', (one or more linewidths)
'facecolor', (one or more facecolors for path)
'edgecolor', (one or more edgecolors for path)
'alpha', (one or more opacites for path)
'zorder', (precedence when stacked)
]
"""
self.msg += " Attempting to draw a path collection\n"
if props["offset_coordinates"] == "data":
markerstyle = mpltools.get_markerstyle_from_collection(props)
scatter_props = {
"coordinates": "data",
"data": props["offsets"],
"label": None,
"markerstyle": markerstyle,
"linestyle": None,
}
self.msg += " Drawing path collection as markers\n"
self.draw_marked_line(**scatter_props)
else:
self.msg += " Path collection not linked to 'data', not drawing\n"
warnings.warn(
"Dang! That path collection is out of this "
"world. I totally don't know what to do with "
"it yet! Plotly can only import path "
"collections linked to 'data' coordinates"
)
def draw_path(self, **props):
"""Draw path, currently only attempts to draw bar charts.
This function attempts to sort a given path into a collection of
horizontal or vertical bar charts. Most of the actual code takes
place in functions from mpltools.py.
props.keys() -- [
'data', (a list of verticies for the path)
'coordinates', ('data', 'axes', 'figure', or 'display')
'pathcodes', (code for the path, structure: ['M', 'L', 'Z', etc.])
'style', (style dict, see below)
'mplobj' (the mpl path object)
]
props['style'].keys() -- [
'alpha', (opacity of path obj)
'edgecolor',
'facecolor',
'edgewidth',
'dasharray', (style for path's enclosing line)
'zorder' (precedence of obj when stacked)
]
"""
self.msg += " Attempting to draw a path\n"
is_bar = mpltools.is_bar(self.current_mpl_ax.containers, **props)
if is_bar:
self.current_bars += [props]
else:
self.msg += " This path isn't a bar, not drawing\n"
warnings.warn(
"I found a path object that I don't think is part "
"of a bar chart. Ignoring."
)
def draw_text(self, **props):
"""Create an annotation dict for a text obj.
Currently, plotly uses either 'page' or 'data' to reference
annotation locations. These refer to 'display' and 'data',
respectively for the 'coordinates' key used in the Exporter.
Appropriate measures are taken to transform text locations to
reference one of these two options.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw an mpl text object\n"
if not mpltools.check_corners(props["mplobj"], self.mpl_fig):
warnings.warn(
"Looks like the annotation(s) you are trying \n"
"to draw lies/lay outside the given figure size.\n\n"
"Therefore, the resulting Plotly figure may not be \n"
"large enough to view the full text. To adjust \n"
"the size of the figure, use the 'width' and \n"
"'height' keys in the Layout object. Alternatively,\n"
"use the Margin object to adjust the figure's margins."
)
align = props["mplobj"]._multialignment
if not align:
align = props["style"]["halign"] # mpl default
if "annotations" not in self.plotly_fig["layout"]:
self.plotly_fig["layout"]["annotations"] = []
if props["text_type"] == "xlabel":
self.msg += " Text object is an xlabel\n"
self.draw_xlabel(**props)
elif props["text_type"] == "ylabel":
self.msg += " Text object is a ylabel\n"
self.draw_ylabel(**props)
elif props["text_type"] == "title":
self.msg += " Text object is a title\n"
self.draw_title(**props)
else: # just a regular text annotation...
self.msg += " Text object is a normal annotation\n"
# Skip creating annotations for legend text when using native legend
if (
self._processing_legend
and self._legend_visible
and props["coordinates"] == "axes"
):
self.msg += (
" Skipping legend text annotation (using native legend)\n"
)
return
if props["coordinates"] != "data":
self.msg += " Text object isn't linked to 'data' coordinates\n"
x_px, y_px = (
props["mplobj"].get_transform().transform(props["position"])
)
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
else:
self.msg += " Text object is linked to 'data' coordinates\n"
x, y = props["position"]
axis_ct = self.axis_ct
xaxis = self.plotly_fig["layout"]["xaxis{0}".format(axis_ct)]
yaxis = self.plotly_fig["layout"]["yaxis{0}".format(axis_ct)]
if (
xaxis["range"][0] < x < xaxis["range"][1]
and yaxis["range"][0] < y < yaxis["range"][1]
):
xref = "x{0}".format(self.axis_ct)
yref = "y{0}".format(self.axis_ct)
else:
self.msg += (
" Text object is outside "
"plotting area, making 'paper' reference.\n"
)
x_px, y_px = (
props["mplobj"].get_transform().transform(props["position"])
)
x, y = mpltools.display_to_paper(
x_px, y_px, self.plotly_fig["layout"]
)
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
annotation = go.layout.Annotation(
text=(
str(props["text"])
if isinstance(props["text"], str)
else props["text"]
),
opacity=props["style"]["alpha"],
x=x,
y=y,
xref=xref,
yref=yref,
align=align,
xanchor=xanchor,
yanchor=yanchor,
showarrow=False, # change this later?
font=go.layout.annotation.Font(
color=props["style"]["color"], size=props["style"]["fontsize"]
),
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
self.msg += " Heck, yeah I drew that annotation\n"
def draw_title(self, **props):
"""Add a title to the current subplot in layout dictionary.
If there exists more than a single plot in the figure, titles revert
to 'page'-referenced annotations.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw a title\n"
if len(self.mpl_fig.axes) > 1:
self.msg += " More than one subplot, adding title as annotation\n"
x_px, y_px = props["mplobj"].get_transform().transform(props["position"])
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
annotation = go.layout.Annotation(
text=props["text"],
font=go.layout.annotation.Font(
color=props["style"]["color"], size=props["style"]["fontsize"]
),
xref="paper",
yref="paper",
x=x,
y=y,
xanchor="center",
yanchor="bottom",
showarrow=False, # no arrow for a title!
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
else:
self.msg += " Only one subplot found, adding as a plotly title\n"
self.plotly_fig["layout"]["title"] = props["text"]
title_font = dict(
size=props["style"]["fontsize"], color=props["style"]["color"]
)
self.plotly_fig["layout"]["title_font"] = title_font
def draw_xlabel(self, **props):
"""Add an xaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding xlabel\n"
axis_key = "xaxis{0}".format(self.axis_ct)
self.plotly_fig["layout"][axis_key]["title"] = str(props["text"])
title_font = dict(
size=props["style"]["fontsize"], color=props["style"]["color"]
)
self.plotly_fig["layout"][axis_key]["title_font"] = title_font
def draw_ylabel(self, **props):
"""Add a yaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding ylabel\n"
axis_key = "yaxis{0}".format(self.axis_ct)
self.plotly_fig["layout"][axis_key]["title"] = props["text"]
title_font = dict(
size=props["style"]["fontsize"], color=props["style"]["color"]
)
self.plotly_fig["layout"][axis_key]["title_font"] = title_font
def resize(self):
"""Revert figure layout to allow plotly to resize.
By default, PlotlyRenderer tries its hardest to precisely mimic an
mpl figure. However, plotly is pretty good with aesthetics. By
running PlotlyRenderer.resize(), layout parameters are deleted. This
lets plotly choose them instead of mpl.
"""
self.msg += "Resizing figure, deleting keys from layout\n"
for key in ["width", "height", "autosize", "margin"]:
try:
del self.plotly_fig["layout"][key]
except (KeyError, AttributeError):
pass
def strip_style(self):
self.msg += "Stripping mpl style is no longer supported\n"
| PlotlyRenderer |
python | nryoung__algorithms | tests/test_sorting.py | {
"start": 3932,
"end": 4176
} | class ____(SortingAlgorithmTestCase):
"""
Test Shell sort on a small range from 0-9
"""
def test_shellsort(self):
self.output = shell_sort.sort(self.input)
self.assertEqual(self.correct, self.output)
| TestShellSort |
python | spack__spack | lib/spack/spack/util/web.py | {
"start": 2254,
"end": 2435
} | class ____(HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
raise DetailedHTTPError(req, code, msg, hdrs, fp)
| SpackHTTPDefaultErrorHandler |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_sagemaker_training.py | {
"start": 2681,
"end": 9307
} | class ____:
def setup_method(self):
self.sagemaker = SageMakerTrainingOperator(
task_id="test_sagemaker_operator",
config=CREATE_TRAINING_PARAMS.copy(),
wait_for_completion=False,
check_interval=5,
)
@mock.patch.object(SageMakerHook, "describe_training_job")
@mock.patch.object(SageMakerHook, "create_training_job")
@mock.patch.object(sagemaker, "serialize", return_value="")
def test_integer_fields(self, _, mock_training, mock_desc):
mock_desc.side_effect = [ClientError({"Error": {"Code": "ValidationException"}}, "op"), None]
mock_training.return_value = {
"TrainingJobArn": "test_arn",
"ResponseMetadata": {"HTTPStatusCode": 200},
}
self.sagemaker.execute(None)
assert self.sagemaker.integer_fields == EXPECTED_INTEGER_FIELDS
for key1, key2 in EXPECTED_INTEGER_FIELDS:
assert self.sagemaker.config[key1][key2] == int(self.sagemaker.config[key1][key2])
@mock.patch.object(SageMakerHook, "describe_training_job")
@mock.patch.object(SageMakerHook, "create_training_job")
@mock.patch.object(sagemaker, "serialize", return_value="")
def test_execute_without_check_if_job_exists(self, _, mock_training, mock_desc):
mock_training.return_value = {
"TrainingJobArn": "test_arn",
"ResponseMetadata": {"HTTPStatusCode": 200},
}
self.sagemaker.check_if_job_exists = False
self.sagemaker._check_if_job_exists = mock.MagicMock()
self.sagemaker.execute(None)
self.sagemaker._check_if_job_exists.assert_not_called()
mock_training.assert_called_once_with(
CREATE_TRAINING_PARAMS,
wait_for_completion=False,
print_log=True,
check_interval=5,
max_ingestion_time=None,
)
a = []
a.sort()
@mock.patch.object(SageMakerHook, "describe_training_job")
@mock.patch.object(SageMakerHook, "create_training_job")
def test_execute_with_failure(self, mock_training, mock_desc):
mock_desc.side_effect = [ClientError({"Error": {"Code": "ValidationException"}}, "op")]
mock_training.return_value = {
"TrainingJobArn": "test_arn",
"ResponseMetadata": {"HTTPStatusCode": 404},
}
with pytest.raises(AirflowException):
self.sagemaker.execute(None)
@mock.patch("airflow.providers.amazon.aws.operators.sagemaker.SageMakerTrainingOperator.defer")
@mock.patch.object(
SageMakerHook,
"describe_training_job_with_log",
return_value=(
LogState.JOB_COMPLETE,
{
"TrainingJobStatus": "Completed",
"ResourceConfig": {"InstanceCount": 1},
"TrainingEndTime": datetime(2023, 5, 15),
"TrainingStartTime": datetime(2023, 5, 16),
},
50,
),
)
@mock.patch.object(
SageMakerHook,
"describe_training_job",
return_value={
"TrainingJobStatus": "Completed",
"ResourceConfig": {"InstanceCount": 1},
"TrainingEndTime": datetime(2023, 5, 15),
"TrainingStartTime": datetime(2023, 5, 16),
},
)
@mock.patch.object(SageMakerHook, "create_training_job")
def test_operator_complete_before_defer(
self,
mock_training,
mock_describe_training_job,
mock_describe_training_job_with_log,
mock_defer,
):
mock_training.return_value = {
"TrainingJobArn": "test_arn",
"ResponseMetadata": {"HTTPStatusCode": 200},
}
self.sagemaker.deferrable = True
self.sagemaker.wait_for_completion = True
self.sagemaker.check_if_job_exists = False
self.sagemaker.execute(context=None)
assert not mock_defer.called
@mock.patch.object(
SageMakerHook,
"describe_training_job_with_log",
return_value=(
LogState.WAIT_IN_PROGRESS,
{
"TrainingJobStatus": "Training",
"ResourceConfig": {"InstanceCount": 1},
"TrainingEndTime": datetime(2023, 5, 15),
"TrainingStartTime": datetime(2023, 5, 16),
},
50,
),
)
@mock.patch.object(
SageMakerHook,
"describe_training_job",
return_value={
"TrainingJobStatus": "Training",
"ResourceConfig": {"InstanceCount": 1},
"TrainingEndTime": datetime(2023, 5, 15),
"TrainingStartTime": datetime(2023, 5, 16),
},
)
@mock.patch.object(SageMakerHook, "create_training_job")
def test_operator_defer(
self,
mock_training,
mock_describe_training_job,
mock_describe_training_job_with_log,
):
mock_training.return_value = {
"TrainingJobArn": "test_arn",
"ResponseMetadata": {"HTTPStatusCode": 200},
}
self.sagemaker.deferrable = True
self.sagemaker.wait_for_completion = True
self.sagemaker.check_if_job_exists = False
self.sagemaker.print_log = False
with pytest.raises(TaskDeferred) as exc:
self.sagemaker.execute(context=None)
assert isinstance(exc.value.trigger, SageMakerTrigger), "Trigger is not a SagemakerTrigger"
@mock.patch.object(
SageMakerHook,
"describe_training_job",
return_value={
"InputDataConfig": [
{
"DataSource": {"S3DataSource": {"S3Uri": "s3://input-bucket/input-path"}},
}
],
"ModelArtifacts": {"S3ModelArtifacts": "s3://model-bucket/model-path"},
},
)
@mock.patch.object(
SageMakerHook,
"create_training_job",
return_value={
"TrainingJobArn": "test_arn",
"ResponseMetadata": {"HTTPStatusCode": 200},
},
)
@mock.patch.object(SageMakerBaseOperator, "_check_if_job_exists", return_value=False)
def test_execute_openlineage_data(self, mock_exists, mock_training, mock_desc):
self.sagemaker.execute(None)
assert self.sagemaker.get_openlineage_facets_on_complete(None) == OperatorLineage(
inputs=[Dataset(namespace="s3://input-bucket", name="input-path")],
outputs=[Dataset(namespace="s3://model-bucket", name="model-path")],
)
def test_template_fields(self):
validate_template_fields(self.sagemaker)
| TestSageMakerTrainingOperator |
python | python-markdown__markdown | markdown/extensions/abbr.py | {
"start": 1098,
"end": 2649
} | class ____(Extension):
""" Abbreviation Extension for Python-Markdown. """
def __init__(self, **kwargs):
""" Initiate Extension and set up configs. """
self.config = {
'glossary': [
{},
'A dictionary where the `key` is the abbreviation and the `value` is the definition.'
"Default: `{}`"
],
}
""" Default configuration options. """
super().__init__(**kwargs)
self.abbrs = {}
self.glossary = {}
def reset(self):
""" Clear all previously defined abbreviations. """
self.abbrs.clear()
if (self.glossary):
self.abbrs.update(self.glossary)
def reset_glossary(self):
""" Clear all abbreviations from the glossary. """
self.glossary.clear()
def load_glossary(self, dictionary: dict[str, str]):
"""Adds `dictionary` to our glossary. Any abbreviations that already exist will be overwritten."""
if dictionary:
self.glossary = {**dictionary, **self.glossary}
def extendMarkdown(self, md):
""" Insert `AbbrTreeprocessor` and `AbbrBlockprocessor`. """
if (self.config['glossary'][0]):
self.load_glossary(self.config['glossary'][0])
self.abbrs.update(self.glossary)
md.registerExtension(self)
md.treeprocessors.register(AbbrTreeprocessor(md, self.abbrs), 'abbr', 7)
md.parser.blockprocessors.register(AbbrBlockprocessor(md.parser, self.abbrs), 'abbr', 16)
| AbbrExtension |
python | huggingface__transformers | src/transformers/models/deberta/modeling_deberta.py | {
"start": 30196,
"end": 31004
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LegacyDebertaPredictionHeadTransform(config)
self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=True)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LegacyDeberta
| LegacyDebertaLMPredictionHead |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.