language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 9542,
"end": 9615
} | class ____(HTTPClientError):
status_code = 428
| HTTPPreconditionRequired |
python | davidhalter__jedi | test/refactor/extract_function.py | {
"start": 9256,
"end": 9441
} | class ____:
def f():
#? 11 text {'new_name': 'ab', 'until_line': 5, 'until_column': 22}
return glob1 + 2
# ++++++++++++++++++++++++++++++++++++++++++++++++++
glob1 = 1
| X |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/contrib/telnet/protocol.py | {
"start": 959,
"end": 5584
} | class ____:
"""
Parser for the Telnet protocol.
Usage::
def data_received(data):
print(data)
def size_received(rows, columns):
print(rows, columns)
p = TelnetProtocolParser(data_received, size_received)
p.feed(binary_data)
"""
def __init__(
self,
data_received_callback: Callable[[bytes], None],
size_received_callback: Callable[[int, int], None],
ttype_received_callback: Callable[[str], None],
) -> None:
self.data_received_callback = data_received_callback
self.size_received_callback = size_received_callback
self.ttype_received_callback = ttype_received_callback
self._parser = self._parse_coroutine()
self._parser.send(None) # type: ignore
def received_data(self, data: bytes) -> None:
self.data_received_callback(data)
def do_received(self, data: bytes) -> None:
"""Received telnet DO command."""
logger.info("DO %r", data)
def dont_received(self, data: bytes) -> None:
"""Received telnet DONT command."""
logger.info("DONT %r", data)
def will_received(self, data: bytes) -> None:
"""Received telnet WILL command."""
logger.info("WILL %r", data)
def wont_received(self, data: bytes) -> None:
"""Received telnet WONT command."""
logger.info("WONT %r", data)
def command_received(self, command: bytes, data: bytes) -> None:
if command == DO:
self.do_received(data)
elif command == DONT:
self.dont_received(data)
elif command == WILL:
self.will_received(data)
elif command == WONT:
self.wont_received(data)
else:
logger.info("command received %r %r", command, data)
def naws(self, data: bytes) -> None:
"""
Received NAWS. (Window dimensions.)
"""
if len(data) == 4:
# NOTE: the first parameter of struct.unpack should be
# a 'str' object. Both on Py2/py3. This crashes on OSX
# otherwise.
columns, rows = struct.unpack("!HH", data)
self.size_received_callback(rows, columns)
else:
logger.warning("Wrong number of NAWS bytes")
def ttype(self, data: bytes) -> None:
"""
Received terminal type.
"""
subcmd, data = data[0:1], data[1:]
if subcmd == IS:
ttype = data.decode("ascii")
self.ttype_received_callback(ttype)
else:
logger.warning("Received a non-IS terminal type Subnegotiation")
def negotiate(self, data: bytes) -> None:
"""
Got negotiate data.
"""
command, payload = data[0:1], data[1:]
if command == NAWS:
self.naws(payload)
elif command == TTYPE:
self.ttype(payload)
else:
logger.info("Negotiate (%r got bytes)", len(data))
def _parse_coroutine(self) -> Generator[None, bytes, None]:
"""
Parser state machine.
Every 'yield' expression returns the next byte.
"""
while True:
d = yield
if d == int2byte(0):
pass # NOP
# Go to state escaped.
elif d == IAC:
d2 = yield
if d2 == IAC:
self.received_data(d2)
# Handle simple commands.
elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
self.command_received(d2, b"")
# Handle IAC-[DO/DONT/WILL/WONT] commands.
elif d2 in (DO, DONT, WILL, WONT):
d3 = yield
self.command_received(d2, d3)
# Subnegotiation
elif d2 == SB:
# Consume everything until next IAC-SE
data = []
while True:
d3 = yield
if d3 == IAC:
d4 = yield
if d4 == SE:
break
else:
data.append(d4)
else:
data.append(d3)
self.negotiate(b"".join(data))
else:
self.received_data(d)
def feed(self, data: bytes) -> None:
"""
Feed data to the parser.
"""
for b in data:
self._parser.send(int2byte(b))
| TelnetProtocolParser |
python | jazzband__django-pipeline | tests/tests/test_compressor.py | {
"start": 757,
"end": 23206
} | class ____(TestCase):
def setUp(self):
self.maxDiff = None
self.compressor = Compressor()
default_collector.collect()
def test_js_compressor_class(self):
self.assertEqual(self.compressor.js_compressor, YuglifyCompressor)
def test_css_compressor_class(self):
self.assertEqual(self.compressor.css_compressor, YuglifyCompressor)
def test_concatenate_and_rewrite(self):
css = self.compressor.concatenate_and_rewrite(
[_("pipeline/css/first.css"), _("pipeline/css/second.css")],
"css/screen.css",
)
expected = """.concat {\n display: none;\n}\n\n.concatenate {\n display: block;\n}\n""" # noqa
self.assertEqual(expected, css)
def test_concatenate(self):
js = self.compressor.concatenate(
[_("pipeline/js/first.js"), _("pipeline/js/second.js")]
)
expected = """(function() {\n window.concat = function() {\n console.log(arguments);\n }\n}()) // No semicolon\n\n;(function() {\n window.cat = function() {\n console.log("hello world");\n }\n}());\n""" # noqa
self.assertEqual(expected, js)
@patch.object(base64, "b64encode")
def test_encoded_content(self, mock):
self.compressor.asset_contents.clear()
self.compressor.encoded_content(_("pipeline/images/arrow.png"))
self.assertTrue(mock.called)
mock.reset_mock()
self.compressor.encoded_content(_("pipeline/images/arrow.png"))
self.assertFalse(mock.called)
def test_encoded_content_output(self):
self.compressor.asset_contents.clear()
encoded = self.compressor.encoded_content(_("pipeline/images/arrow.png"))
expected = (
"iVBORw0KGgoAAAANSUhEUgAAAAkAAAAGCAYAAAARx7TFAAAAMk"
"lEQVR42oXKwQkAMAxC0Q7rEk5voSEepCHC9/SOpLV3JPULgArV"
"RtDIMEEiQ4NECRNdciCfK3K3wvEAAAAASUVORK5CYII="
)
self.assertEqual(encoded, expected)
def test_relative_path(self):
relative_path = self.compressor.relative_path(
"images/sprite.png",
"css/screen.css",
)
self.assertEqual(relative_path, "../images/sprite.png")
def test_base_path(self):
base_path = self.compressor.base_path(
[_("js/templates/form.jst"), _("js/templates/field.jst")]
)
self.assertEqual(base_path, _("js/templates"))
def test_absolute_path(self):
absolute_path = self.compressor.absolute_path(
"../../images/sprite.png", "css/plugins/"
)
self.assertEqual(absolute_path, "images/sprite.png")
absolute_path = self.compressor.absolute_path(
"/images/sprite.png", "css/plugins/"
)
self.assertEqual(absolute_path, "/images/sprite.png")
def test_template_name(self):
name = self.compressor.template_name("templates/photo/detail.jst", "templates/")
self.assertEqual(name, "photo_detail")
name = self.compressor.template_name("templates/photo_edit.jst", "")
self.assertEqual(name, "photo_edit")
name = self.compressor.template_name(
r"templates\photo\detail.jst", # noqa
"templates\\",
)
self.assertEqual(name, "photo_detail")
@pipeline_settings(TEMPLATE_SEPARATOR="/")
def test_template_name_separator(self):
name = self.compressor.template_name("templates/photo/detail.jst", "templates/")
self.assertEqual(name, "photo/detail")
name = self.compressor.template_name("templates/photo_edit.jst", "")
self.assertEqual(name, "photo_edit")
name = self.compressor.template_name(
r"templates\photo\detail.jst", # noqa
"templates\\",
)
self.assertEqual(name, "photo/detail")
def test_compile_templates(self):
templates = self.compressor.compile_templates(
[_("pipeline/templates/photo/list.jst")]
)
self.assertEqual(
templates,
"""window.JST = window.JST || {};\n%s\nwindow.JST[\'list\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%>\\n </div>\\n</div>\');\n""" # noqa
% TEMPLATE_FUNC,
)
templates = self.compressor.compile_templates(
[
_("pipeline/templates/video/detail.jst"),
_("pipeline/templates/photo/detail.jst"),
]
)
self.assertEqual(
templates,
"""window.JST = window.JST || {};\n%s\nwindow.JST[\'video_detail\'] = template(\'<div class="video">\\n <video src="<%%= src %%>" />\\n <div class="caption">\\n <%%= description %%>\\n </div>\\n</div>\');\nwindow.JST[\'photo_detail\'] = template(\'<div class="photo">\\n <img src="<%%= src %%>" />\\n <div class="caption">\\n <%%= caption %%> by <%%= author %%>\\n </div>\\n</div>\');\n""" # noqa
% TEMPLATE_FUNC,
)
def test_embeddable(self):
self.assertFalse(
self.compressor.embeddable(_("pipeline/images/sprite.png"), None)
)
self.assertFalse(
self.compressor.embeddable(_("pipeline/images/arrow.png"), "datauri")
)
self.assertTrue(
self.compressor.embeddable(_("pipeline/images/embed/arrow.png"), "datauri")
)
self.assertFalse(
self.compressor.embeddable(_("pipeline/images/arrow.dat"), "datauri")
)
def test_construct_asset_path(self):
asset_path = self.compressor.construct_asset_path(
"../../images/sprite.png", "css/plugins/gallery.css", "css/gallery.css"
)
self.assertEqual(asset_path, "../images/sprite.png")
asset_path = self.compressor.construct_asset_path(
"/images/sprite.png", "css/plugins/gallery.css", "css/gallery.css"
)
self.assertEqual(asset_path, "/images/sprite.png")
def test_concatenate_with_url_rewrite(self) -> None:
output = self.compressor.concatenate(
[
_("pipeline/css/urls.css"),
],
file_sep="",
output_filename="css/screen.css",
rewrite_path_re=CSS_REWRITE_PATH_RE,
)
self.assertEqual(
""".embedded-url-svg {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E% 3C/svg%3E");
}
@font-face {
font-family: 'Pipeline';
src: url('../pipeline/fonts/pipeline.eot');
src: url('../pipeline/fonts/pipeline.eot?#iefix') format('embedded-opentype');
src: local('☺'), url('../pipeline/fonts/pipeline.woff') format('woff'), url('../pipeline/fonts/pipeline.ttf') format('truetype'), url('../pipeline/fonts/pipeline.svg#IyfZbseF') format('svg');
font-weight: normal;
font-style: normal;
}
.relative-url {
background-image: url(../pipeline/images/sprite-buttons.png);
}
.relative-url-querystring {
background-image: url(../pipeline/images/sprite-buttons.png?v=1.0#foo=bar);
}
.absolute-url {
background-image: url(/images/sprite-buttons.png);
}
.absolute-full-url {
background-image: url(http://localhost/images/sprite-buttons.png);
}
.no-protocol-url {
background-image: url(//images/sprite-buttons.png);
}
.anchor-tag-url {
background-image: url(#image-gradient);
}
@font-face{src:url(../pipeline/fonts/pipeline.eot);src:url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype'),url(../pipeline/fonts/pipeline.woff) format('woff'),url(../pipeline/fonts/pipeline.ttf) format('truetype');}
""", # noqa
output,
)
def test_concatenate_with_url_rewrite_data_uri(self):
output = self.compressor.concatenate(
[
_("pipeline/css/nested/nested.css"),
],
file_sep="",
output_filename="pipeline/screen.css",
rewrite_path_re=CSS_REWRITE_PATH_RE,
)
self.assertEqual(
""".data-url {
background-image: url(data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E);
}
.data-url-quoted {
background-image: url('data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E');
}
""", # noqa
output,
)
def test_concatenate_css_with_sourcemap(self) -> None:
output = self.compressor.concatenate(
[
_("pipeline/css/sourcemap.css"),
],
file_sep="",
output_filename="css/sourcemap-bundle.css",
rewrite_path_re=CSS_REWRITE_PATH_RE,
)
self.assertEqual(
output,
"div {\n"
" display: inline;\n"
"}\n"
"\n"
"span {\n"
" display: block;\n"
"}\n"
"\n"
"\n"
"//# sourceMappingURL=../pipeline/css/sourcemap1.css.map\n"
"\n"
"//@ sourceMappingURL=../pipeline/css/sourcemap2.css.map \n"
"\n"
"/*# sourceMappingURL=../pipeline/css/sourcemap3.css.map */\n"
"\n"
"/*@ sourceMappingURL=../pipeline/css/sourcemap4.css.map */\n"
"\n"
"//# sourceURL=../pipeline/css/sourcemap5.css.map\n"
"\n"
"//@ sourceURL=../pipeline/css/sourcemap6.css.map \n"
"\n"
"/*# sourceURL=../pipeline/css/sourcemap7.css.map */\n"
"\n"
"/*@ sourceURL=../pipeline/css/sourcemap8.css.map */\n",
)
def test_concatenate_js_with_sourcemap(self) -> None:
output = self.compressor.concatenate(
[
_("pipeline/js/sourcemap.js"),
],
file_sep=";",
output_filename="js/sourcemap-bundle.js",
rewrite_path_re=JS_REWRITE_PATH_RE,
)
self.assertEqual(
output,
"const abc = 123;\n"
"\n"
"\n"
"//# sourceMappingURL=../pipeline/js/sourcemap1.js.map\n"
"\n"
"//@ sourceMappingURL=../pipeline/js/sourcemap2.js.map \n"
"\n"
"/*# sourceMappingURL=../pipeline/js/sourcemap3.js.map */\n"
"\n"
"/*@ sourceMappingURL=../pipeline/js/sourcemap4.js.map */\n"
"\n"
"//# sourceURL=../pipeline/js/sourcemap5.js.map\n"
"\n"
"//@ sourceURL=../pipeline/js/sourcemap6.js.map \n"
"\n"
"/*# sourceURL=../pipeline/js/sourcemap7.js.map */\n"
"\n"
"/*@ sourceURL=../pipeline/js/sourcemap8.js.map */\n",
)
def test_concatenate_without_rewrite_path_re(self) -> None:
message = (
"Compressor.concatenate() was called without passing "
"rewrite_path_re_= or output_filename=. If you are "
"specializing Compressor, please update your call "
"to remain compatible with future changes."
)
with self.assertWarnsMessage(DeprecationWarning, message):
output = self.compressor.concatenate(
[
_("pipeline/js/sourcemap.js"),
],
file_sep=";",
output_filename="js/sourcemap-bundle.js",
)
self.assertEqual(
output,
"const abc = 123;\n"
"\n"
"\n"
"//# sourceMappingURL=sourcemap1.js.map\n"
"\n"
"//@ sourceMappingURL=sourcemap2.js.map \n"
"\n"
"/*# sourceMappingURL=sourcemap3.js.map */\n"
"\n"
"/*@ sourceMappingURL=sourcemap4.js.map */\n"
"\n"
"//# sourceURL=sourcemap5.js.map\n"
"\n"
"//@ sourceURL=sourcemap6.js.map \n"
"\n"
"/*# sourceURL=sourcemap7.js.map */\n"
"\n"
"/*@ sourceURL=sourcemap8.js.map */\n",
)
def test_concatenate_without_output_filename(self) -> None:
message = (
"Compressor.concatenate() was called without passing "
"rewrite_path_re_= or output_filename=. If you are "
"specializing Compressor, please update your call "
"to remain compatible with future changes."
)
with self.assertWarnsMessage(DeprecationWarning, message):
output = self.compressor.concatenate(
[
_("pipeline/js/sourcemap.js"),
],
file_sep=";",
rewrite_path_re=JS_REWRITE_PATH_RE,
)
self.assertEqual(
output,
"const abc = 123;\n"
"\n"
"\n"
"//# sourceMappingURL=sourcemap1.js.map\n"
"\n"
"//@ sourceMappingURL=sourcemap2.js.map \n"
"\n"
"/*# sourceMappingURL=sourcemap3.js.map */\n"
"\n"
"/*@ sourceMappingURL=sourcemap4.js.map */\n"
"\n"
"//# sourceURL=sourcemap5.js.map\n"
"\n"
"//@ sourceURL=sourcemap6.js.map \n"
"\n"
"/*# sourceURL=sourcemap7.js.map */\n"
"\n"
"/*@ sourceURL=sourcemap8.js.map */\n",
)
def test_concatenate_without_file_sep(self) -> None:
message = (
"Compressor.concatenate() was called without passing "
"file_sep=. If you are specializing Compressor, please "
"update your call to remain compatible with future changes. "
"Defaulting to JavaScript behavior for "
"backwards-compatibility."
)
with self.assertWarnsMessage(DeprecationWarning, message):
output = self.compressor.concatenate(
[
_("pipeline/js/first.js"),
_("pipeline/js/second.js"),
],
output_filename="js/sourcemap-bundle.js",
rewrite_path_re=JS_REWRITE_PATH_RE,
)
self.assertEqual(
output,
"(function() {\n"
" window.concat = function() {\n"
" console.log(arguments);\n"
" }\n"
"}()) // No semicolon\n"
"\n"
";(function() {\n"
" window.cat = function() {\n"
' console.log("hello world");\n'
" }\n"
"}());\n",
)
def test_legacy_concatenate_and_rewrite(self) -> None:
message = (
"Compressor.concatenate_and_rewrite() is deprecated. Please "
"call concatenate() instead."
)
with self.assertWarnsMessage(DeprecationWarning, message):
output = self.compressor.concatenate_and_rewrite(
[
_("pipeline/css/urls.css"),
],
"css/screen.css",
)
self.assertEqual(
""".embedded-url-svg {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 32 32' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 8h24M4 16h24M4 24h24'/%3E% 3C/svg%3E");
}
@font-face {
font-family: 'Pipeline';
src: url('../pipeline/fonts/pipeline.eot');
src: url('../pipeline/fonts/pipeline.eot?#iefix') format('embedded-opentype');
src: local('☺'), url('../pipeline/fonts/pipeline.woff') format('woff'), url('../pipeline/fonts/pipeline.ttf') format('truetype'), url('../pipeline/fonts/pipeline.svg#IyfZbseF') format('svg');
font-weight: normal;
font-style: normal;
}
.relative-url {
background-image: url(../pipeline/images/sprite-buttons.png);
}
.relative-url-querystring {
background-image: url(../pipeline/images/sprite-buttons.png?v=1.0#foo=bar);
}
.absolute-url {
background-image: url(/images/sprite-buttons.png);
}
.absolute-full-url {
background-image: url(http://localhost/images/sprite-buttons.png);
}
.no-protocol-url {
background-image: url(//images/sprite-buttons.png);
}
.anchor-tag-url {
background-image: url(#image-gradient);
}
@font-face{src:url(../pipeline/fonts/pipeline.eot);src:url(../pipeline/fonts/pipeline.eot?#iefix) format('embedded-opentype'),url(../pipeline/fonts/pipeline.woff) format('woff'),url(../pipeline/fonts/pipeline.ttf) format('truetype');}
""", # noqa
output,
)
def test_legacy_concatenate_and_rewrite_with_data_uri(self) -> None:
message = (
"Compressor.concatenate_and_rewrite() is deprecated. Please "
"call concatenate() instead."
)
with self.assertWarnsMessage(DeprecationWarning, message):
output = self.compressor.concatenate_and_rewrite(
[
_("pipeline/css/nested/nested.css"),
],
"pipeline/screen.css",
)
self.assertEqual(
""".data-url {
background-image: url(data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E);
}
.data-url-quoted {
background-image: url('data:image/svg+xml;charset=US-ASCII,%3C%3Fxml%20version%3D%221.0%22%20encoding%3D%22iso-8859-1%22%3F%3E%3C!DOCTYPE%20svg%20PUBLIC%20%22-%2F%2FW3C%2F%2FDTD%20SVG%201.1%2F%2FEN%22%20%22http%3A%2F%2Fwww.w3.org%2FGraphics%2FSVG%2F1.1%2FDTD%2Fsvg11.dtd%22%3E%3Csvg%20version%3D%221.1%22%20id%3D%22Layer_1%22%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20xmlns%3Axlink%3D%22http%3A%2F%2Fwww.w3.org%2F1999%2Fxlink%22%20x%3D%220px%22%20y%3D%220px%22%20%20width%3D%2212px%22%20height%3D%2214px%22%20viewBox%3D%220%200%2012%2014%22%20style%3D%22enable-background%3Anew%200%200%2012%2014%3B%22%20xml%3Aspace%3D%22preserve%22%3E%3Cpath%20d%3D%22M11%2C6V5c0-2.762-2.239-5-5-5S1%2C2.238%2C1%2C5v1H0v8h12V6H11z%20M6.5%2C9.847V12h-1V9.847C5.207%2C9.673%2C5%2C9.366%2C5%2C9%20c0-0.553%2C0.448-1%2C1-1s1%2C0.447%2C1%2C1C7%2C9.366%2C6.793%2C9.673%2C6.5%2C9.847z%20M9%2C6H3V5c0-1.657%2C1.343-3%2C3-3s3%2C1.343%2C3%2C3V6z%22%2F%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3Cg%3E%3C%2Fg%3E%3C%2Fsvg%3E');
}
""", # noqa
output,
)
@skipIf(sys.platform.startswith("win"), "requires posix platform")
def test_compressor_subprocess_unicode(self):
path = os.path.dirname(os.path.dirname(__file__))
content = open(path + "/assets/css/unicode.css", encoding="utf-8").read()
output = SubProcessCompressor(False).execute_command(("cat",), content)
self.assertEqual(
""".some_class {
// Some unicode
content: "áéíóú";
}
""",
output,
)
def tearDown(self):
default_collector.clear()
| CompressorTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 37057,
"end": 42347
} | class ____(Request):
"""
Get the debug image events for the requested amount of iterations per each task's metric
:param metrics: List metrics for which the envents will be retreived
:type metrics: Sequence[TaskMetric]
:param iters: Max number of latest iterations for which to return debug images
:type iters: int
:param navigate_earlier: If set then events are retreived from latest
iterations to earliest ones. Otherwise from earliest iterations to the latest.
The default is True
:type navigate_earlier: bool
:param refresh: If set then scroll will be moved to the latest iterations. The
default is False
:type refresh: bool
:param scroll_id: Scroll ID of previous call (used for getting more results)
:type scroll_id: str
"""
_service = "events"
_action = "debug_images"
_version = "2.13"
_schema = {
"definitions": {
"task_metric": {
"properties": {
"metric": {
"description": "Metric name. If not specified then all metrics for this task will be returned",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
},
"properties": {
"iters": {
"description": "Max number of latest iterations for which to return debug images",
"type": "integer",
},
"metrics": {
"description": "List metrics for which the envents will be retreived",
"items": {"$ref": "#/definitions/task_metric"},
"type": "array",
},
"navigate_earlier": {
"description": "If set then events are retreived from latest iterations to earliest ones. Otherwise from earliest iterations to the latest. The default is True",
"type": "boolean",
},
"refresh": {
"description": "If set then scroll will be moved to the latest iterations. The default is False",
"type": "boolean",
},
"scroll_id": {
"description": "Scroll ID of previous call (used for getting more results)",
"type": "string",
},
},
"required": ["metrics"],
"type": "object",
}
def __init__(
self,
metrics: List[Any],
iters: Optional[int] = None,
navigate_earlier: Optional[bool] = None,
refresh: Optional[bool] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(DebugImagesRequest, self).__init__(**kwargs)
self.metrics = metrics
self.iters = iters
self.navigate_earlier = navigate_earlier
self.refresh = refresh
self.scroll_id = scroll_id
@schema_property("metrics")
def metrics(self) -> List[Any]:
return self._property_metrics
@metrics.setter
def metrics(self, value: List[Any]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [TaskMetric.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metrics", TaskMetric, is_array=True)
self._property_metrics = value
@schema_property("iters")
def iters(self) -> Optional[int]:
return self._property_iters
@iters.setter
def iters(self, value: Optional[int]) -> None:
if value is None:
self._property_iters = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iters", six.integer_types)
self._property_iters = value
@schema_property("navigate_earlier")
def navigate_earlier(self) -> Optional[bool]:
return self._property_navigate_earlier
@navigate_earlier.setter
def navigate_earlier(self, value: Optional[bool]) -> None:
if value is None:
self._property_navigate_earlier = None
return
self.assert_isinstance(value, "navigate_earlier", (bool,))
self._property_navigate_earlier = value
@schema_property("refresh")
def refresh(self) -> Optional[bool]:
return self._property_refresh
@refresh.setter
def refresh(self, value: Optional[bool]) -> None:
if value is None:
self._property_refresh = None
return
self.assert_isinstance(value, "refresh", (bool,))
self._property_refresh = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| DebugImagesRequest |
python | mlflow__mlflow | mlflow/genai/judges/utils/__init__.py | {
"start": 3341,
"end": 4661
} | class ____(StrEnum):
"""
A categorical rating for an assessment.
Example:
.. code-block:: python
from mlflow.genai.judges import CategoricalRating
from mlflow.entities import Feedback
# Create feedback with categorical rating
feedback = Feedback(
name="my_metric", value=CategoricalRating.YES, rationale="The metric is passing."
)
"""
YES = "yes"
NO = "no"
UNKNOWN = "unknown"
@classmethod
def _missing_(cls, value: str):
value = value.lower()
for member in cls:
if member == value:
return member
return cls.UNKNOWN
__all__ = [
# Local functions
"get_default_model",
"get_default_optimizer",
"validate_judge_model",
"CategoricalRating",
# Databricks adapter
"call_chat_completions",
"InvokeDatabricksModelOutput",
"InvokeJudgeModelHelperOutput",
# Gateway adapter
"_NATIVE_PROVIDERS",
# LiteLLM adapter
"_suppress_litellm_nonfatal_errors",
# Invocation utils
"FieldExtraction",
"invoke_judge_model",
"get_chat_completions_with_structured_output",
# Prompt utils
"DatabricksLLMJudgePrompts",
"format_prompt",
"add_output_format_instructions",
]
| CategoricalRating |
python | spyder-ide__spyder | spyder/plugins/run/api.py | {
"start": 4493,
"end": 4775
} | class ____(TypedDict):
"""Run context name schema."""
# CamelCase name of the context.
name: str
# String identifier for the run context. If non-existent or None, then a
# snake_case version of the name is used.
identifier: NotRequired[Optional[str]]
| Context |
python | coleifer__peewee | tests/sqlite.py | {
"start": 4510,
"end": 4953
} | class ____(TableFunction):
params = ['data']
columns = ['part']
name = 'str_split'
def initialize(self, data=None):
self._parts = data.split()
self._idx = 0
def iterate(self, idx):
if self._idx < len(self._parts):
result = (self._parts[self._idx],)
self._idx += 1
return result
raise StopIteration
@skip_unless(IS_SQLITE_9, 'requires sqlite >= 3.9')
| Split |
python | getsentry__sentry | src/sentry/integrations/slack/sdk_client.py | {
"start": 2822,
"end": 4203
} | class ____(WebClient, metaclass=MetaClass):
def __init__(self, integration_id: int):
self.integration_id = integration_id
integration: Integration | RpcIntegration | None
if SiloMode.get_current_mode() == SiloMode.REGION:
"""
# In order to send requests, SlackClient needs to fetch the integration
# to get access tokens which trips up rpc method/transaction
# boundary detection. Those boundaries are not relevant because
# this is a read operation.
"""
with in_test_hide_transaction_boundary():
integration = integration_service.get_integration(
integration_id=integration_id, status=ObjectStatus.ACTIVE
)
else: # control or monolith (local)
integration = Integration.objects.filter(
id=integration_id, status=ObjectStatus.ACTIVE
).first()
if integration is None:
raise ValueError(f"Integration with id {integration_id} not found")
access_token = integration.metadata.get("access_token")
if not access_token:
raise ValueError(f"Missing token for integration with id {integration_id}")
# TODO: missing from old SlackClient: verify_ssl, logging_context
super().__init__(token=access_token)
| SlackSdkClient |
python | pytest-dev__pytest | testing/test_doctest.py | {
"start": 562,
"end": 28112
} | class ____:
def test_collect_testtextfile(self, pytester: Pytester):
w = pytester.maketxtfile(whatever="")
checkfile = pytester.maketxtfile(
test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
"""
)
for x in (pytester.path, checkfile):
# print "checking that %s returns custom items" % (x,)
items, _reprec = pytester.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestTextfile)
# Empty file has no items.
items, _reprec = pytester.inline_genitems(w)
assert len(items) == 0
def test_collect_module_empty(self, pytester: Pytester):
path = pytester.makepyfile(whatever="#")
for p in (path, pytester.path):
items, _reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, pytester: Pytester):
path = pytester.makepyfile(whatever='""">>> pass"""')
for p in (path, pytester.path):
items, _reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, pytester: Pytester):
path = pytester.makepyfile(
whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
"""
)
for p in (path, pytester.path):
items, _reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
@pytest.mark.parametrize("filename", ["__init__", "whatever"])
def test_collect_module_two_doctest_no_modulelevel(
self,
pytester: Pytester,
filename: str,
) -> None:
path = pytester.makepyfile(
**{
filename: """
'# Empty'
def my_func():
">>> magic = 42 "
def useless():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
""",
},
)
for p in (path, pytester.path):
items, _reprec = pytester.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, pytester: Pytester):
p = pytester.maketxtfile(
test_doc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(failed=1)
def test_importmode(self, pytester: Pytester):
pytester.makepyfile(
**{
"src/namespacepkg/innerpkg/__init__.py": "",
"src/namespacepkg/innerpkg/a.py": """
def some_func():
return 42
""",
"src/namespacepkg/innerpkg/b.py": """
from namespacepkg.innerpkg.a import some_func
def my_func():
'''
>>> my_func()
42
'''
return some_func()
""",
}
)
# For 'namespacepkg' to be considered a namespace package, its containing directory
# needs to be reachable from sys.path:
# https://packaging.python.org/en/latest/guides/packaging-namespace-packages
pytester.syspathinsert(pytester.path / "src")
reprec = pytester.inline_run("--doctest-modules", "--import-mode=importlib")
reprec.assertoutcome(passed=1)
def test_new_pattern(self, pytester: Pytester):
p = pytester.maketxtfile(
xdoc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_multiple_patterns(self, pytester: Pytester):
"""Test support for multiple --doctest-glob arguments (#1255)."""
pytester.maketxtfile(
xdoc="""
>>> 1
1
"""
)
pytester.makefile(
".foo",
test="""
>>> 1
1
""",
)
pytester.maketxtfile(
test_normal="""
>>> 1
1
"""
)
expected = {"xdoc.txt", "test.foo", "test_normal.txt"}
assert {x.name for x in pytester.path.iterdir()} == expected
args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
result = pytester.runpytest(*args)
result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"])
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"])
@pytest.mark.parametrize(
" test_string, encoding",
[("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")],
)
def test_encoding(self, pytester, test_string, encoding):
"""Test support for doctest_encoding ini option."""
pytester.makeini(
f"""
[pytest]
doctest_encoding={encoding}
"""
)
doctest = f"""
>>> "{test_string}"
{test_string!r}
"""
fn = pytester.path / "test_encoding.txt"
fn.write_text(doctest, encoding=encoding)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_doctest_unexpected_exception(self, pytester: Pytester):
pytester.maketxtfile(
"""
>>> i = 0
>>> 0 / i
2
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"test_doctest_unexpected_exception.txt F *",
"",
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_unexpected_exception.txt _*",
"001 >>> i = 0",
"002 >>> 0 / i",
"UNEXPECTED EXCEPTION: ZeroDivisionError*",
"Traceback (most recent call last):",
*(
(' File "*/doctest.py", line *, in __run', " *")
if sys.version_info <= (3, 14)
else ()
),
*((" *^^^^*", " *", " *") if sys.version_info[:2] == (3, 13) else ()),
' File "<doctest test_doctest_unexpected_exception.txt[1]>", line 1, in <module>',
"ZeroDivisionError: division by zero",
"*/test_doctest_unexpected_exception.txt:2: UnexpectedException",
],
consecutive=True,
)
def test_doctest_outcomes(self, pytester: Pytester):
pytester.maketxtfile(
test_skip="""
>>> 1
1
>>> import pytest
>>> pytest.skip("")
>>> 2
3
""",
test_xfail="""
>>> import pytest
>>> pytest.xfail("xfail_reason")
>>> foo
bar
""",
test_importorskip="""
>>> import pytest
>>> pytest.importorskip("doesnotexist")
>>> foo
bar
""",
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"collected 3 items",
"",
"test_importorskip.txt s *",
"test_skip.txt s *",
"test_xfail.txt x *",
"",
"*= 2 skipped, 1 xfailed in *",
]
)
def test_docstring_partial_context_around_error(self, pytester: Pytester):
"""Test that we show some context before the actual line of a failing
doctest.
"""
pytester.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
text-line-3
text-line-4
text-line-5
text-line-6
text-line-7
text-line-8
text-line-9
text-line-10
text-line-11
>>> 1 + 1
3
text-line-after
"""
'''
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_partial_context_around_error*",
"005*text-line-3",
"006*text-line-4",
"013*text-line-11",
"014*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
# lines below should be trimmed out
result.stdout.no_fnmatch_line("*text-line-2*")
result.stdout.no_fnmatch_line("*text-line-after*")
def test_docstring_full_context_around_error(self, pytester: Pytester):
"""Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
"""
pytester.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
>>> 1 + 1
3
"""
'''
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_full_context_around_error*",
"003*text-line-1",
"004*text-line-2",
"006*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
def test_doctest_linedata_missing(self, pytester: Pytester):
pytester.path.joinpath("hello.py").write_text(
textwrap.dedent(
"""\
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""
),
encoding="utf-8",
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
["*hello*", "006*>>> 1/0*", "*UNEXPECTED*ZeroDivision*", "*1 failed*"]
)
def test_doctest_linedata_on_property(self, pytester: Pytester):
pytester.makepyfile(
"""
class Sample(object):
@property
def some_property(self):
'''
>>> Sample().some_property
'another thing'
'''
return 'something'
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_linedata_on_property.Sample.some_property _*",
"004 ",
"005 *>>> Sample().some_property",
"Expected:",
" 'another thing'",
"Got:",
" 'something'",
"",
"*/test_doctest_linedata_on_property.py:5: DocTestFailure",
"*= 1 failed in *",
]
)
def test_doctest_no_linedata_on_overridden_property(self, pytester: Pytester):
pytester.makepyfile(
"""
class Sample(object):
@property
def some_property(self):
'''
>>> Sample().some_property
'another thing'
'''
return 'something'
some_property = property(some_property.__get__, None, None, some_property.__doc__)
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_no_linedata_on_overridden_property.Sample.some_property _*",
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example",
"[?][?][?] >>> Sample().some_property",
"Expected:",
" 'another thing'",
"Got:",
" 'something'",
"",
"*/test_doctest_no_linedata_on_overridden_property.py:None: DocTestFailure",
"*= 1 failed in *",
]
)
def test_doctest_unex_importerror_only_txt(self, pytester: Pytester):
pytester.maketxtfile(
"""
>>> import asdalsdkjaslkdjasd
>>>
"""
)
result = pytester.runpytest()
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*>>> import asdals*",
"*UNEXPECTED*ModuleNotFoundError*",
"ModuleNotFoundError: No module named *asdal*",
]
)
def test_doctest_unex_importerror_with_module(self, pytester: Pytester):
pytester.path.joinpath("hello.py").write_text(
textwrap.dedent(
"""\
import asdalsdkjaslkdjasd
"""
),
encoding="utf-8",
)
pytester.maketxtfile(
"""
>>> import hello
>>>
"""
)
result = pytester.runpytest("--doctest-modules")
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*ERROR collecting hello.py*",
"*ModuleNotFoundError: No module named *asdals*",
"*Interrupted: 1 error during collection*",
]
)
def test_doctestmodule(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> x = 1
>>> x == 1
False
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctest_cached_property(self, pytester: Pytester):
p = pytester.makepyfile(
"""
import functools
class Foo:
@functools.cached_property
def foo(self):
'''
>>> assert False, "Tacos!"
'''
...
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.assert_outcomes(failed=1)
assert "Tacos!" in result.stdout.str()
def test_doctestmodule_external_and_issue116(self, pytester: Pytester):
p = pytester.mkpydir("hello")
p.joinpath("__init__.py").write_text(
textwrap.dedent(
"""\
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""
),
encoding="utf-8",
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(
[
"003 *>>> i = 0",
"004 *>>> i + 1",
"*Expected:",
"* 2",
"*Got:",
"* 1",
"*:4: DocTestFailure",
]
)
def test_txtfile_failing(self, pytester: Pytester):
p = pytester.maketxtfile(
"""
>>> i = 0
>>> i + 1
2
"""
)
result = pytester.runpytest(p, "-s")
result.stdout.fnmatch_lines(
[
"001 >>> i = 0",
"002 >>> i + 1",
"Expected:",
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure",
]
)
def test_txtfile_with_fixtures(self, pytester: Pytester):
p = pytester.maketxtfile(
"""
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
usefixtures = myfixture
"""
)
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
"""
)
p = pytester.maketxtfile(
"""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
"""
)
reprec = pytester.inline_run(p)
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, pytester: Pytester):
p = pytester.makepyfile(
"""
'''
>>> p = getfixture('tmp_path')
>>> p.is_dir()
True
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def useless():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, pytester: Pytester):
p = pytester.makepyfile(
"""
class MyClass(object):
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = pytester.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = pytester.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = pytester.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, pytester: Pytester):
pytester.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = pytester.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_contains_unicode(self, pytester: Pytester):
"""Fix internal error with docstrings containing non-ascii characters."""
pytester.makepyfile(
'''\
def foo():
"""
>>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
'anything'
"""
''' # noqa: RUF001
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"])
def test_ignore_import_errors_on_doctest(self, pytester: Pytester):
p = pytester.makepyfile(
"""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
"""
)
reprec = pytester.inline_run(
p, "--doctest-modules", "--doctest-ignore-import-errors"
)
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, pytester: Pytester):
"""#713: Fix --junit-xml option when used with --doctest-modules."""
p = pytester.makepyfile(
"""
def foo():
'''
>>> 1 + 1
3
'''
pass
"""
)
reprec = pytester.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_unicode_doctest(self, pytester: Pytester):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
"""
p = pytester.maketxtfile(
test_unicode_doctest="""
.. doctest::
>>> print("Hi\\n\\nByé")
Hi
...
Byé
>>> 1 / 0 # Byé
1
"""
)
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"]
)
def test_unicode_doctest_module(self, pytester: Pytester):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
"""
p = pytester.makepyfile(
test_unicode_doctest_module="""
def fix_bad_unicode(text):
'''
>>> print(fix_bad_unicode('único'))
único
'''
return "único"
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_print_unicode_value(self, pytester: Pytester):
"""
Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work
"""
p = pytester.maketxtfile(
test_print_unicode_value=r"""
Here is a doctest::
>>> print('\xE5\xE9\xEE\xF8\xFC')
åéîøü
"""
)
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_reportinfo(self, pytester: Pytester):
"""Make sure that DoctestItem.reportinfo() returns lineno."""
p = pytester.makepyfile(
test_reportinfo="""
def foo(x):
'''
>>> foo('a')
'b'
'''
return 'c'
"""
)
items, _reprec = pytester.inline_genitems(p, "--doctest-modules")
reportinfo = items[0].reportinfo()
assert reportinfo[1] == 1
def test_valid_setup_py(self, pytester: Pytester):
"""
Test to make sure that pytest ignores valid setup.py files when ran
with --doctest-modules
"""
p = pytester.makepyfile(
setup="""
if __name__ == '__main__':
from setuptools import setup, find_packages
setup(name='sample',
version='0.0',
description='description',
packages=find_packages()
)
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 0 items*"])
def test_main_py_does_not_cause_import_errors(self, pytester: Pytester):
p = pytester.copy_example("doctest/main_py")
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 2 items*", "*1 failed, 1 passed*"])
def test_invalid_setup_py(self, pytester: Pytester):
"""
Test to make sure that pytest reads setup.py files that are not used
for python packages when ran with --doctest-modules
"""
p = pytester.makepyfile(
setup="""
def test_foo():
return 'bar'
"""
)
result = pytester.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 1 item*"])
def test_setup_module(self, pytester: Pytester) -> None:
"""Regression test for #12011 - setup_module not executed when running
with `--doctest-modules`."""
pytester.makepyfile(
"""
CONSTANT = 0
def setup_module():
global CONSTANT
CONSTANT = 1
def test():
assert CONSTANT == 1
"""
)
result = pytester.runpytest("--doctest-modules")
assert result.ret == 0
result.assert_outcomes(passed=1)
| TestDoctests |
python | xlwings__xlwings | tests/test_sheet.py | {
"start": 1791,
"end": 8299
} | class ____(TestBase):
def test_name(self):
self.wb1.sheets[0].name = "NewName"
self.assertEqual(self.wb1.sheets[0].name, "NewName")
def test_names(self):
self.wb1.sheets[0].range("A1").name = "test1"
self.assertEqual(len(self.wb1.sheets[0].names), 0)
self.wb1.sheets[0].names.add("Sheet1!test2", "Sheet1!B2")
self.assertEqual(len(self.wb1.sheets[0].names), 1)
def test_book(self):
self.assertEqual(self.wb1.sheets[0].book.name, self.wb1.name)
def test_index(self):
self.assertEqual(self.wb1.sheets["Sheet1"].index, 1)
def test_range(self):
self.wb1.sheets[0].range("A1").value = 123.0
self.assertEqual(self.wb1.sheets[0].range("A1").value, 123.0)
def test_cells(self):
pass # TODO
def test_activate(self):
if sys.platform.startswith("win") and self.app1.version.major > 14:
# Excel >= 2013 on Win has issues with activating hidden apps correctly
# over two instances
with self.assertRaises(Exception):
self.app1.activate()
else:
self.wb2.activate()
self.wb1.sheets["Sheet2"].activate()
self.assertEqual(self.wb1.sheets.active.name, "Sheet2")
self.assertEqual(xw.apps.keys()[0], self.app1.pid)
self.wb1.sheets[2].activate()
self.assertEqual(self.wb1.sheets.active.index, 3)
self.wb1.sheets(1).activate()
self.assertEqual(self.wb1.sheets.active.index, 1)
def test_select(self):
self.wb2.sheets[1].select()
self.assertEqual(self.wb2.sheets.active, self.wb2.sheets[1])
def test_clear_content(self):
self.wb1.sheets["Sheet2"].range("G10").value = 22
self.wb1.sheets["Sheet2"].clear_contents()
self.assertEqual(self.wb1.sheets["Sheet2"].range("G10").value, None)
def test_clear_formats(self):
self.wb1.sheets["Sheet2"].range("G10").value = 22
self.wb1.sheets["Sheet2"].range("G10").color = (255, 0, 0)
self.wb1.sheets["Sheet2"].clear_formats()
self.assertEqual(self.wb1.sheets["Sheet2"].range("G10").value, 22)
self.assertEqual(self.wb1.sheets["Sheet2"].range("G10").color, None)
def test_clear(self):
self.wb1.sheets["Sheet2"].range("G10").value = 22
self.wb1.sheets["Sheet2"].range("G10").color = (255, 255, 255)
self.wb1.sheets["Sheet2"].clear()
self.assertEqual(self.wb1.sheets["Sheet2"].range("G10").value, None)
self.assertEqual(self.wb1.sheets["Sheet2"].range("G10").color, None)
def test_autofit(self):
sht = self.wb1.sheets["Sheet1"]
sht.range("A1:D4").value = "test_string"
sht.range("A1:D4").row_height = 40
sht.range("A1:D4").column_width = 40
self.assertEqual(sht.range("A1:D4").row_height, 40)
self.assertEqual(sht.range("A1:D4").column_width, 40)
sht.autofit()
self.assertNotEqual(sht.range("A1:D4").row_height, 40)
self.assertNotEqual(sht.range("A1:D4").column_width, 40)
# Just checking if they don't throw an error
sht.autofit("r")
sht.autofit("c")
sht.autofit("rows")
sht.autofit("columns")
def test_delete(self):
self.assertTrue("Sheet1" in [i.name for i in self.wb1.sheets])
self.wb1.sheets["Sheet1"].delete()
self.assertFalse("Sheet1" in [i.name for i in self.wb1.sheets])
def test_used_range(self):
self.wb1.sheets[0].range("A1:C7").value = 1
self.assertEqual(
self.wb1.sheets[0].used_range, self.wb1.sheets[0].range("A1:C7")
)
def test_visible(self):
self.assertTrue(self.wb1.sheets[0].visible)
self.wb1.sheets[0].visible = False
self.assertFalse(self.wb1.sheets[0].visible)
def test_sheet_copy_without_arguments(self):
original_name = self.wb1.sheets[0].name
self.wb1.sheets[0]["A1"].value = "xyz"
self.wb1.sheets[0].copy()
self.assertEqual(self.wb1.sheets[-1].name, original_name + " (2)")
self.assertEqual(self.wb1.sheets[-1]["A1"].value, "xyz")
def test_sheet_copy_with_before_and_after(self):
with self.assertRaises(AssertionError):
self.wb1.sheets[0].copy(before=self.wb1.sheets[0], after=self.wb1.sheets[0])
def test_sheet_copy_before_same_book(self):
original_name = self.wb1.sheets[0].name
self.wb1.sheets[0]["A1"].value = "xyz"
copied_sheet = self.wb1.sheets[0].copy(before=self.wb1.sheets[0])
self.assertNotEqual(self.wb1.sheets[0].name, original_name)
self.assertEqual(self.wb1.sheets[0]["A1"].value, "xyz")
self.assertEqual(copied_sheet.name, self.wb1.sheets[0].name)
def test_sheet_copy_after_same_book(self):
original_name = self.wb1.sheets[0].name
self.wb1.sheets[0]["A1"].value = "xyz"
self.wb1.sheets[0].copy(after=self.wb1.sheets[0])
self.assertNotEqual(self.wb1.sheets[1].name, original_name)
self.assertEqual(self.wb1.sheets[1]["A1"].value, "xyz")
def test_sheet_copy_before_same_book_new_name(self):
self.wb1.sheets[0]["A1"].value = "xyz"
self.wb1.sheets[0].copy(before=self.wb1.sheets[0], name="mycopy")
self.assertEqual(self.wb1.sheets[0].name, "mycopy")
self.assertEqual(self.wb1.sheets[0]["A1"].value, "xyz")
def test_sheet_copy_before_same_book_new_name_already_exists(self):
self.wb1.sheets[0]["A1"].value = "xyz"
self.wb1.sheets[0].copy(before=self.wb1.sheets[0], name="mycopy")
with self.assertRaises(ValueError):
self.wb1.sheets[0].copy(before=self.wb1.sheets[0], name="mycopy")
def test_sheet_copy_before_different_book(self):
self.wb1.sheets[0]["A1"].value = "xyz"
wb2 = self.wb1.app.books.add()
self.wb1.sheets[0].copy(before=wb2.sheets[0])
self.assertEqual(wb2.sheets[0]["A1"].value, self.wb1.sheets[0]["A1"].value)
def test_sheet_copy_before_different_book_same_name(self):
mysheet = self.wb1.sheets.add("mysheet")
mysheet["A1"].value = "xyz"
wb2 = self.wb1.app.books.add()
self.wb1.sheets[0].copy(after=wb2.sheets[0], name="mysheet")
self.assertEqual(wb2.sheets[1]["A1"].value, mysheet["A1"].value)
self.assertEqual(wb2.sheets[1].name, "mysheet")
with self.assertRaises(ValueError):
self.wb1.sheets[0].copy(after=wb2.sheets[0], name="mysheet")
| TestSheet |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_compiler.py | {
"start": 139790,
"end": 149740
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
"""Tests for full text searching"""
__dialect__ = postgresql.dialect()
def setup_test(self):
self.table = Table(
"t",
MetaData(),
Column("id", Integer, primary_key=True),
Column("title", String),
Column("body", String),
)
self.table_alt = table(
"mytable",
column("id", Integer),
column("title", String(128)),
column("body", String(128)),
)
self.matchtable = Table(
"matchtable",
MetaData(),
Column("id", Integer, primary_key=True),
Column("title", String(200)),
)
def _raise_query(self, q):
"""
useful for debugging. just do...
self._raise_query(q)
"""
c = q.compile(dialect=postgresql.dialect())
raise ValueError(c)
def test_match_custom(self):
s = select(self.table_alt.c.id).where(
func.to_tsquery("fat").bool_op("<->")(func.to_tsquery("rat"))
)
self.assert_compile(
s,
"SELECT mytable.id FROM mytable WHERE "
"to_tsquery(%(to_tsquery_1)s) <-> to_tsquery(%(to_tsquery_2)s)",
{"to_tsquery_1": "fat", "to_tsquery_2": "rat"},
)
def test_match_custom_regconfig(self):
s = select(self.table_alt.c.id).where(
func.to_tsquery("english", "fat").bool_op("<->")(
func.to_tsquery("english", "rat")
)
)
self.assert_compile(
s,
"SELECT mytable.id FROM mytable WHERE "
"to_tsquery(%(to_tsquery_1)s, %(to_tsquery_2)s) <-> "
"to_tsquery(%(to_tsquery_3)s, %(to_tsquery_4)s)",
{
"to_tsquery_1": "english",
"to_tsquery_2": "fat",
"to_tsquery_3": "english",
"to_tsquery_4": "rat",
},
)
def test_match_basic(self):
s = select(self.table_alt.c.id).where(
self.table_alt.c.title.match("somestring")
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE mytable.title @@ plainto_tsquery(%(title_1)s)",
)
def test_match_regconfig(self):
s = select(self.table_alt.c.id).where(
self.table_alt.c.title.match(
"somestring", postgresql_regconfig="english"
)
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE mytable.title @@ "
"plainto_tsquery('english', %(title_1)s)",
)
def test_match_tsvector(self):
s = select(self.table_alt.c.id).where(
func.to_tsvector(self.table_alt.c.title).match("somestring")
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE to_tsvector(mytable.title) "
"@@ plainto_tsquery(%(to_tsvector_1)s)",
)
def test_match_tsvectorconfig(self):
s = select(self.table_alt.c.id).where(
func.to_tsvector("english", self.table_alt.c.title).match(
"somestring"
)
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE to_tsvector(%(to_tsvector_1)s, mytable.title) @@ "
"plainto_tsquery(%(to_tsvector_2)s)",
)
def test_match_tsvectorconfig_regconfig(self):
s = select(self.table_alt.c.id).where(
func.to_tsvector("english", self.table_alt.c.title).match(
"somestring", postgresql_regconfig="english"
)
)
self.assert_compile(
s,
"SELECT mytable.id "
"FROM mytable "
"WHERE to_tsvector(%(to_tsvector_1)s, mytable.title) @@ "
"""plainto_tsquery('english', %(to_tsvector_2)s)""",
)
@testing.combinations(
("to_tsvector",),
("to_tsquery",),
("plainto_tsquery",),
("phraseto_tsquery",),
("websearch_to_tsquery",),
("ts_headline",),
argnames="to_ts_name",
)
def test_dont_compile_non_imported(self, to_ts_name):
new_func = type(
to_ts_name,
(GenericFunction,),
{
"_register": False,
"inherit_cache": True,
},
)
with expect_raises_message(
exc.CompileError,
rf"Can't compile \"{to_ts_name}\(\)\" full text search "
f"function construct that does not originate from the "
f'"sqlalchemy.dialects.postgresql" package. '
f'Please ensure "import sqlalchemy.dialects.postgresql" is '
f"called before constructing "
rf"\"sqlalchemy.func.{to_ts_name}\(\)\" to ensure "
f"registration of the correct "
f"argument and return types.",
):
select(new_func("x", "y")).compile(dialect=postgresql.dialect())
@testing.combinations(
(func.to_tsvector,),
(func.to_tsquery,),
(func.plainto_tsquery,),
(func.phraseto_tsquery,),
(func.websearch_to_tsquery,),
argnames="to_ts_func",
)
@testing.variation("use_regconfig", [True, False, "literal"])
def test_to_regconfig_fns(self, to_ts_func, use_regconfig):
"""test #8977"""
matchtable = self.matchtable
fn_name = to_ts_func().name
if use_regconfig.literal:
regconfig = literal("english", REGCONFIG)
elif use_regconfig:
regconfig = "english"
else:
regconfig = None
if regconfig is None:
if fn_name == "to_tsvector":
fn = to_ts_func(matchtable.c.title).match("python")
expected = (
"to_tsvector(matchtable.title) @@ "
"plainto_tsquery($1::VARCHAR)"
)
else:
fn = func.to_tsvector(matchtable.c.title).op("@@")(
to_ts_func("python")
)
expected = (
f"to_tsvector(matchtable.title) @@ {fn_name}($1::VARCHAR)"
)
else:
if fn_name == "to_tsvector":
fn = to_ts_func(regconfig, matchtable.c.title).match("python")
expected = (
"to_tsvector($1::REGCONFIG, matchtable.title) @@ "
"plainto_tsquery($2::VARCHAR)"
)
else:
fn = func.to_tsvector(matchtable.c.title).op("@@")(
to_ts_func(regconfig, "python")
)
expected = (
f"to_tsvector(matchtable.title) @@ "
f"{fn_name}($1::REGCONFIG, $2::VARCHAR)"
)
stmt = matchtable.select().where(fn)
self.assert_compile(
stmt,
"SELECT matchtable.id, matchtable.title "
f"FROM matchtable WHERE {expected}",
dialect="postgresql+asyncpg",
)
@testing.variation("use_regconfig", [True, False, "literal"])
@testing.variation("include_options", [True, False])
@testing.variation("tsquery_in_expr", [True, False])
def test_ts_headline(
self, connection, use_regconfig, include_options, tsquery_in_expr
):
"""test #8977"""
if use_regconfig.literal:
regconfig = literal("english", REGCONFIG)
elif use_regconfig:
regconfig = "english"
else:
regconfig = None
text = (
"The most common type of search is to find all documents "
"containing given query terms and return them in order of "
"their similarity to the query."
)
tsquery = func.to_tsquery("english", "query & similarity")
if regconfig is None:
tsquery_str = "to_tsquery($2::REGCONFIG, $3::VARCHAR)"
else:
tsquery_str = "to_tsquery($3::REGCONFIG, $4::VARCHAR)"
if tsquery_in_expr:
tsquery = case((true(), tsquery), else_=null())
tsquery_str = f"CASE WHEN true THEN {tsquery_str} ELSE NULL END"
is_(tsquery.type._type_affinity, TSQUERY)
args = [text, tsquery]
if regconfig is not None:
args.insert(0, regconfig)
if include_options:
args.append(
"MaxFragments=10, MaxWords=7, "
"MinWords=3, StartSel=<<, StopSel=>>"
)
fn = func.ts_headline(*args)
stmt = select(fn)
if regconfig is None and not include_options:
self.assert_compile(
stmt,
f"SELECT ts_headline($1::VARCHAR, "
f"{tsquery_str}) AS ts_headline_1",
dialect="postgresql+asyncpg",
)
elif regconfig is None and include_options:
self.assert_compile(
stmt,
f"SELECT ts_headline($1::VARCHAR, "
f"{tsquery_str}, $4::VARCHAR) AS ts_headline_1",
dialect="postgresql+asyncpg",
)
elif regconfig is not None and not include_options:
self.assert_compile(
stmt,
f"SELECT ts_headline($1::REGCONFIG, $2::VARCHAR, "
f"{tsquery_str}) AS ts_headline_1",
dialect="postgresql+asyncpg",
)
else:
self.assert_compile(
stmt,
f"SELECT ts_headline($1::REGCONFIG, $2::VARCHAR, "
f"{tsquery_str}, $5::VARCHAR) "
"AS ts_headline_1",
dialect="postgresql+asyncpg",
)
| FullTextSearchTest |
python | jackfrued__Python-100-Days | Day31-35/code/example21.py | {
"start": 239,
"end": 1606
} | class ____():
"""银行账户"""
def __init__(self, balance=0):
self.balance = balance
lock = threading.Lock()
self.condition = threading.Condition(lock)
def withdraw(self, money):
"""取钱"""
with self.condition:
while money > self.balance:
self.condition.wait()
new_balance = self.balance - money
sleep(0.001)
self.balance = new_balance
def deposit(self, money):
"""存钱"""
with self.condition:
new_balance = self.balance + money
sleep(0.001)
self.balance = new_balance
self.condition.notify_all()
def add_money(account):
while True:
money = randint(5, 10)
account.deposit(money)
print(threading.current_thread().name,
':', money, '====>', account.balance)
sleep(0.5)
def sub_money(account):
while True:
money = randint(10, 30)
account.withdraw(money)
print(threading.current_thread().name,
':', money, '<====', account.balance)
sleep(1)
def main():
account = Account()
with ThreadPoolExecutor(max_workers=10) as pool:
for _ in range(5):
pool.submit(add_money, account)
pool.submit(sub_money, account)
if __name__ == '__main__':
main()
| Account |
python | great-expectations__great_expectations | great_expectations/expectations/row_conditions.py | {
"start": 1260,
"end": 1568
} | class ____(ValueError):
"""Raised when the number of conditions exceeds the maximum allowed."""
def __init__(self, count: int, max_conditions: int):
super().__init__(
f"{max_conditions} conditions is the maximum, but {count} conditions are defined"
)
| TooManyConditionsError |
python | networkx__networkx | networkx/tests/test_convert_numpy.py | {
"start": 175,
"end": 19032
} | class ____:
def setup_method(self):
self.G1 = nx.barbell_graph(10, 3)
self.G2 = nx.cycle_graph(10, create_using=nx.DiGraph)
self.G3 = self.create_weighted(nx.Graph())
self.G4 = self.create_weighted(nx.DiGraph())
def create_weighted(self, G):
g = nx.cycle_graph(4)
G.add_nodes_from(g)
G.add_weighted_edges_from((u, v, 10 + u) for u, v in g.edges())
return G
def assert_equal(self, G1, G2):
assert sorted(G1.nodes()) == sorted(G2.nodes())
assert sorted(G1.edges()) == sorted(G2.edges())
def identity_conversion(self, G, A, create_using):
assert A.sum() > 0
GG = nx.from_numpy_array(A, create_using=create_using)
self.assert_equal(G, GG)
GW = nx.to_networkx_graph(A, create_using=create_using)
self.assert_equal(G, GW)
GI = nx.empty_graph(0, create_using).__class__(A)
self.assert_equal(G, GI)
def test_shape(self):
"Conversion from non-square array."
A = np.array([[1, 2, 3], [4, 5, 6]])
pytest.raises(nx.NetworkXError, nx.from_numpy_array, A)
def test_identity_graph_array(self):
"Conversion from graph to array to graph."
A = nx.to_numpy_array(self.G1)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_digraph_array(self):
"""Conversion from digraph to array to digraph."""
A = nx.to_numpy_array(self.G2)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_weighted_graph_array(self):
"""Conversion from weighted graph to array to weighted graph."""
A = nx.to_numpy_array(self.G3)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_digraph_array(self):
"""Conversion from weighted digraph to array to weighted digraph."""
A = nx.to_numpy_array(self.G4)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_nodelist(self):
"""Conversion from graph to array to graph with nodelist."""
P4 = nx.path_graph(4)
P3 = nx.path_graph(3)
nodelist = list(P3)
A = nx.to_numpy_array(P4, nodelist=nodelist)
GA = nx.Graph(A)
self.assert_equal(GA, P3)
# Make nodelist ambiguous by containing duplicates.
nodelist += [nodelist[0]]
pytest.raises(nx.NetworkXError, nx.to_numpy_array, P3, nodelist=nodelist)
# Make nodelist invalid by including nonexistent nodes
nodelist = [-1, 0, 1]
with pytest.raises(
nx.NetworkXError,
match=f"Nodes {nodelist - P3.nodes} in nodelist is not in G",
):
nx.to_numpy_array(P3, nodelist=nodelist)
def test_weight_keyword(self):
WP4 = nx.Graph()
WP4.add_edges_from((n, n + 1, {"weight": 0.5, "other": 0.3}) for n in range(3))
P4 = nx.path_graph(4)
A = nx.to_numpy_array(P4)
np.testing.assert_equal(A, nx.to_numpy_array(WP4, weight=None))
np.testing.assert_equal(0.5 * A, nx.to_numpy_array(WP4))
np.testing.assert_equal(0.3 * A, nx.to_numpy_array(WP4, weight="other"))
def test_from_numpy_array_type(self):
A = np.array([[1]])
G = nx.from_numpy_array(A)
assert isinstance(G[0][0]["weight"], int)
A = np.array([[1]]).astype(float)
G = nx.from_numpy_array(A)
assert isinstance(G[0][0]["weight"], float)
A = np.array([[1]]).astype(str)
G = nx.from_numpy_array(A)
assert isinstance(G[0][0]["weight"], str)
A = np.array([[1]]).astype(bool)
G = nx.from_numpy_array(A)
assert isinstance(G[0][0]["weight"], bool)
A = np.array([[1]]).astype(complex)
G = nx.from_numpy_array(A)
assert isinstance(G[0][0]["weight"], complex)
A = np.array([[1]]).astype(object)
pytest.raises(TypeError, nx.from_numpy_array, A)
A = np.array([[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]])
with pytest.raises(
nx.NetworkXError, match=f"Input array must be 2D, not {A.ndim}"
):
g = nx.from_numpy_array(A)
def test_from_numpy_array_dtype(self):
dt = [("weight", float), ("cost", int)]
A = np.array([[(1.0, 2)]], dtype=dt)
G = nx.from_numpy_array(A)
assert isinstance(G[0][0]["weight"], float)
assert isinstance(G[0][0]["cost"], int)
assert G[0][0]["cost"] == 2
assert G[0][0]["weight"] == 1.0
def test_from_numpy_array_parallel_edges(self):
"""Tests that the :func:`networkx.from_numpy_array` function
interprets integer weights as the number of parallel edges when
creating a multigraph.
"""
A = np.array([[1, 1], [1, 2]])
# First, with a simple graph, each integer entry in the adjacency
# matrix is interpreted as the weight of a single edge in the graph.
expected = nx.DiGraph()
edges = [(0, 0), (0, 1), (1, 0)]
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
expected.add_edge(1, 1, weight=2)
actual = nx.from_numpy_array(A, parallel_edges=True, create_using=nx.DiGraph)
assert graphs_equal(actual, expected)
actual = nx.from_numpy_array(A, parallel_edges=False, create_using=nx.DiGraph)
assert graphs_equal(actual, expected)
# Now each integer entry in the adjacency matrix is interpreted as the
# number of parallel edges in the graph if the appropriate keyword
# argument is specified.
edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)]
expected = nx.MultiDiGraph()
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
actual = nx.from_numpy_array(
A, parallel_edges=True, create_using=nx.MultiDiGraph
)
assert graphs_equal(actual, expected)
expected = nx.MultiDiGraph()
expected.add_edges_from(set(edges), weight=1)
# The sole self-loop (edge 0) on vertex 1 should have weight 2.
expected[1][1][0]["weight"] = 2
actual = nx.from_numpy_array(
A, parallel_edges=False, create_using=nx.MultiDiGraph
)
assert graphs_equal(actual, expected)
@pytest.mark.parametrize(
"dt",
(
None, # default
int, # integer dtype
np.dtype(
[("weight", "f8"), ("color", "i1")]
), # Structured dtype with named fields
),
)
def test_from_numpy_array_no_edge_attr(self, dt):
A = np.array([[0, 1], [1, 0]], dtype=dt)
G = nx.from_numpy_array(A, edge_attr=None)
assert "weight" not in G.edges[0, 1]
assert len(G.edges[0, 1]) == 0
def test_from_numpy_array_multiedge_no_edge_attr(self):
A = np.array([[0, 2], [2, 0]])
G = nx.from_numpy_array(A, create_using=nx.MultiDiGraph, edge_attr=None)
assert all("weight" not in e for _, e in G[0][1].items())
assert len(G[0][1][0]) == 0
def test_from_numpy_array_custom_edge_attr(self):
A = np.array([[0, 2], [3, 0]])
G = nx.from_numpy_array(A, edge_attr="cost")
assert "weight" not in G.edges[0, 1]
assert G.edges[0, 1]["cost"] == 3
def test_symmetric(self):
"""Tests that a symmetric array has edges added only once to an
undirected multigraph when using :func:`networkx.from_numpy_array`.
"""
A = np.array([[0, 1], [1, 0]])
G = nx.from_numpy_array(A, create_using=nx.MultiGraph)
expected = nx.MultiGraph()
expected.add_edge(0, 1, weight=1)
assert graphs_equal(G, expected)
def test_dtype_int_graph(self):
"""Test that setting dtype int actually gives an integer array.
For more information, see GitHub pull request #1363.
"""
G = nx.complete_graph(3)
A = nx.to_numpy_array(G, dtype=int)
assert A.dtype == int
def test_dtype_int_multigraph(self):
"""Test that setting dtype int actually gives an integer array.
For more information, see GitHub pull request #1363.
"""
G = nx.MultiGraph(nx.complete_graph(3))
A = nx.to_numpy_array(G, dtype=int)
assert A.dtype == int
@pytest.fixture
def multigraph_test_graph():
G = nx.MultiGraph()
G.add_edge(1, 2, weight=7)
G.add_edge(1, 2, weight=70)
return G
@pytest.mark.parametrize(("operator", "expected"), ((sum, 77), (min, 7), (max, 70)))
def test_numpy_multigraph(multigraph_test_graph, operator, expected):
A = nx.to_numpy_array(multigraph_test_graph, multigraph_weight=operator)
assert A[1, 0] == expected
def test_to_numpy_array_multigraph_nodelist(multigraph_test_graph):
G = multigraph_test_graph
G.add_edge(0, 1, weight=3)
A = nx.to_numpy_array(G, nodelist=[1, 2])
assert A.shape == (2, 2)
assert A[1, 0] == 77
@pytest.mark.parametrize(
"G, expected",
[
(nx.Graph(), np.array([[0, 1 + 2j], [1 + 2j, 0]], dtype=complex)),
(nx.DiGraph(), np.array([[0, 1 + 2j], [0, 0]], dtype=complex)),
],
)
def test_to_numpy_array_complex_weights(G, expected):
G.add_edge(0, 1, weight=1 + 2j)
A = nx.to_numpy_array(G, dtype=complex)
npt.assert_array_equal(A, expected)
def test_to_numpy_array_arbitrary_weights():
G = nx.DiGraph()
w = 922337203685477580102 # Out of range for int64
G.add_edge(0, 1, weight=922337203685477580102) # val not representable by int64
A = nx.to_numpy_array(G, dtype=object)
expected = np.array([[0, w], [0, 0]], dtype=object)
npt.assert_array_equal(A, expected)
# Undirected
A = nx.to_numpy_array(G.to_undirected(), dtype=object)
expected = np.array([[0, w], [w, 0]], dtype=object)
npt.assert_array_equal(A, expected)
@pytest.mark.parametrize(
"func, expected",
((min, -1), (max, 10), (sum, 11), (np.mean, 11 / 3), (np.median, 2)),
)
def test_to_numpy_array_multiweight_reduction(func, expected):
"""Test various functions for reducing multiedge weights."""
G = nx.MultiDiGraph()
weights = [-1, 2, 10.0]
for w in weights:
G.add_edge(0, 1, weight=w)
A = nx.to_numpy_array(G, multigraph_weight=func, dtype=float)
assert np.allclose(A, [[0, expected], [0, 0]])
# Undirected case
A = nx.to_numpy_array(G.to_undirected(), multigraph_weight=func, dtype=float)
assert np.allclose(A, [[0, expected], [expected, 0]])
@pytest.mark.parametrize(
("G, expected"),
[
(nx.Graph(), [[(0, 0), (10, 5)], [(10, 5), (0, 0)]]),
(nx.DiGraph(), [[(0, 0), (10, 5)], [(0, 0), (0, 0)]]),
],
)
def test_to_numpy_array_structured_dtype_attrs_from_fields(G, expected):
"""When `dtype` is structured (i.e. has names) and `weight` is None, use
the named fields of the dtype to look up edge attributes."""
G.add_edge(0, 1, weight=10, cost=5.0)
dtype = np.dtype([("weight", int), ("cost", int)])
A = nx.to_numpy_array(G, dtype=dtype, weight=None)
expected = np.asarray(expected, dtype=dtype)
npt.assert_array_equal(A, expected)
def test_to_numpy_array_structured_dtype_single_attr_default():
G = nx.path_graph(3)
dtype = np.dtype([("weight", float)]) # A single named field
A = nx.to_numpy_array(G, dtype=dtype, weight=None)
expected = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)
npt.assert_array_equal(A["weight"], expected)
@pytest.mark.parametrize(
("field_name", "expected_attr_val"),
[
("weight", 1),
("cost", 3),
],
)
def test_to_numpy_array_structured_dtype_single_attr(field_name, expected_attr_val):
G = nx.Graph()
G.add_edge(0, 1, cost=3)
dtype = np.dtype([(field_name, float)])
A = nx.to_numpy_array(G, dtype=dtype, weight=None)
expected = np.array([[0, expected_attr_val], [expected_attr_val, 0]], dtype=float)
npt.assert_array_equal(A[field_name], expected)
@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph))
@pytest.mark.parametrize(
"edge",
[
(0, 1), # No edge attributes
(0, 1, {"weight": 10}), # One edge attr
(0, 1, {"weight": 5, "flow": -4}), # Multiple but not all edge attrs
(0, 1, {"weight": 2.0, "cost": 10, "flow": -45}), # All attrs
],
)
def test_to_numpy_array_structured_dtype_multiple_fields(graph_type, edge):
G = graph_type([edge])
dtype = np.dtype([("weight", float), ("cost", float), ("flow", float)])
A = nx.to_numpy_array(G, dtype=dtype, weight=None)
for attr in dtype.names:
expected = nx.to_numpy_array(G, dtype=float, weight=attr)
npt.assert_array_equal(A[attr], expected)
@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph()))
def test_to_numpy_array_structured_dtype_scalar_nonedge(G):
G.add_edge(0, 1, weight=10)
dtype = np.dtype([("weight", float), ("cost", float)])
A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=np.nan)
for attr in dtype.names:
expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=np.nan)
npt.assert_array_equal(A[attr], expected)
@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph()))
def test_to_numpy_array_structured_dtype_nonedge_ary(G):
"""Similar to the scalar case, except has a different non-edge value for
each named field."""
G.add_edge(0, 1, weight=10)
dtype = np.dtype([("weight", float), ("cost", float)])
nonedges = np.array([(0, np.inf)], dtype=dtype)
A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=nonedges)
for attr in dtype.names:
nonedge = nonedges[attr]
expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=nonedge)
npt.assert_array_equal(A[attr], expected)
def test_to_numpy_array_structured_dtype_with_weight_raises():
"""Using both a structured dtype (with named fields) and specifying a `weight`
parameter is ambiguous."""
G = nx.path_graph(3)
dtype = np.dtype([("weight", int), ("cost", int)])
exception_msg = "Specifying `weight` not supported for structured dtypes"
with pytest.raises(ValueError, match=exception_msg):
nx.to_numpy_array(G, dtype=dtype) # Default is weight="weight"
with pytest.raises(ValueError, match=exception_msg):
nx.to_numpy_array(G, dtype=dtype, weight="cost")
@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph))
def test_to_numpy_array_structured_multigraph_raises(graph_type):
G = nx.path_graph(3, create_using=graph_type)
dtype = np.dtype([("weight", int), ("cost", int)])
with pytest.raises(nx.NetworkXError, match="Structured arrays are not supported"):
nx.to_numpy_array(G, dtype=dtype, weight=None)
def test_from_numpy_array_nodelist_bad_size():
"""An exception is raised when `len(nodelist) != A.shape[0]`."""
n = 5 # Number of nodes
A = np.diag(np.ones(n - 1), k=1) # Adj. matrix for P_n
expected = nx.path_graph(n)
assert graphs_equal(nx.from_numpy_array(A, edge_attr=None), expected)
nodes = list(range(n))
assert graphs_equal(
nx.from_numpy_array(A, edge_attr=None, nodelist=nodes), expected
)
# Too many node labels
nodes = list(range(n + 1))
with pytest.raises(ValueError, match="nodelist must have the same length as A"):
nx.from_numpy_array(A, nodelist=nodes)
# Too few node labels
nodes = list(range(n - 1))
with pytest.raises(ValueError, match="nodelist must have the same length as A"):
nx.from_numpy_array(A, nodelist=nodes)
@pytest.mark.parametrize(
"nodes",
(
[4, 3, 2, 1, 0],
[9, 7, 1, 2, 8],
["a", "b", "c", "d", "e"],
[(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)],
["A", 2, 7, "spam", (1, 3)],
),
)
def test_from_numpy_array_nodelist(nodes):
A = np.diag(np.ones(4), k=1)
# Without edge attributes
expected = nx.relabel_nodes(
nx.path_graph(5), mapping=dict(enumerate(nodes)), copy=True
)
G = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes)
assert graphs_equal(G, expected)
# With edge attributes
nx.set_edge_attributes(expected, 1.0, name="weight")
G = nx.from_numpy_array(A, nodelist=nodes)
assert graphs_equal(G, expected)
@pytest.mark.parametrize(
"nodes",
(
[4, 3, 2, 1, 0],
[9, 7, 1, 2, 8],
["a", "b", "c", "d", "e"],
[(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)],
["A", 2, 7, "spam", (1, 3)],
),
)
def test_from_numpy_array_nodelist_directed(nodes):
A = np.diag(np.ones(4), k=1)
# Without edge attributes
H = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])
expected = nx.relabel_nodes(H, mapping=dict(enumerate(nodes)), copy=True)
G = nx.from_numpy_array(A, create_using=nx.DiGraph, edge_attr=None, nodelist=nodes)
assert graphs_equal(G, expected)
# With edge attributes
nx.set_edge_attributes(expected, 1.0, name="weight")
G = nx.from_numpy_array(A, create_using=nx.DiGraph, nodelist=nodes)
assert graphs_equal(G, expected)
@pytest.mark.parametrize(
"nodes",
(
[4, 3, 2, 1, 0],
[9, 7, 1, 2, 8],
["a", "b", "c", "d", "e"],
[(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)],
["A", 2, 7, "spam", (1, 3)],
),
)
def test_from_numpy_array_nodelist_multigraph(nodes):
A = np.array(
[
[0, 1, 0, 0, 0],
[1, 0, 2, 0, 0],
[0, 2, 0, 3, 0],
[0, 0, 3, 0, 4],
[0, 0, 0, 4, 0],
]
)
H = nx.MultiGraph()
for i, edge in enumerate(((0, 1), (1, 2), (2, 3), (3, 4))):
H.add_edges_from(itertools.repeat(edge, i + 1))
expected = nx.relabel_nodes(H, mapping=dict(enumerate(nodes)), copy=True)
G = nx.from_numpy_array(
A,
parallel_edges=True,
create_using=nx.MultiGraph,
edge_attr=None,
nodelist=nodes,
)
assert graphs_equal(G, expected)
@pytest.mark.parametrize(
"nodes",
(
[4, 3, 2, 1, 0],
[9, 7, 1, 2, 8],
["a", "b", "c", "d", "e"],
[(0, 0), (1, 1), (2, 3), (0, 2), (3, 1)],
["A", 2, 7, "spam", (1, 3)],
),
)
@pytest.mark.parametrize("graph", (nx.complete_graph, nx.cycle_graph, nx.wheel_graph))
def test_from_numpy_array_nodelist_rountrip(graph, nodes):
G = graph(5)
A = nx.to_numpy_array(G)
expected = nx.relabel_nodes(G, mapping=dict(enumerate(nodes)), copy=True)
H = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes)
assert graphs_equal(H, expected)
# With an isolated node
G = graph(4)
G.add_node("foo")
A = nx.to_numpy_array(G)
expected = nx.relabel_nodes(G, mapping=dict(zip(G.nodes, nodes)), copy=True)
H = nx.from_numpy_array(A, edge_attr=None, nodelist=nodes)
assert graphs_equal(H, expected)
| TestConvertNumpyArray |
python | django-haystack__django-haystack | test_haystack/elasticsearch7_tests/test_backend.py | {
"start": 7602,
"end": 24955
} | class ____(TestCase):
def setUp(self):
super().setUp()
# Wipe it clean.
self.raw_es = elasticsearch.Elasticsearch(
settings.HAYSTACK_CONNECTIONS["elasticsearch"]["URL"]
)
clear_elasticsearch_index()
# Stow.
self.old_ui = connections["elasticsearch"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = Elasticsearch7MockSearchIndex()
self.smmidni = Elasticsearch7MockSearchIndexWithSkipDocument()
self.smtmmi = Elasticsearch7MaintainTypeMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["elasticsearch"]._index = self.ui
self.sb = connections["elasticsearch"].get_backend()
# Force the backend to rebuild the mapping each time.
self.sb.existing_mapping = {}
self.sb.setup()
self.sample_objs = []
for i in range(1, 4):
mock = MockModel()
mock.id = i
mock.author = "daniel%s" % i
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
def tearDown(self):
connections["elasticsearch"]._index = self.old_ui
super().tearDown()
self.sb.silently_fail = True
def raw_search(self, query):
try:
return self.raw_es.search(
q="*:*",
index=settings.HAYSTACK_CONNECTIONS["elasticsearch"]["INDEX_NAME"],
)
except elasticsearch.TransportError:
return {}
def test_non_silent(self):
bad_sb = connections["elasticsearch"].backend(
"bad",
URL="http://omg.wtf.bbq:1000/",
INDEX_NAME="whatver",
SILENTLY_FAIL=False,
TIMEOUT=1,
)
try:
bad_sb.update(self.smmi, self.sample_objs)
self.fail()
except:
pass
try:
bad_sb.remove("core.mockmodel.1")
self.fail()
except:
pass
try:
bad_sb.clear()
self.fail()
except:
pass
try:
bad_sb.search("foo")
self.fail()
except:
pass
def test_update_no_documents(self):
url = settings.HAYSTACK_CONNECTIONS["elasticsearch"]["URL"]
index_name = settings.HAYSTACK_CONNECTIONS["elasticsearch"]["INDEX_NAME"]
sb = connections["elasticsearch"].backend(
"elasticsearch", URL=url, INDEX_NAME=index_name, SILENTLY_FAIL=True
)
self.assertEqual(sb.update(self.smmi, []), None)
sb = connections["elasticsearch"].backend(
"elasticsearch", URL=url, INDEX_NAME=index_name, SILENTLY_FAIL=False
)
try:
sb.update(self.smmi, [])
self.fail()
except:
pass
def test_update(self):
self.sb.update(self.smmi, self.sample_objs)
# Check what Elasticsearch thinks is there.
self.assertEqual(self.raw_search("*:*")["hits"]["total"]["value"], 3)
self.assertEqual(
sorted(
[res["_source"] for res in self.raw_search("*:*")["hits"]["hits"]],
key=lambda x: x["id"],
),
[
{
"django_id": "1",
"django_ct": "core.mockmodel",
"name": "daniel1",
"name_exact": "daniel1",
"text": "Indexed!\n1\n",
"pub_date": "2009-02-24T00:00:00",
"id": "core.mockmodel.1",
},
{
"django_id": "2",
"django_ct": "core.mockmodel",
"name": "daniel2",
"name_exact": "daniel2",
"text": "Indexed!\n2\n",
"pub_date": "2009-02-23T00:00:00",
"id": "core.mockmodel.2",
},
{
"django_id": "3",
"django_ct": "core.mockmodel",
"name": "daniel3",
"name_exact": "daniel3",
"text": "Indexed!\n3\n",
"pub_date": "2009-02-22T00:00:00",
"id": "core.mockmodel.3",
},
],
)
def test_update_with_SkipDocument_raised(self):
self.sb.update(self.smmidni, self.sample_objs)
# Check what Elasticsearch thinks is there.
res = self.raw_search("*:*")["hits"]
self.assertEqual(res["total"]["value"], 2)
self.assertListEqual(
sorted([x["_source"]["id"] for x in res["hits"]]),
["core.mockmodel.1", "core.mockmodel.2"],
)
def test_remove(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search("*:*")["hits"]["total"]["value"], 3)
self.sb.remove(self.sample_objs[0])
self.assertEqual(self.raw_search("*:*")["hits"]["total"]["value"], 2)
self.assertEqual(
sorted(
[res["_source"] for res in self.raw_search("*:*")["hits"]["hits"]],
key=operator.itemgetter("django_id"),
),
[
{
"django_id": "2",
"django_ct": "core.mockmodel",
"name": "daniel2",
"name_exact": "daniel2",
"text": "Indexed!\n2\n",
"pub_date": "2009-02-23T00:00:00",
"id": "core.mockmodel.2",
},
{
"django_id": "3",
"django_ct": "core.mockmodel",
"name": "daniel3",
"name_exact": "daniel3",
"text": "Indexed!\n3\n",
"pub_date": "2009-02-22T00:00:00",
"id": "core.mockmodel.3",
},
],
)
def test_remove_succeeds_on_404(self):
self.sb.silently_fail = False
self.sb.remove("core.mockmodel.421")
def test_clear(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(
self.raw_search("*:*").get("hits", {}).get("total", {}).get("value", 0),
3,
)
self.sb.clear()
self.assertEqual(
self.raw_search("*:*").get("hits", {}).get("total", {}).get("value", 0),
0,
)
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(
self.raw_search("*:*").get("hits", {}).get("total", {}).get("value", 0),
3,
)
self.sb.clear([AnotherMockModel])
self.assertEqual(
self.raw_search("*:*").get("hits", {}).get("total", {}).get("value", 0),
3,
)
self.sb.clear([MockModel])
self.assertEqual(
self.raw_search("*:*").get("hits", {}).get("total", {}).get("value", 0),
0,
)
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(
self.raw_search("*:*").get("hits", {}).get("total", {}).get("value", 0),
3,
)
self.sb.clear([AnotherMockModel, MockModel])
self.assertEqual(
self.raw_search("*:*").get("hits", {}).get("total", {}).get("value", 0),
0,
)
def test_search(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search("*:*")["hits"]["total"]["value"], 3)
self.assertEqual(self.sb.search(""), {"hits": 0, "results": []})
self.assertEqual(self.sb.search("*:*")["hits"], 3)
self.assertEqual(
set([result.pk for result in self.sb.search("*:*")["results"]]),
{"2", "1", "3"},
)
self.assertEqual(self.sb.search("", highlight=True), {"hits": 0, "results": []})
self.assertEqual(self.sb.search("Index", highlight=True)["hits"], 3)
self.assertEqual(
sorted(
[
result.highlighted[0]
for result in self.sb.search("Index", highlight=True)["results"]
]
),
[
"<em>Indexed</em>!\n1",
"<em>Indexed</em>!\n2",
"<em>Indexed</em>!\n3",
],
)
self.assertEqual(self.sb.search("Indx")["hits"], 0)
self.assertEqual(self.sb.search("indaxed")["spelling_suggestion"], "index")
self.assertEqual(
self.sb.search("arf", spelling_query="indexyd")["spelling_suggestion"],
"index",
)
self.assertEqual(
self.sb.search("", facets={"name": {}}), {"hits": 0, "results": []}
)
results = self.sb.search("Index", facets={"name": {}})
self.assertEqual(results["hits"], 3)
self.assertSetEqual(
set(results["facets"]["fields"]["name"]),
{("daniel3", 1), ("daniel2", 1), ("daniel1", 1)},
)
self.assertEqual(
self.sb.search(
"",
date_facets={
"pub_date": {
"start_date": datetime.date(2008, 1, 1),
"end_date": datetime.date(2009, 4, 1),
"gap_by": "month",
"gap_amount": 1,
}
},
),
{"hits": 0, "results": []},
)
results = self.sb.search(
"Index",
date_facets={
"pub_date": {
"start_date": datetime.date(2008, 1, 1),
"end_date": datetime.date(2009, 4, 1),
"gap_by": "month",
"gap_amount": 1,
}
},
)
self.assertEqual(results["hits"], 3)
self.assertEqual(
results["facets"]["dates"]["pub_date"],
[(datetime.datetime(2009, 2, 1, 0, 0), 3)],
)
self.assertEqual(
self.sb.search("", query_facets=[("name", "[* TO e]")]),
{"hits": 0, "results": []},
)
results = self.sb.search("Index", query_facets=[("name", "[* TO e]")])
self.assertEqual(results["hits"], 3)
self.assertEqual(results["facets"]["queries"], {"name": 3})
self.assertEqual(
self.sb.search("", narrow_queries={"name:daniel1"}),
{"hits": 0, "results": []},
)
results = self.sb.search("Index", narrow_queries={"name:daniel1"})
self.assertEqual(results["hits"], 1)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(
isinstance(
self.sb.search("index", result_class=MockSearchResult)["results"][0],
MockSearchResult,
)
)
# Check the use of ``limit_to_registered_models``.
self.assertEqual(
self.sb.search("", limit_to_registered_models=False),
{"hits": 0, "results": []},
)
self.assertEqual(
self.sb.search("*:*", limit_to_registered_models=False)["hits"], 3
)
self.assertEqual(
sorted(
[
result.pk
for result in self.sb.search(
"*:*", limit_to_registered_models=False
)["results"]
]
),
["1", "2", "3"],
)
# Stow.
old_limit_to_registered_models = getattr(
settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
)
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False
self.assertEqual(self.sb.search(""), {"hits": 0, "results": []})
self.assertEqual(self.sb.search("*:*")["hits"], 3)
self.assertEqual(
sorted([result.pk for result in self.sb.search("*:*")["results"]]),
["1", "2", "3"],
)
# Restore.
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models
def test_spatial_search_parameters(self):
from django.contrib.gis.geos import Point
p1 = Point(1.23, 4.56)
kwargs = self.sb.build_search_kwargs(
"*:*",
distance_point={"field": "location", "point": p1},
sort_by=(("distance", "desc"),),
)
self.assertIn("sort", kwargs)
self.assertEqual(1, len(kwargs["sort"]))
geo_d = kwargs["sort"][0]["_geo_distance"]
# ElasticSearch supports the GeoJSON-style lng, lat pairs so unlike Solr the values should be
# in the same order as we used to create the Point():
# http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-geo-distance-filter.html#_lat_lon_as_array_4
self.assertDictEqual(
geo_d, {"location": [1.23, 4.56], "unit": "km", "order": "desc"}
)
def test_more_like_this(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search("*:*")["hits"]["total"]["value"], 3)
# A functional MLT example with enough data to work is below. Rely on
# this to ensure the API is correct enough.
self.assertEqual(self.sb.more_like_this(self.sample_objs[0])["hits"], 0)
self.assertEqual(
[
result.pk
for result in self.sb.more_like_this(self.sample_objs[0])["results"]
],
[],
)
def test_build_schema(self):
old_ui = connections["elasticsearch"].get_unified_index()
(content_field_name, mapping) = self.sb.build_schema(old_ui.all_searchfields())
self.assertEqual(content_field_name, "text")
self.assertEqual(len(mapping), 4 + 2) # + 2 management fields
self.assertEqual(
mapping,
{
"django_ct": {
"type": "keyword",
},
"django_id": {
"type": "keyword",
},
"text": {
"type": "text",
"analyzer": "snowball",
},
"name": {
"type": "text",
"analyzer": "snowball",
},
"name_exact": {
"type": "keyword",
},
"pub_date": {
"type": "date",
},
},
)
ui = UnifiedIndex()
ui.build(indexes=[Elasticsearch7ComplexFacetsMockSearchIndex()])
(content_field_name, mapping) = self.sb.build_schema(ui.all_searchfields())
self.assertEqual(content_field_name, "text")
self.assertEqual(len(mapping), 16 + 2)
self.assertEqual(
mapping,
{
"django_ct": {
"type": "keyword",
},
"django_id": {
"type": "keyword",
},
"text": {
"type": "text",
"analyzer": "snowball",
},
"name": {
"type": "text",
"analyzer": "snowball",
},
"name_exact": {
"type": "keyword",
},
"is_active": {
"type": "boolean",
},
"is_active_exact": {
"type": "boolean",
},
"post_count": {
"type": "long",
},
"post_count_i": {
"type": "long",
},
"average_rating": {
"type": "float",
},
"average_rating_exact": {
"type": "float",
},
"pub_date": {
"type": "date",
},
"pub_date_exact": {
"type": "date",
},
"created": {
"type": "date",
},
"created_exact": {
"type": "date",
},
"sites": {
"type": "text",
"analyzer": "snowball",
},
"sites_exact": {
"type": "keyword",
},
"facet_field": {
"type": "keyword",
},
},
)
def test_verify_type(self):
old_ui = connections["elasticsearch"].get_unified_index()
ui = UnifiedIndex()
smtmmi = Elasticsearch7MaintainTypeMockSearchIndex()
ui.build(indexes=[smtmmi])
connections["elasticsearch"]._index = ui
sb = connections["elasticsearch"].get_backend()
sb.update(smtmmi, self.sample_objs)
self.assertEqual(sb.search("*:*")["hits"], 3)
self.assertEqual(
[result.month for result in sb.search("*:*")["results"]], ["02", "02", "02"]
)
connections["elasticsearch"]._index = old_ui
| Elasticsearch7SearchBackendTestCase |
python | celery__celery | t/unit/tasks/test_stamping.py | {
"start": 1579,
"end": 2492
} | class ____(StampingVisitor):
def on_signature(self, actual_sig: Signature, **headers) -> dict:
return {"on_signature": True}
def on_group_start(self, actual_sig: Signature, **headers) -> dict:
return {"on_group_start": True}
def on_chain_start(self, actual_sig: Signature, **headers) -> dict:
return {"on_chain_start": True}
def on_chord_header_start(self, actual_sig: Signature, **header) -> dict:
s = super().on_chord_header_start(actual_sig, **header)
s.update({"on_chord_header_start": True})
return s
def on_chord_body(self, actual_sig: Signature, **header) -> dict:
return {"on_chord_body": True}
def on_callback(self, actual_sig: Signature, **header) -> dict:
return {"on_callback": True}
def on_errback(self, actual_sig: Signature, **header) -> dict:
return {"on_errback": True}
| BooleanStampingVisitor |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-dashscope/llama_index/readers/dashscope/domain/lease_domains.py | {
"start": 7952,
"end": 10013
} | class ____(DictToObject):
def __init__(self, file_id, lease_id, file_name, type, param) -> None:
self.file_id = file_id
self.lease_id = lease_id
self.file_name = file_name
self.type = type
self.param = param
@classmethod
def from_dict(cls, data: dict):
"""
Creates an instance of `QueryFileResult` from a dictionary.
Args:
data (dict): A dictionary containing the necessary keys and values corresponding to the class attributes.
Returns:
QueryFileResult: An instance of `QueryFileResult` populated with data from the input dictionary.
"""
if "param" not in data:
raise ValueError("download_lease result param is required")
default_values = {
"file_id": "",
"lease_id": "",
"file_name": "",
"type": FileDownloadType.HTTP.value,
"param": HttpDownloadParameter.from_dict(data["param"]),
}
return cls(
file_id=data.get("file_id", default_values["file_id"]),
lease_id=data.get("lease_id", default_values["lease_id"]),
file_name=data.get("file_name", default_values["file_name"]),
type=FileDownloadType.from_value(data.get("type", default_values["type"])),
param=default_values["param"],
)
def download(self, escape: bool = False):
if self.type == FileDownloadType.HTTP:
if self.param.method == "GET":
json_bytes = requests.get(
url=self.param.url, headers=self.param.headers
).content
json_str = json_bytes.decode("utf-8")
if escape:
return json.dumps(json_str, ensure_ascii=False)
else:
return json_str
else:
raise ValueError(f"Invalid download method: {self.param.method}")
else:
raise ValueError(f"Invalid download type: {self.type}")
| DownloadFileLeaseResult |
python | networkx__networkx | networkx/classes/tests/test_digraph_historical.py | {
"start": 123,
"end": 3668
} | class ____(HistoricalTests):
@classmethod
def setup_class(cls):
HistoricalTests.setup_class()
cls.G = nx.DiGraph
def test_in_degree(self):
G = self.G()
G.add_nodes_from("GJK")
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")])
assert sorted(d for n, d in G.in_degree()) == [0, 0, 0, 0, 1, 2, 2]
assert dict(G.in_degree()) == {
"A": 0,
"C": 2,
"B": 1,
"D": 2,
"G": 0,
"K": 0,
"J": 0,
}
def test_out_degree(self):
G = self.G()
G.add_nodes_from("GJK")
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")])
assert sorted(v for k, v in G.in_degree()) == [0, 0, 0, 0, 1, 2, 2]
assert dict(G.out_degree()) == {
"A": 2,
"C": 1,
"B": 2,
"D": 0,
"G": 0,
"K": 0,
"J": 0,
}
def test_degree_digraph(self):
H = nx.DiGraph()
H.add_edges_from([(1, 24), (1, 2)])
assert sorted(d for n, d in H.in_degree([1, 24])) == [0, 1]
assert sorted(d for n, d in H.out_degree([1, 24])) == [0, 2]
assert sorted(d for n, d in H.degree([1, 24])) == [1, 2]
def test_neighbors(self):
G = self.G()
G.add_nodes_from("GJK")
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")])
assert sorted(G.neighbors("C")) == ["D"]
assert sorted(G["C"]) == ["D"]
assert sorted(G.neighbors("A")) == ["B", "C"]
pytest.raises(nx.NetworkXError, G.neighbors, "j")
pytest.raises(nx.NetworkXError, G.neighbors, "j")
def test_successors(self):
G = self.G()
G.add_nodes_from("GJK")
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")])
assert sorted(G.successors("A")) == ["B", "C"]
assert sorted(G.successors("A")) == ["B", "C"]
assert sorted(G.successors("G")) == []
assert sorted(G.successors("D")) == []
assert sorted(G.successors("G")) == []
pytest.raises(nx.NetworkXError, G.successors, "j")
pytest.raises(nx.NetworkXError, G.successors, "j")
def test_predecessors(self):
G = self.G()
G.add_nodes_from("GJK")
G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")])
assert sorted(G.predecessors("C")) == ["A", "B"]
assert sorted(G.predecessors("C")) == ["A", "B"]
assert sorted(G.predecessors("G")) == []
assert sorted(G.predecessors("A")) == []
assert sorted(G.predecessors("G")) == []
assert sorted(G.predecessors("A")) == []
assert sorted(G.successors("D")) == []
pytest.raises(nx.NetworkXError, G.predecessors, "j")
pytest.raises(nx.NetworkXError, G.predecessors, "j")
def test_reverse(self):
G = nx.complete_graph(10)
H = G.to_directed()
HR = H.reverse()
assert nx.is_isomorphic(H, HR)
assert sorted(H.edges()) == sorted(HR.edges())
def test_reverse2(self):
H = nx.DiGraph()
foo = [H.add_edge(u, u + 1) for u in range(5)]
HR = H.reverse()
for u in range(5):
assert HR.has_edge(u + 1, u)
def test_reverse3(self):
H = nx.DiGraph()
H.add_nodes_from([1, 2, 3, 4])
HR = H.reverse()
assert sorted(HR.nodes()) == [1, 2, 3, 4]
| TestDiGraphHistorical |
python | huggingface__transformers | src/transformers/models/autoformer/modeling_autoformer.py | {
"start": 19188,
"end": 29175
} | class ____(nn.Module):
"""
AutoCorrelation Mechanism with the following two phases:
(1) period-based dependencies discovery (2) time delay aggregation
This block replace the canonical self-attention mechanism.
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: Optional[float] = 0.0,
is_decoder: Optional[bool] = False,
bias: Optional[bool] = True,
autocorrelation_factor: Optional[int] = 3,
layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.autocorrelation_factor = autocorrelation_factor
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_layer from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_layer to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
# (1) period-based dependencies discovery
# Resize (truncation or zero filling)
queries_time_length = query_states.size(1)
values_time_length = value_states.size(1)
if queries_time_length > values_time_length:
query_states = query_states[:, : (queries_time_length - values_time_length), :]
zeros = torch.zeros_like(query_states).float()
value_states = torch.cat([value_states, zeros], dim=1)
key_states = torch.cat([key_states, zeros], dim=1)
else:
value_states = value_states[:, :queries_time_length, :]
key_states = key_states[:, :queries_time_length, :]
query_states_fft = torch.fft.rfft(query_states, n=tgt_len, dim=1)
key_states_fft = torch.fft.rfft(key_states, n=tgt_len, dim=1)
attn_weights = query_states_fft * torch.conj(key_states_fft)
attn_weights = torch.fft.irfft(attn_weights, n=tgt_len, dim=1) # Autocorrelation(Q,K)
src_len = key_states.size(1)
channel = key_states.size(2)
if attn_weights.size() != (bsz * self.num_heads, tgt_len, channel):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, channel)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, channel)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, channel)
else:
attn_weights_reshaped = None
# time delay aggregation
time_length = value_states.size(1)
autocorrelations = attn_weights.view(bsz, self.num_heads, tgt_len, channel)
# find top k autocorrelations delays
top_k = int(self.autocorrelation_factor * math.log(time_length))
autocorrelations_mean_on_head_channel = torch.mean(autocorrelations, dim=(1, -1)) # bsz x tgt_len
if self.training:
autocorrelations_mean_on_bsz = torch.mean(autocorrelations_mean_on_head_channel, dim=0)
_, top_k_delays_index = torch.topk(autocorrelations_mean_on_bsz, top_k)
top_k_autocorrelations = torch.stack(
[autocorrelations_mean_on_head_channel[:, top_k_delays_index[i]] for i in range(top_k)], dim=-1
)
else:
top_k_autocorrelations, top_k_delays_index = torch.topk(
autocorrelations_mean_on_head_channel, top_k, dim=1
)
top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1) # bsz x top_k
# compute aggregation: value_states.roll(delay) * top_k_autocorrelations(delay)
if not self.training:
# used for compute values_states.roll(delay) in inference
tmp_values = value_states.repeat(1, 2, 1)
init_index = (
torch.arange(time_length)
.view(1, -1, 1)
.repeat(bsz * self.num_heads, 1, channel)
.to(value_states.device)
)
delays_agg = torch.zeros_like(value_states).float() # bsz x time_length x channel
for i in range(top_k):
# compute value_states roll delay
if not self.training:
tmp_delay = init_index + top_k_delays_index[:, i].view(-1, 1, 1).repeat(
self.num_heads, tgt_len, channel
)
value_states_roll_delay = torch.gather(tmp_values, dim=1, index=tmp_delay)
else:
value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays_index[i]), dims=1)
# aggregation
top_k_autocorrelations_at_delay = (
top_k_autocorrelations[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel)
)
delays_agg += value_states_roll_delay * top_k_autocorrelations_at_delay
attn_output = delays_agg.contiguous()
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
| AutoformerAttention |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/init_ops_test.py | {
"start": 29728,
"end": 32225
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (10, 10)))
@test_util.run_deprecated_v1
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (10, 10)))
@test_util.run_deprecated_v1
def testDuplicatedInitializer(self):
init = init_ops.orthogonal_initializer()
self.assertFalse(duplicated_initializer(self, init, 1, (10, 10)))
def testInvalidDataType(self):
self.assertRaises(
ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)
def testInvalidShape(self):
init1 = init_ops.orthogonal_initializer()
with self.session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[5])
@test_util.run_deprecated_v1
def testGain(self):
shape = (10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)
with self.session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
t2 = init2(shape).eval()
self.assertAllClose(t1, t2 / 3.14)
@test_util.run_deprecated_v1
def testShapesValues(self):
for dtype in [dtypes.float32, dtypes.float64]:
for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
init = init_ops.orthogonal_initializer(dtype=dtype)
tol = 1e-5 if dtype == dtypes.float32 else 1e-12
with self.session(graph=ops.Graph(), use_gpu=True):
# Check the shape
t = init(shape).eval()
self.assertAllEqual(shape, t.shape)
# Check orthogonality by computing the inner product
t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
if t.shape[0] > t.shape[1]:
self.assertAllClose(
np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
else:
self.assertAllClose(
np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
| OrthogonalInitializerTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/_typed_dict_helper.py | {
"start": 620,
"end": 684
} | class ____(TypedDict):
a: OptionalIntType
T = TypeVar("T")
| Foo |
python | run-llama__llama_index | llama-index-core/tests/program/test_llm_program.py | {
"start": 1194,
"end": 2829
} | class ____(BaseModel):
__test__ = False
hello: str
def test_llm_program() -> None:
"""Test LLM program."""
output_parser = PydanticOutputParser(output_cls=TestModel)
llm_program = LLMTextCompletionProgram.from_defaults(
output_parser=output_parser,
prompt_template_str="This is a test prompt with a {test_input}.",
llm=MockLLM(),
)
# mock llm
obj_output = llm_program(test_input="hello")
assert isinstance(obj_output, TestModel)
assert obj_output.hello == "world"
def test_llm_program_with_messages() -> None:
"""Test LLM program."""
messages = [ChatMessage(role=MessageRole.USER, content="Test")]
prompt = ChatPromptTemplate(message_templates=messages)
output_parser = PydanticOutputParser(output_cls=TestModel)
llm_program = LLMTextCompletionProgram.from_defaults(
output_parser=output_parser,
prompt=prompt,
llm=MockLLM(),
)
# mock llm
obj_output = llm_program()
assert isinstance(obj_output, TestModel)
assert obj_output.hello == "world"
def test_llm_program_with_messages_and_chat() -> None:
"""Test LLM program."""
messages = [ChatMessage(role=MessageRole.USER, content="Test")]
prompt = ChatPromptTemplate(message_templates=messages)
output_parser = PydanticOutputParser(output_cls=TestModel)
llm_program = LLMTextCompletionProgram.from_defaults(
output_parser=output_parser,
prompt=prompt,
llm=MockChatLLM(),
)
# mock llm
obj_output = llm_program()
assert isinstance(obj_output, TestModel)
assert obj_output.hello == "chat"
| TestModel |
python | kamyu104__LeetCode-Solutions | Python/shortest-path-visiting-all-nodes.py | {
"start": 62,
"end": 784
} | class ____(object):
def shortestPathLength(self, graph):
"""
:type graph: List[List[int]]
:rtype: int
"""
dp = [[float("inf")]*(len(graph))
for _ in xrange(1 << len(graph))]
q = collections.deque()
for i in xrange(len(graph)):
dp[1 << i][i] = 0
q.append((1 << i, i))
while q:
state, node = q.popleft()
steps = dp[state][node]
for nei in graph[node]:
new_state = state | (1 << nei)
if dp[new_state][nei] == float("inf"):
dp[new_state][nei] = steps+1
q.append((new_state, nei))
return min(dp[-1])
| Solution |
python | django__django | tests/admin_filters/tests.py | {
"start": 9286,
"end": 9411
} | class ____(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithDynamicValue]
| DepartmentFilterDynamicValueBookAdmin |
python | google__pytype | pytype/rewrite/frame_test.py | {
"start": 19232,
"end": 22081
} | class ____(FrameTestBase):
"""Test making and calling functions."""
def _make_function(self, code, name):
module_frame = self._make_frame(code, name='__main__')
module_frame.run()
return _get(module_frame, name, _FrameFunction)
def _run_until_call(self, code):
def cond(frame):
return frame.current_opcode.name.startswith('CALL')
frame = self.run_frame_until(code, condition=cond)
return frame
@test_utils.skipBeforePy((3, 11), 'Relies on 3.11+ bytecode')
def test_make_function(self):
f = self._make_function(
"""
def f(x, /, y, z, *, a, b, c):
pass
""",
'f',
)
self.assertIsInstance(f, abstract.InterpreterFunction)
self.assertEqual(f.name, 'f')
sig = f.signatures[0]
self.assertEqual(repr(sig), 'def f(x, /, y, z, *, a, b, c) -> Any')
@test_utils.skipBeforePy((3, 11), 'Relies on 3.11+ bytecode')
def test_function_annotations(self):
f = self._make_function(
"""
def f(x: int, /, y: str, *, a: int, b: int = 1):
pass
""",
'f',
)
self.assertIsInstance(f, abstract.InterpreterFunction)
self.assertEqual(f.name, 'f')
sig = f.signatures[0]
self.assertEqual(repr(sig), 'def f(x, /, y, *, a, b) -> Any')
@test_utils.skipBeforePy((3, 11), 'Relies on 3.11+ bytecode')
def test_function_call_kwargs(self):
frame = self._run_until_call("""
def f(x, *, y):
pass
f(1, y=2)
""")
self.assertEqual(frame._call_helper._kw_names, ('y',))
oparg = frame.current_opcode.arg # pytype: disable=attribute-error
_, _, *args = frame._stack.popn(oparg + 2)
callargs = frame._call_helper.make_function_args(args)
self.assertConstantVar(callargs.posargs[0], 1)
self.assertConstantVar(callargs.kwargs['y'], 2)
@test_utils.skipBeforePy((3, 11), 'Relies on 3.11+ bytecode')
def test_call_function_ex_callargs(self):
"""Test unpacking of concrete *args and **args."""
frame = self._make_frame("""
def f(x, y, z):
pass
a = (1, 2)
kw = {'z': 3}
f(*a, **kw)
""")
with mock.patch.object(
frame_lib.Frame, '_call_function', wraps=frame._call_function
) as mock_call:
frame.run()
(_, callargs), _ = mock_call.call_args_list[0]
self.assertConstantVar(callargs.posargs[0], 1)
self.assertConstantVar(callargs.posargs[1], 2)
self.assertConstantVar(callargs.kwargs['z'], 3)
def test_inplace_fallback(self):
"""Test inplace operator falling back to non-inplace."""
frame = self._make_frame("""
a = 1
a += 2
""")
with mock.patch.object(operators, 'call_binary') as mock_call:
frame.run()
posargs, _ = mock_call.call_args_list[0]
op = posargs[1]
self.assertEqual(op, '__add__')
if __name__ == '__main__':
unittest.main()
| FunctionTest |
python | viewflow__viewflow | viewflow/workflow/migrations/0012_alter_process_data_alter_task_data.py | {
"start": 92,
"end": 565
} | class ____(migrations.Migration):
dependencies = [
("viewflow", "0011_alter_task_created_and_more"),
]
operations = [
migrations.AlterField(
model_name="process",
name="data",
field=models.JSONField(blank=True, default=dict),
),
migrations.AlterField(
model_name="task",
name="data",
field=models.JSONField(blank=True, default=dict),
),
]
| Migration |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 359719,
"end": 360159
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("about", "body", "name", "title")
about = sgqlc.types.Field(String, graphql_name="about")
body = sgqlc.types.Field(String, graphql_name="body")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
title = sgqlc.types.Field(String, graphql_name="title")
| IssueTemplate |
python | gevent__gevent | src/gevent/_config.py | {
"start": 17817,
"end": 17946
} | class ____(AresSettingMixin, Setting):
name = 'ares_flags'
default = None
environment_key = 'GEVENTARES_FLAGS'
| AresFlags |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_stackdriver.py | {
"start": 6385,
"end": 8082
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.stackdriver.StackdriverHook")
def test_execute(self, mock_hook):
operator = StackdriverListNotificationChannelsOperator(task_id=TEST_TASK_ID, filter_=TEST_FILTER)
mock_hook.return_value.list_notification_channels.return_value = [
NotificationChannel(name="test-123")
]
result = operator.execute(context=mock.MagicMock())
mock_hook.return_value.list_notification_channels.assert_called_once_with(
project_id=None,
filter_=TEST_FILTER,
format_=None,
order_by=None,
page_size=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
# Depending on the version of google-apitools installed we might receive the response either with or
# without mutation_records.
assert result in [
[
{
"description": "",
"display_name": "",
"labels": {},
"name": "test-123",
"type_": "",
"user_labels": {},
"verification_status": 0,
}
],
[
{
"description": "",
"display_name": "",
"labels": {},
"mutation_records": [],
"name": "test-123",
"type_": "",
"user_labels": {},
"verification_status": 0,
}
],
]
| TestStackdriverListNotificationChannelsOperator |
python | squidfunk__mkdocs-material | material/plugins/tags/structure/listing/config.py | {
"start": 1492,
"end": 4226
} | class ____(Config):
"""
A listing configuration.
"""
scope = Type(bool, default = False)
"""
Whether to only include pages in the current subsection.
Enabling this setting will only include pages that are on the same level or
on a lower level than the page the listing is on. This allows to create a
listing of tags on a page that only includes pages that are in the same
subsection of the documentation.
"""
shadow = Optional(Type(bool))
"""
Whether to include shadow tags.
This setting allows to override the global setting for shadow tags. If this
setting is not specified, the global `shadow` setting is used.
"""
layout = Optional(Type(str))
"""
The layout to use for rendering the listing.
This setting allows to override the global setting for the layout. If this
setting is not specified, the global `listings_layout` setting is used.
"""
toc = Optional(Type(bool))
"""
Whether to populate the table of contents with anchor links to tags.
This setting allows to override the global setting for the layout. If this
setting is not specified, the global `listings_toc` setting is used.
"""
include = TagSet()
"""
Tags to include in the listing.
If this set is empty, the listing does not filter pages by tags. Otherwise,
all pages that have at least one of the tags in this set will be included.
"""
exclude = TagSet()
"""
Tags to exclude from the listing.
If this set is empty, the listing does not filter pages by tags. Otherwise,
all pages that have at least one of the tags in this set will be excluded.
"""
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def _representer(dumper: Dumper, config: ListingConfig):
"""
Return a serializable representation of a listing configuration.
Arguments:
dumper: The YAML dumper.
config: The listing configuration.
Returns:
Serializable representation.
"""
copy = config.copy()
# Convert the include and exclude tag sets to lists of strings
copy.include = list(map(str, copy.include)) if copy.include else None
copy.exclude = list(map(str, copy.exclude)) if copy.exclude else None
# Return serializable listing configuration
data = { k: v for k, v in copy.items() if v is not None }
return dumper.represent_dict(data)
# -----------------------------------------------------------------------------
# Register listing configuration YAML representer
yaml.add_representer(ListingConfig, _representer)
| ListingConfig |
python | getsentry__sentry | tests/sentry/api/serializers/test_event.py | {
"start": 17532,
"end": 25532
} | class ____(TestCase):
def test_event_breadcrumb_formatting(self) -> None:
event = self.store_event(
data={
"breadcrumbs": [
{"category": "generic", "message": "should not format this"},
{
"category": "query",
"message": "select * from table where something = $1",
},
]
},
project_id=self.project.id,
)
result = serialize(event, None, SqlFormatEventSerializer())
breadcrumb_entry = result["entries"][0]
breadcrumbs = breadcrumb_entry["data"]["values"]
assert breadcrumb_entry["type"] == "breadcrumbs"
# First breadcrumb should not have a message_formatted property
assert breadcrumbs[0]["message"] == "should not format this"
assert "messageRaw" not in breadcrumbs[0]
assert "messageFormat" not in breadcrumbs[0]
# Second breadcrumb should have whitespace added
assert breadcrumbs[1]["message"] == "select *\nfrom table\nwhere something = $1"
assert breadcrumbs[1]["messageRaw"] == "select * from table where something = $1"
assert breadcrumbs[1]["messageFormat"] == "sql"
def test_event_breadcrumb_formatting_remove_quotes(self) -> None:
event = self.store_event(
data={
"breadcrumbs": [
{
"category": "query",
"message": """select "table"."column_name", "table"."column name" from "table" where "something" = $1""",
},
{
"category": "query",
"message": """This is not "SQL" content.""",
},
]
},
project_id=self.project.id,
)
result = serialize(event, None, SqlFormatEventSerializer())
# For breadcrumb 1: should remove quotes from all terms except the one that contains a space ("column name")
assert (
result["entries"][0]["data"]["values"][0]["message"]
== """select table.column_name, table."column name"\nfrom table\nwhere something = $1"""
)
# For breadcrumb 2: Not SQL so shouldn't be changed
assert (
result["entries"][0]["data"]["values"][1]["message"] == """This is not "SQL" content."""
)
def test_adds_release_info(self) -> None:
event = self.store_event(
data={
"tags": {
"sentry:release": "internal@1.0.0",
}
},
project_id=self.project.id,
)
repo = self.create_repo(project=self.project, name=self.project.name)
release = Release.objects.create(
version="internal@1.0.0",
organization=self.organization,
date_released=datetime(2023, 1, 1, tzinfo=UTC),
)
release.add_project(self.project)
release.set_commits(
[
{
"id": "917ac271787e74ff2dbe52b67e77afcff9aaa305",
"repository": repo.name,
"author_email": "bob@example.com",
"author_name": "Bob",
"message": "I hope this fixes it",
"patch_set": [{"path": "src/sentry/models/release.py", "type": "M"}],
}
]
)
result = serialize(event, None, SqlFormatEventSerializer())
assert result["release"]["version"] == "internal@1.0.0"
assert result["release"]["lastCommit"]["id"] == "917ac271787e74ff2dbe52b67e77afcff9aaa305"
def test_event_db_span_formatting(self) -> None:
event_data = get_event("n-plus-one-db/n-plus-one-in-django-new-view")
event_data["contexts"] = {
"trace": {
"trace_id": "530c14e044aa464db6ddb43660e6474f",
"span_id": "139fcdb7c5534eb4",
}
}
event = self.store_event(
data={
"type": "transaction",
"transaction": "/organizations/:orgId/performance/:eventSlug/",
"start_timestamp": before_now(minutes=1, milliseconds=500).isoformat(),
"timestamp": before_now(minutes=1).isoformat(),
"contexts": {
"trace": {
"trace_id": "ff62a8b040f340bda5d830223def1d81",
"span_id": "8f5a2b8768cafb4e",
"type": "trace",
}
},
"spans": [
{
"description": """select "table"."column_name", "table"."column name" from "table" where "something" = $1""",
"op": "db",
"parent_span_id": "abe79ad9292b90a9",
"span_id": "9c045ea336297177",
"start_timestamp": before_now(minutes=1, milliseconds=200).timestamp(),
"timestamp": before_now(minutes=1).timestamp(),
"trace_id": "ff62a8b040f340bda5d830223def1d81",
},
{
"description": "http span",
"op": "http",
"parent_span_id": "a99fd04e79e17631",
"span_id": "abe79ad9292b90a9",
"start_timestamp": before_now(minutes=1, milliseconds=200).timestamp(),
"timestamp": before_now(minutes=1).timestamp(),
"trace_id": "ff62a8b040f340bda5d830223def1d81",
},
],
},
project_id=self.project.id,
)
result = serialize(event, None, SqlFormatEventSerializer())
# For span 1: Should remove quotes from all terms except the one that contains a space ("column name")
assert (
result["entries"][0]["data"][0]["description"]
== """select table.column_name, table."column name"\nfrom table\nwhere something = $1"""
)
# For span 2: Not a db span so no change
assert result["entries"][0]["data"][1]["description"] == """http span"""
def test_db_formatting_perf_optimizations(self) -> None:
SQL_QUERY_OK = """select * from table where something in (%s, %s, %s)"""
SQL_QUERY_TOO_LARGE = "a" * 1501
event = self.store_event(
data={
"breadcrumbs": [
{
"category": "query",
"message": SQL_QUERY_OK,
},
{
"category": "query",
"message": SQL_QUERY_OK,
},
{
"category": "query",
"message": SQL_QUERY_TOO_LARGE,
},
]
+ [{"category": "query", "message": str(i)} for i in range(0, 30)]
},
project_id=self.project.id,
)
with mock.patch("sqlparse.format", return_value="") as mock_format:
serialize(event, None, SqlFormatEventSerializer())
assert (
len(
list(
filter(
lambda args: SQL_QUERY_OK in args[0],
mock_format.call_args_list,
)
)
)
== 1
), "SQL_QUERY_OK should have been formatted a single time"
assert not any(
SQL_QUERY_TOO_LARGE in args[0] for args in mock_format.call_args_list
), "SQL_QUERY_TOO_LARGE should not have been formatted"
assert mock_format.call_count == 20, "Format should have been called 20 times"
| SqlFormatEventSerializerTest |
python | matplotlib__matplotlib | lib/matplotlib/category.py | {
"start": 4196,
"end": 5128
} | class ____(ticker.Formatter):
"""String representation of the data at every tick."""
def __init__(self, units_mapping):
"""
Parameters
----------
units_mapping : dict
Mapping of category names (str) to indices (int).
"""
self._units = units_mapping
def __call__(self, x, pos=None):
# docstring inherited
return self.format_ticks([x])[0]
def format_ticks(self, values):
# docstring inherited
r_mapping = {v: self._text(k) for k, v in self._units.items()}
return [r_mapping.get(round(val), '') for val in values]
@staticmethod
def _text(value):
"""Convert text values into utf-8 or ascii strings."""
if isinstance(value, bytes):
value = value.decode(encoding='utf-8')
elif not isinstance(value, str):
value = str(value)
return value
| StrCategoryFormatter |
python | django-import-export__django-import-export | import_export/exceptions.py | {
"start": 319,
"end": 1002
} | class ____(ImportExportError):
def __init__(self, error, number=None, row=None):
"""A wrapper for errors thrown from the import process.
:param error: The underlying error that occurred.
:param number: The row number of the row containing the error (if obtainable).
:param row: The row containing the error (if obtainable).
"""
self.error = error
self.number = number
self.row = row
def __str__(self):
s = ""
if self.number is not None:
s += f"{self.number}: "
s += f"{self.error}"
if self.row is not None:
s += f" ({self.row})"
return s
| ImportError |
python | kamyu104__LeetCode-Solutions | Python/fair-candy-swap.py | {
"start": 37,
"end": 366
} | class ____(object):
def fairCandySwap(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
diff = (sum(A)-sum(B))//2
setA = set(A)
for b in set(B):
if diff+b in setA:
return [diff+b, b]
return []
| Solution |
python | django__django | tests/model_fields/test_charfield.py | {
"start": 1760,
"end": 3853
} | class ____(SimpleTestCase):
class Choices(models.TextChoices):
C = "c", "C"
def test_charfield_raises_error_on_empty_string(self):
f = models.CharField()
msg = "This field cannot be blank."
with self.assertRaisesMessage(ValidationError, msg):
f.clean("", None)
def test_charfield_cleans_empty_string_when_blank_true(self):
f = models.CharField(blank=True)
self.assertEqual("", f.clean("", None))
def test_charfield_with_choices_cleans_valid_choice(self):
f = models.CharField(max_length=1, choices=[("a", "A"), ("b", "B")])
self.assertEqual("a", f.clean("a", None))
def test_charfield_with_choices_raises_error_on_invalid_choice(self):
f = models.CharField(choices=[("a", "A"), ("b", "B")])
msg = "Value 'not a' is not a valid choice."
with self.assertRaisesMessage(ValidationError, msg):
f.clean("not a", None)
def test_enum_choices_cleans_valid_string(self):
f = models.CharField(choices=self.Choices, max_length=1)
self.assertEqual(f.clean("c", None), "c")
def test_enum_choices_invalid_input(self):
f = models.CharField(choices=self.Choices, max_length=1)
msg = "Value 'a' is not a valid choice."
with self.assertRaisesMessage(ValidationError, msg):
f.clean("a", None)
def test_charfield_raises_error_on_empty_input(self):
f = models.CharField(null=False)
msg = "This field cannot be null."
with self.assertRaisesMessage(ValidationError, msg):
f.clean(None, None)
def test_callable_choices(self):
def get_choices():
return {str(i): f"Option {i}" for i in range(3)}
f = models.CharField(max_length=1, choices=get_choices)
for i in get_choices():
with self.subTest(i=i):
self.assertEqual(i, f.clean(i, None))
with self.assertRaises(ValidationError):
f.clean("A", None)
with self.assertRaises(ValidationError):
f.clean("3", None)
| ValidationTests |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 1935,
"end": 2060
} | class ____:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
| ReprWrapper |
python | plotly__plotly.py | plotly/express/_special_inputs.py | {
"start": 958,
"end": 1300
} | class ____(object):
"""
Objects of this class can be passed to Plotly Express functions that expect column
identifiers or list-like objects to indicate that this attribute should be mapped
onto integers starting at 0. An optional label can be provided.
"""
def __init__(self, label=None):
self.label = label
| Range |
python | sympy__sympy | sympy/codegen/fnodes.py | {
"start": 18265,
"end": 18363
} | class ____(FFunction):
""" Fortran sign intrinsic for integer arguments. """
nargs = 2
| isign |
python | ray-project__ray | python/ray/data/_internal/datasource/avro_datasource.py | {
"start": 381,
"end": 1484
} | class ____(FileBasedDatasource):
"""A datasource that reads Avro files."""
_FILE_EXTENSIONS = ["avro"]
def __init__(
self,
paths: Union[str, List[str]],
**file_based_datasource_kwargs,
):
super().__init__(paths, **file_based_datasource_kwargs)
_check_import(self, module="fastavro", package="fastavro")
def _read_stream(self, f: "pyarrow.NativeFile", path: str) -> Iterator[Block]:
import fastavro
# Read the Avro file. This assumes the Avro file includes its schema.
reader = fastavro.reader(f)
ctx = DataContext.get_current()
output_block_size_option = OutputBlockSizeOption.of(
target_max_block_size=ctx.target_max_block_size
)
output_buffer = BlockOutputBuffer(output_block_size_option)
for record in reader:
output_buffer.add(record)
while output_buffer.has_next():
yield output_buffer.next()
output_buffer.finalize()
while output_buffer.has_next():
yield output_buffer.next()
| AvroDatasource |
python | apache__airflow | scripts/ci/prek/lint_json_schema.py | {
"start": 3396,
"end": 6137
} | class ____(Exception):
pass
def load_file(file_path: str):
"""Loads a file using a serializer which guesses based on the file extension"""
if file_path.lower().endswith(".json"):
with open(file_path) as input_file:
return json.load(input_file)
elif file_path.lower().endswith((".yaml", ".yml")):
with open(file_path) as input_file:
return yaml.safe_load(input_file)
raise _ValidatorError("Unknown file format. Supported extension: '.yaml', '.json'")
def _get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Validates the file using JSON Schema specifications")
parser.add_argument(
"--enforce-defaults", action="store_true", help="Values must match the default in the schema"
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--spec-file", help="The path to specification")
group.add_argument("--spec-url", help="The URL to specification")
parser.add_argument("file", nargs="+")
return parser
def _process_files(validator, file_paths: list[str]):
exit_code = 0
for input_path in file_paths:
print("Processing file: ", input_path)
instance = load_file(input_path)
for error in validator.iter_errors(instance):
print(error)
exit_code = 1
return exit_code
def _create_validator(schema, enforce_defaults: bool):
cls = validator_for(schema)
cls.check_schema(schema)
if enforce_defaults:
cls = extend(cls, {"default": _default_validator})
return cls(schema)
def _default_validator(validator, default, instance, schema):
# We will also accept a "See values.yaml" default
if default != instance and default != "See values.yaml":
yield ValidationError(f"{instance} is not equal to the default of {default}")
def _load_spec(spec_file: str | None, spec_url: str | None):
if spec_url:
spec_file = fetch_and_cache(url=spec_url, output_filename=re.sub(r"[^a-zA-Z0-9]", "-", spec_url))
if not spec_file:
raise ValueError(f"The {spec_file} was None and {spec_url} did not lead to any file loading.")
with open(spec_file) as schema_file:
schema = json.loads(schema_file.read())
return schema
def main() -> int:
"""Main code"""
parser = _get_parser()
args = parser.parse_args()
spec_url = args.spec_url
spec_file = args.spec_file
enforce_defaults = args.enforce_defaults
schema = _load_spec(spec_file, spec_url)
validator = _create_validator(schema, enforce_defaults)
file_paths = args.file
exit_code = _process_files(validator, file_paths)
return exit_code
sys.exit(main())
| _ValidatorError |
python | django__django | tests/view_tests/tests/test_debug.py | {
"start": 1887,
"end": 2382
} | class ____(SimpleTestCase):
"""Unittests for CallableSettingWrapper"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
| CallableSettingWrapperTests |
python | redis__redis-py | redis/commands/search/document.py | {
"start": 0,
"end": 413
} | class ____:
"""
Represents a single document in a result set
"""
def __init__(self, id, payload=None, **fields):
self.id = id
self.payload = payload
for k, v in fields.items():
setattr(self, k, v)
def __repr__(self):
return f"Document {self.__dict__}"
def __getitem__(self, item):
value = getattr(self, item)
return value
| Document |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-openrouter/llama_index/llms/openrouter/base.py | {
"start": 490,
"end": 2975
} | class ____(OpenAILike):
"""
OpenRouter LLM.
To instantiate the `OpenRouter` class, you will need to provide an API key. You can set the API key either as an environment variable `OPENROUTER_API_KEY` or directly in the class
constructor. If setting it in the class constructor, it would look like this:
If you haven't signed up for an API key yet, you can do so on the OpenRouter website at (https://openrouter.ai). Once you have your API key, you can use the `OpenRouter` class to interact
with the LLM for tasks like chatting, streaming, and completing prompts.
Examples:
`pip install llama-index-llms-openrouter`
```python
from llama_index.llms.openrouter import OpenRouter
llm = OpenRouter(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="gryphe/mythomax-l2-13b",
)
response = llm.complete("Hello World!")
print(str(response))
```
"""
model: str = Field(
description="The OpenRouter model to use. See https://openrouter.ai/models for options."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. See https://openrouter.ai/models for options.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "OPENROUTER_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "OPENROUTER_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "OpenRouter_LLM"
| OpenRouter |
python | pytorch__pytorch | torch/_dynamo/utils.py | {
"start": 144825,
"end": 161611
} | class ____:
# inclusive
left_end_lineno: int
left_end_offset: int
right_start_lineno: int
# exclusive
right_start_offset: int
def _extract_anchors_from_expr(segment: str) -> Optional[_Anchors]:
"""
Given source code `segment` corresponding to a bytecode
instruction, determine:
- for binary ops, the location of the binary op
- for indexing, the location of the brackets.
`segment` is expected to be a valid Python expression
"""
assert sys.version_info >= (3, 11)
import ast
try:
# Without brackets, `segment` is parsed as a statement.
# We expect an expression, so wrap `segment` in
# brackets to handle multi-line expressions.
tree = ast.parse("(\n" + segment + "\n)")
except SyntaxError:
return None
if len(tree.body) != 1:
return None
lines = segment.split("\n")
# get character index given byte offset
def normalize(lineno: int, offset: int) -> int:
return _fix_offset(lines[lineno], offset)
# Gets the next valid character index in `lines`, if
# the current location is not valid. Handles empty lines.
def next_valid_char(lineno: int, col: int) -> tuple[int, int]:
while lineno < len(lines) and col >= len(lines[lineno]):
col = 0
lineno += 1
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
# Get the next valid character index in `lines`.
def increment(lineno: int, col: int) -> tuple[int, int]:
col += 1
lineno, col = next_valid_char(lineno, col)
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
# Get the next valid character at least on the next line
def nextline(lineno: int, col: int) -> tuple[int, int]:
col = 0
lineno += 1
lineno, col = next_valid_char(lineno, col)
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
statement = tree.body[0]
if isinstance(statement, ast.Expr):
expr = statement.value
if isinstance(expr, ast.BinOp):
# ast gives locations for BinOp subexpressions, e.g.
# ( left_expr ) + ( right_expr )
# left^^^^^ right^^^^^
# -2 since end_lineno is 1-indexed and because we added an extra
# bracket to `segment` when calling ast.parse
cur_lineno = cast(int, expr.left.end_lineno) - 2
assert expr.left.end_col_offset is not None
cur_col = normalize(cur_lineno, expr.left.end_col_offset)
cur_lineno, cur_col = next_valid_char(cur_lineno, cur_col)
# Heuristic to find the operator character.
# The original CPython implementation did not look for ), \, or #,
# leading to incorrect anchor location, e.g.
# (x) + (y)
# ~~^~~~~~~
while (ch := lines[cur_lineno][cur_col]).isspace() or ch in ")\\#":
if ch in "\\#":
cur_lineno, cur_col = nextline(cur_lineno, cur_col)
else:
cur_lineno, cur_col = increment(cur_lineno, cur_col)
# binary op is 1 or 2 characters long, on the same line
right_col = cur_col + 1
if (
right_col < len(lines[cur_lineno])
and not (ch := lines[cur_lineno][right_col]).isspace()
and ch not in "\\#"
):
right_col += 1
# right_col can be invalid since it is exclusive
return _Anchors(cur_lineno, cur_col, cur_lineno, right_col)
elif isinstance(expr, ast.Subscript):
# ast gives locations for value and slice subexpressions, e.g.
# ( value_expr ) [ slice_expr ]
# value^^^^^ slice^^^^^
# subscript^^^^^^^^^^^^^^^^^^^^
# find left bracket (first '[' after value)
left_lineno = cast(int, expr.value.end_lineno) - 2
assert expr.value.end_col_offset is not None
left_col = normalize(left_lineno, expr.value.end_col_offset)
left_lineno, left_col = next_valid_char(left_lineno, left_col)
while lines[left_lineno][left_col] != "[":
left_lineno, left_col = increment(left_lineno, left_col)
# find right bracket (final character of expression)
right_lineno = cast(int, expr.end_lineno) - 2
assert expr.end_col_offset is not None
right_col = normalize(right_lineno, expr.end_col_offset)
return _Anchors(left_lineno, left_col, right_lineno, right_col)
elif isinstance(expr, ast.Call):
# ( func_expr ) (args, kwargs)
# func^^^^^
# call^^^^^^^^^^^^^^^^^^^^^^^^
# find left bracket (first '(' after func)
left_lineno = cast(int, expr.func.end_lineno) - 2
assert expr.func.end_col_offset is not None
left_col = normalize(left_lineno, expr.func.end_col_offset)
left_lineno, left_col = next_valid_char(left_lineno, left_col)
while lines[left_lineno][left_col] != "(":
left_lineno, left_col = increment(left_lineno, left_col)
# find right bracket (final character of expression)
right_lineno = cast(int, expr.end_lineno) - 2
assert expr.end_col_offset is not None
right_col = normalize(right_lineno, expr.end_col_offset)
return _Anchors(left_lineno, left_col, right_lineno, right_col)
return None
def get_instruction_source_311(code: types.CodeType, inst: dis.Instruction) -> str:
"""
Python 3.11+ only. Returns lines of source code (from code object `code`)
corresponding to `inst`'s location data, and underlines relevant code to `inst`.
Example: CALL on `g`:
f(g(
^^
h(x)))
^^^^^
We need our own implementation in < 3.13 since `format_frame_summary` in
Python's `traceback` module doesn't handle multi-line expressions
(and their anchor extraction code is not completely correct).
"""
if sys.version_info >= (3, 13):
# multiline traceback implemented in 3.13+
frame_summary = traceback.FrameSummary(
code.co_filename,
inst.positions.lineno,
code.co_name,
end_lineno=inst.positions.end_lineno,
colno=inst.positions.col_offset,
end_colno=inst.positions.end_col_offset,
)
result = traceback.format_list([frame_summary])[0]
# remove first line containing filename info
result = "\n".join(result.splitlines()[1:])
# indent lines with original indentation
orig_lines = [
linecache.getline(code.co_filename, lineno).rstrip()
for lineno in range(inst.positions.lineno, inst.positions.end_lineno + 1)
]
orig_lines_dedent = textwrap.dedent("\n".join(orig_lines)).splitlines()
indent_len = len(orig_lines[0]) - len(orig_lines_dedent[0])
indent = orig_lines[0][:indent_len]
result = textwrap.indent(textwrap.dedent(result), indent)
return result
assert inst.positions is not None
if inst.positions.lineno is None:
return ""
# The rstrip + "\n" pattern is used throughout this function to handle
# linecache.getline errors. Error lines are treated as empty strings "", but we want
# to treat them as blank lines "\n".
first_line = linecache.getline(code.co_filename, inst.positions.lineno).rstrip()
if inst.positions.end_lineno is None:
return first_line
if inst.positions.col_offset is None or inst.positions.end_col_offset is None:
return first_line
# character index of the start of the instruction
start_offset = _fix_offset(first_line, inst.positions.col_offset)
# character index of the end of the instruction
# compute later since end may be a different line
end_offset = None
# expression corresponding to the instruction so we can get anchors
segment = ""
# underline markers to be printed - start with `~` marker and replace with `^` later
markers = []
# Compute segment and initial markers
if inst.positions.end_lineno == inst.positions.lineno:
end_offset = _fix_offset(first_line, inst.positions.end_col_offset)
segment = first_line[start_offset:end_offset]
markers.append(" " * start_offset + "~" * (end_offset - start_offset))
else:
segment = first_line[start_offset:] + "\n"
markers.append(" " * start_offset + "~" * (len(first_line) - start_offset))
last_line = linecache.getline(
code.co_filename, inst.positions.end_lineno
).rstrip()
end_offset = _fix_offset(last_line, inst.positions.end_col_offset)
for lineno in range(inst.positions.lineno + 1, inst.positions.end_lineno):
line = linecache.getline(code.co_filename, lineno).rstrip()
segment += line + "\n"
# don't underline leading spaces
num_spaces = len(line) - len(line.lstrip())
markers.append(" " * num_spaces + "~" * (len(line) - num_spaces))
segment += last_line[:end_offset]
num_spaces = len(last_line) - len(last_line.lstrip())
markers.append(" " * num_spaces + "~" * (end_offset - num_spaces))
anchors: Optional[_Anchors] = None
try:
anchors = _extract_anchors_from_expr(segment)
except AssertionError:
pass
# replace `~` markers with `^` where necessary
if anchors is None:
markers = [marker.replace("~", "^") for marker in markers]
else:
# make markers mutable
mutable_markers: list[list[str]] = [list(marker) for marker in markers]
# anchor positions do not take start_offset into account
if anchors.left_end_lineno == 0:
anchors.left_end_offset += start_offset
if anchors.right_start_lineno == 0:
anchors.right_start_offset += start_offset
# Turn `~`` markers between anchors to `^`
for lineno in range(len(markers)):
for col in range(len(mutable_markers[lineno])):
if lineno < anchors.left_end_lineno:
continue
if lineno == anchors.left_end_lineno and col < anchors.left_end_offset:
continue
if (
lineno == anchors.right_start_lineno
and col >= anchors.right_start_offset
):
continue
if lineno > anchors.right_start_lineno:
continue
if mutable_markers[lineno][col] == "~":
mutable_markers[lineno][col] = "^"
# make markers into strings again
markers = ["".join(marker) for marker in mutable_markers]
result = ""
for i in range(len(markers)):
result += (
linecache.getline(code.co_filename, inst.positions.lineno + i).rstrip()
+ "\n"
)
result += markers[i] + "\n"
return result
def get_static_address_type(t: Any) -> Any:
if isinstance(t, torch.Tensor):
return getattr(t, "_dynamo_static_input_type", None)
return None
def is_rng_state_getter_or_setter(value: Any) -> bool:
getters = (
# The following two functions are not identical, so don't remove anyone!
torch._C.Generator.get_state,
torch.default_generator.get_state,
torch.get_rng_state,
torch.cuda.get_rng_state,
)
setters = (
torch._C.Generator.set_state,
torch.default_generator.set_state,
torch.set_rng_state,
torch.cuda.set_rng_state,
)
return value in (*setters, *getters)
def is_tensor_base_attr_getter(value: Any) -> bool:
return (
isinstance(value, types.MethodWrapperType)
and value.__name__ == "__get__"
and value.__self__.__objclass__ is torch._C._TensorBase # type: ignore[attr-defined]
)
def is_tensor_getset_descriptor(name: str) -> bool:
try:
attr = inspect.getattr_static(torch.Tensor, name)
return type(attr) is types.GetSetDescriptorType
except AttributeError:
return False
def is_torch_function_object(value: Any) -> bool:
return hasattr(value, "__torch_function__")
def has_torch_function(vt: VariableTracker) -> bool:
# This emulates
# https://github.com/pytorch/pytorch/blob/8d81806211bc3c0ee6c2ef235017bacf1d775a85/torch/csrc/utils/disable_torch_function.cpp#L315-L323
from torch._dynamo.variables import UserDefinedObjectVariable
from torch._dynamo.variables.torch_function import TensorWithTFOverrideVariable
# Note on lazy vars: The value will either be realized or not throughout the course of execution
# if the value has a torch function, it will eventually be realized so we can realize it here
# if the value does not have a torch function, it may or may not be realized
# if it is realized it will be used and guards will be installed properly
# if it is not used, guards won't be installed, and it doesn't matter
# if the value has a torch function or not, so we should *not* realize it.
# NB: We technically know that if is_realized is False, LazyVariableTracker has the peek_value method
# but mypy does not unfortunately
if vt.is_realized() or (
hasattr(vt, "peek_value") and hasattr(vt.peek_value(), "__torch_function__")
):
func = None
if isinstance(vt, TensorWithTFOverrideVariable):
func = getattr(vt.class_type, "__torch_function__", None)
elif isinstance(vt, UserDefinedObjectVariable):
func = getattr(vt.value, "__torch_function__", None)
return func not in (None, torch._C._disabled_torch_function_impl)
return False
# see note [Tensor Fakification and Symbol Caching]
def to_fake_tensor(
t: torch.Tensor, fake_mode: torch._subclasses.fake_tensor.FakeTensorMode
) -> Any:
symbolic_context = None
source = None
if tracing_context := torch._guards.TracingContext.try_get():
if t in tracing_context.tensor_to_context:
symbolic_context = tracing_context.tensor_to_context[t]
source = symbolic_context.tensor_source
return fake_mode.from_tensor(
t, static_shapes=False, symbolic_context=symbolic_context, source=source
)
# NB: this works for both classes and instances
def is_frozen_dataclass(value: Any) -> bool:
return (
not object_has_getattribute(value)
and not class_has_getattribute(value)
and is_dataclass(value)
and hasattr(value, "__dataclass_params__")
and hasattr(value.__dataclass_params__, "frozen")
and value.__dataclass_params__.frozen
)
def get_first_attr(obj: Any, *attrs: str) -> Any:
"""
Return the first available attribute or throw an exception if none is present.
"""
for attr in attrs:
if hasattr(obj, attr):
return getattr(obj, attr)
raise AssertionError(f"{obj} does not has any of the attributes: {attrs}")
@contextlib.contextmanager
def maybe_enable_compiled_autograd(
should_enable: bool, fullgraph: bool = True, dynamic: bool = True
) -> Generator[Any, None, None]:
if not should_enable:
yield
else:
def compiler_fn(gm: Any) -> Any:
def inner_compiler(gm_: Any, example_inputs_: Any) -> Any:
torch._dynamo.utils.counters["compiled_autograd"]["compiles"] += 1
return torch._inductor.compile(gm_, example_inputs_)
return torch.compile(
gm, backend=inner_compiler, fullgraph=fullgraph, dynamic=dynamic
)
with torch._dynamo.compiled_autograd._enable(compiler_fn) as ctx:
yield ctx
def invalid_removeable_handle() -> RemovableHandle:
# need a subclass so weakref works
class Invalid(dict): # type: ignore[type-arg]
pass
return RemovableHandle(Invalid())
# Returns a "proxy" (new object with the same class and dict) for (non-GraphModule) nn.Module's.
# Attribute changes to the original object/proxy will be reflected in the other.
# This is useful for cases where we want a keep-alive reference to a module without increasing
# its reference count.
def nn_module_proxy(mod: Any) -> Any:
if not isinstance(mod, torch.nn.Module):
return mod
if isinstance(mod, torch.fx.GraphModule):
# Dynamo-generated GM's shouldn't contain user-created GM's
return mod
proxy = mod.__class__.__new__(mod.__class__)
proxy.__dict__ = mod.__dict__
return proxy
| _Anchors |
python | mlflow__mlflow | mlflow/entities/param.py | {
"start": 129,
"end": 1133
} | class ____(_MlflowObject):
"""
Parameter object.
"""
def __init__(self, key, value):
if "pyspark.ml" in sys.modules:
import pyspark.ml.param
if isinstance(key, pyspark.ml.param.Param):
key = key.name
value = str(value)
self._key = key
self._value = value
@property
def key(self):
"""String key corresponding to the parameter name."""
return self._key
@property
def value(self):
"""String value of the parameter."""
return self._value
def to_proto(self):
param = ProtoParam()
param.key = self.key
param.value = self.value
return param
@classmethod
def from_proto(cls, proto):
return cls(proto.key, proto.value)
def __eq__(self, __o):
if isinstance(__o, self.__class__):
return self._key == __o._key
return False
def __hash__(self):
return hash(self._key)
| Param |
python | sanic-org__sanic | sanic/response/types.py | {
"start": 689,
"end": 7427
} | class ____:
"""The base class for all HTTP Responses"""
__slots__ = (
"asgi",
"body",
"content_type",
"stream",
"status",
"headers",
"_cookies",
)
_dumps = json_dumps
def __init__(self):
self.asgi: bool = False
self.body: Optional[bytes] = None
self.content_type: Optional[str] = None
self.stream: Optional[Union[Http, ASGIApp, HTTPReceiver]] = None
self.status: int = None
self.headers = Header({})
self._cookies: Optional[CookieJar] = None
def __repr__(self):
class_name = self.__class__.__name__
return f"<{class_name}: {self.status} {self.content_type}>"
def _encode_body(self, data: Optional[str | bytes]):
if data is None:
return b""
return data.encode() if hasattr(data, "encode") else data # type: ignore
@property
def cookies(self) -> CookieJar:
"""The response cookies.
See [Cookies](/en/guide/basics/cookies.html)
Returns:
CookieJar: The response cookies
"""
if self._cookies is None:
self._cookies = CookieJar(self.headers)
return self._cookies
@property
def processed_headers(self) -> Iterator[tuple[bytes, bytes]]:
"""Obtain a list of header tuples encoded in bytes for sending.
Add and remove headers based on status and content_type.
Returns:
Iterator[Tuple[bytes, bytes]]: A list of header tuples encoded in bytes for sending
""" # noqa: E501
if has_message_body(self.status):
self.headers.setdefault("content-type", self.content_type)
# Encode headers into bytes
return (
(name.encode("ascii"), f"{value}".encode(errors="surrogateescape"))
for name, value in self.headers.items()
)
async def send(
self,
data: Optional[AnyStr] = None,
end_stream: Optional[bool] = None,
) -> None:
"""Send any pending response headers and the given data as body.
Args:
data (Optional[AnyStr], optional): str or bytes to be written. Defaults to `None`.
end_stream (Optional[bool], optional): whether to close the stream after this block. Defaults to `None`.
""" # noqa: E501
if data is None and end_stream is None:
end_stream = True
if self.stream is None:
raise SanicException(
"No stream is connected to the response object instance."
)
if self.stream.send is None:
if end_stream and not data:
return
raise ServerError(
"Response stream was ended, no more response data is "
"allowed to be sent."
)
data = data.encode() if hasattr(data, "encode") else data or b"" # type: ignore
await self.stream.send(
data, # type: ignore
end_stream=end_stream or False,
)
def add_cookie(
self,
key: str,
value: str,
*,
path: str = "/",
domain: Optional[str] = None,
secure: bool = True,
max_age: Optional[int] = None,
expires: Optional[datetime] = None,
httponly: bool = False,
samesite: Optional[SameSite] = "Lax",
partitioned: bool = False,
comment: Optional[str] = None,
host_prefix: bool = False,
secure_prefix: bool = False,
) -> Cookie:
"""Add a cookie to the CookieJar
See [Cookies](/en/guide/basics/cookies.html)
Args:
key (str): The key to be added
value (str): The value to be added
path (str, optional): Path of the cookie. Defaults to `"/"`.
domain (Optional[str], optional): Domain of the cookie. Defaults to `None`.
secure (bool, optional): Whether the cookie is secure. Defaults to `True`.
max_age (Optional[int], optional): Max age of the cookie. Defaults to `None`.
expires (Optional[datetime], optional): Expiry date of the cookie. Defaults to `None`.
httponly (bool, optional): Whether the cookie is http only. Defaults to `False`.
samesite (Optional[SameSite], optional): SameSite policy of the cookie. Defaults to `"Lax"`.
partitioned (bool, optional): Whether the cookie is partitioned. Defaults to `False`.
comment (Optional[str], optional): Comment of the cookie. Defaults to `None`.
host_prefix (bool, optional): Whether to add __Host- as a prefix to the key. This requires that path="/", domain=None, and secure=True. Defaults to `False`.
secure_prefix (bool, optional): Whether to add __Secure- as a prefix to the key. This requires that secure=True. Defaults to `False`.
Returns:
Cookie: The cookie that was added
""" # noqa: E501
return self.cookies.add_cookie(
key=key,
value=value,
path=path,
domain=domain,
secure=secure,
max_age=max_age,
expires=expires,
httponly=httponly,
samesite=samesite,
partitioned=partitioned,
comment=comment,
host_prefix=host_prefix,
secure_prefix=secure_prefix,
)
def delete_cookie(
self,
key: str,
*,
path: str = "/",
domain: Optional[str] = None,
host_prefix: bool = False,
secure_prefix: bool = False,
) -> None:
"""Delete a cookie
This will effectively set it as Max-Age: 0, which a browser should
interpret it to mean: "delete the cookie".
Since it is a browser/client implementation, your results may vary
depending upon which client is being used.
See [Cookies](/en/guide/basics/cookies.html)
Args:
key (str): The key to be deleted
path (str, optional): Path of the cookie. Defaults to `"/"`.
domain (Optional[str], optional): Domain of the cookie. Defaults to `None`.
host_prefix (bool, optional): Whether to add __Host- as a prefix to the key. This requires that path="/", domain=None, and secure=True. Defaults to `False`.
secure_prefix (bool, optional): Whether to add __Secure- as a prefix to the key. This requires that secure=True. Defaults to `False`.
""" # noqa: E501
self.cookies.delete_cookie(
key=key,
path=path,
domain=domain,
host_prefix=host_prefix,
secure_prefix=secure_prefix,
)
| BaseHTTPResponse |
python | pytorch__pytorch | test/functorch/test_vmap.py | {
"start": 145280,
"end": 194136
} | class ____(TestCase):
def vmap_outplace_test(
self,
func,
args,
kwargs,
in_dims,
check_shape_only=False,
postprocess_fn=None,
out_dim=0,
):
for vmap_out, loop_out in compute_quantities_for_vmap_test(
func, args, kwargs, in_dims, out_dim=out_dim
):
if postprocess_fn is not None:
loop_out = postprocess_fn(loop_out)
vmap_out = postprocess_fn(vmap_out)
if check_shape_only:
self.assertEqual(vmap_out.shape, loop_out.shape)
continue
self.assertEqual(vmap_out, loop_out)
def vmap_inplace_test(
self, func, args, kwargs, in_dims, postprocess_fn=None, out_dim=0
):
# NB: This test assumes that the first argument is being modified.
# This is OK because it's what every other OpInfo-based test assumes,
# but it is going to need a more robust solution eventually.
if in_dims[0] is None:
# Check that we correctly raise an error when vmap is impossible
# on the in-place operation
with self.assertRaises(RuntimeError):
for _ in compute_quantities_for_vmap_test(
func,
args,
kwargs,
in_dims,
out_dim=out_dim,
compute_loop_out=False,
clone_inputs=True,
):
pass
return
for vmap_out, loop_out in compute_quantities_for_vmap_test(
func,
args,
kwargs,
in_dims,
clone_inputs=True,
out_dim=out_dim,
):
if postprocess_fn is not None:
loop_out = postprocess_fn(loop_out)
vmap_out = postprocess_fn(vmap_out)
self.assertEqual(vmap_out, loop_out)
def opinfo_vmap_test(
self,
device,
dtype,
op,
check_has_batch_rule,
skip_inplace=(),
postprocess_fn=None,
):
def test():
# Error inputs check
if op.error_inputs_func is not None:
error_inputs = op.error_inputs(device)
for error_input in error_inputs:
sample_input = error_input.sample_input
args = (sample_input.input,) + tuple(sample_input.args)
kwargs = sample_input.kwargs
for batched_args, in_dims, _ in generate_vmap_inputs(args, {}):
with self.assertRaises(Exception):
vmap(op, in_dims)(*batched_args, **kwargs)
# Sample inputs check
sample_inputs_op = {
# Take too long with reference inputs
"special.chebyshev_polynomial_t",
"special.chebyshev_polynomial_u",
"special.chebyshev_polynomial_v",
"special.chebyshev_polynomial_w",
"special.hermite_polynomial_he",
"special.laguerre_polynomial_l",
"special.legendre_polynomial_p",
"special.shifted_chebyshev_polynomial_t",
"special.shifted_chebyshev_polynomial_u",
"special.shifted_chebyshev_polynomial_v",
"special.shifted_chebyshev_polynomial_w",
}
if op.name in sample_inputs_op:
sample_inputs_itr = op.sample_inputs(
device, dtype, requires_grad=False, use_subtests=True
)
else:
sample_inputs_itr = op.reference_inputs(
device, dtype, requires_grad=False, use_subtests=True
)
aliases, inplace_aliases = discover_variants(op)
check_shape_only = op.name in ("empty_like", "new_empty")
for sample_input, subtest_ctx, skip_xfail_ctx in sample_inputs_itr:
with subtest_ctx(self), skip_xfail_ctx(self):
args = (sample_input.input,) + sample_input.args
if not any(isinstance(arg, torch.Tensor) for arg in args):
# At least one tensor required for vmap.
continue
kwargs = sample_input.kwargs
is_batch_norm_and_training = is_batch_norm_training(op.name, kwargs)
out_dim = 0
if op.name == "NumpySplitCopyWithIntCustomOp":
# special case for this custom op
def sample_vmap_out_dim_numpy_split_copy_with_int(
x, splits, dim
):
return [0 for _ in range(len(splits) + 1)], None
out_dim = sample_vmap_out_dim_numpy_split_copy_with_int(*args)
for batched_args, in_dims, _ in generate_vmap_inputs(
args, {}, is_batch_norm_and_training=is_batch_norm_and_training
):
for func in aliases:
self.vmap_outplace_test(
func,
batched_args,
kwargs,
in_dims,
check_shape_only,
postprocess_fn,
out_dim=out_dim,
)
if op.name in skip_inplace:
continue
if not is_valid_inplace_sample_input(
sample_input, op, op.inplace_variant
):
continue
for func in inplace_aliases:
self.vmap_inplace_test(
func, batched_args, kwargs, in_dims, postprocess_fn
)
if check_has_batch_rule:
check_vmap_fallback(self, test, op)
else:
test()
vmap_fail = {
# -------------------- ALLOWED FAILURES --------------------------------
# These are things that we either cannot fix or are not actually problems
xfail("resize_"),
xfail("resize_as_"),
xfail("to_sparse"),
xfail("__getitem__"), # dynamic mask
xfail("index_put"), # dynamic mask
xfail(
"nn.functional.dropout"
), # works, can't check against for loop because of randomness inconsistency
xfail("nn.functional.scaled_dot_product_attention"), # randomness
xfail("nn.functional.multi_head_attention_forward"), # randomness
xfail("masked_select"), # dynamic op
xfail("nonzero"), # dynamic op
xfail("unique", ""), # dynamic op
xfail("unique_consecutive", ""), # dynamic op
xfail("allclose"), # returns a boolean
xfail("uniform"), # randomness is tested separately
xfail("rand_like"), # randomness is tested separately
xfail("randint_like"), # randomness is tested separately
xfail("randn_like"), # randomness is tested separately
xfail("bernoulli", ""), # randomness is tested separately
xfail("normal", ""), # randomness is tested separately
xfail("normal", "number_mean"), # randomness is tested separately
xfail("multinomial", ""), # randomness
xfail("nn.functional.embedding", ""), # we only support some cases
xfail("nn.functional.rrelu"), # randomness
xfail("nn.functional.dropout2d", ""), # randomness
xfail("nn.functional.dropout3d", ""), # randomness
xfail("nn.functional.alpha_dropout", ""), # randomness
xfail("nn.functional.feature_alpha_dropout", "with_train"), # randomness
xfail("as_strided"), # Our test runner can't handle this; manual test exists
xfail("as_strided_copy"),
xfail(
"as_strided_scatter"
), # no batching rule implemented, default doesn't work
skip(
"new_empty_strided"
), # empty tensor data is garbage so it's hard to make comparisons with it
xfail("nn.functional.fractional_max_pool3d"), # randomness
xfail("nn.functional.fractional_max_pool2d"), # randomness
xfail("pca_lowrank", ""), # random operation
xfail("svd_lowrank", ""), # random operation
xfail("sparse.sampled_addmm"), # sparse
xfail("sparse.mm", "reduce"), # sparse
xfail(
"NumpyCubeNotComposableAutogradFunction"
), # Not composable autograd.Function
skip("_softmax_backward_data"),
skip(
"linalg.eigh", ""
), # not always return the same result for the same input, see test_linalg_eigh for manual test
# UnimplementedError: data-dependent operators cannot be vmapped
xfail("NumpyNonzeroCustomOp"),
xfail("NumpyNMSCustomOp"),
# ----------------------------------------------------------------------
# ---------------------------- BUGS ------------------------------------
# entries in here don't work and need to be fixed.
# Each one of these is a bug
decorate("frexp", decorator=skipIfTorchDynamo()),
xfail("clamp_min", ""), # Exception not raised on error input
xfail("clamp_max", ""), # Exception not raised on error input
xfail(
"view_as_complex"
), # RuntimeError: Tensor must have a last dimension with stride 1
xfail("tensor_split"), # data_ptr
xfail(
"histogramdd"
), # expected Tensor as element 0 in argument 0, but got tuple
xfail("nn.functional.gaussian_nll_loss"), # data-dependent control flow error
xfail(
"nn.functional.embedding_bag"
), # embedding renorm vmap inplace incompatible
xfail("narrow"), # Batching rule not implemented for aten::narrow.Tensor
# required rank 4 tensor to use channels_last format
xfail("bfloat16"),
xfail("bool"),
xfail("byte"),
xfail("char"),
xfail("double"),
xfail("float"),
xfail("half"),
xfail("int"),
xfail("long"),
xfail("short"),
xfail("cdouble"),
xfail("cfloat"),
xfail(
"jiterator_binary", device_type="cuda"
), # NYI: querying is_contiguous inside of vmap
xfail(
"jiterator_binary_return_by_ref", device_type="cuda"
), # NYI: querying is_contiguous inside of vmap
xfail(
"jiterator_4inputs_with_extra_args", device_type="cuda"
), # NYI: querying is_contiguous inside of vmap
xfail(
"equal", ""
), # TypeError: object of type 'bool' has no len(); likely testrunner problem
xfail(
"jiterator_unary", device_type="cuda"
), # NYI: querying is_contiguous inside of vmap
xfail(
"jiterator_2inputs_2outputs", device_type="cuda"
), # NYI: querying is_contiguous inside of vmap
# ---------------------------------------------------------------------
# TypeError: expected Tensor as element 0 in argument 0, but got NotImplementedType
xfail("__rsub__"),
# RuntimeError: Batching rule not implemented for aten::moveaxis.int;
# the fallback path doesn't work on out= or view ops.
xfail("movedim"),
# RuntimeError: NYI: querying is_contiguous inside of vmap for
# memory_format other than torch.contiguous_format
xfail("contiguous"),
# RuntimeError: NYI: Tensor.clone(memory_format) inside vmap is only supported
# with memory_format torch.preserve_format or torch.contiguous_format (got ChannelsLast)
xfail("clone"),
# RuntimeError: When vmap-ing torch.nn.functional.one_hot,
# please provide an explicit positive num_classes argument.
xfail("nn.functional.one_hot"),
# RuntimeError: Expected all tensors to be on the same device,
# but found at least two devices, cuda:0 and cpu!
xfail("eq", device_type="cuda"),
xfail("ge", device_type="cuda"),
xfail("gt", device_type="cuda"),
xfail("le", device_type="cuda"),
xfail("lt", device_type="cuda"),
xfail("ne", device_type="cuda"),
# RuntimeError: aten::_flash_attention_forward hit the vmap fallback which is currently disabled
xfail("torch.ops.aten._flash_attention_forward"),
}
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(
op_db + additional_op_db + autograd_function_db + custom_op_db,
dtypes=OpDTypes.any_one,
)
@opsToleranceOverride(
"TestVmapOperatorsOpInfo",
"test_vmap_exhaustive",
(
tol1(
"linalg.det",
{torch.float32: tol(atol=1e-04, rtol=1e-04)},
device_type="cuda",
),
# The following is often flaky, but just on windows.
# We should investigate if it's actually a problem or not.
tol1(
"nn.functional.conv_transpose3d",
{torch.float32: tol(atol=1e-04, rtol=1e-02)},
device_type="cuda",
),
),
)
@toleranceOverride(
{
torch.float32: tol(atol=1e-04, rtol=1e-04),
torch.complex64: tol(atol=1e-04, rtol=1e-04),
}
)
@skipOps(
"TestVmapOperatorsOpInfo",
"test_vmap_exhaustive",
vmap_fail.union(
{
# RuntimeError: Batch norm got a batched tensor as input while the running_mean or running_var,
# which will be updated in place, were not batched.
xfail("native_batch_norm"),
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
xfail("tril"), # Exception not raised on error input
xfail("triu"), # Exception not raised on error input
xfail("as_strided", "partial_views"),
# RuntimeError: output with shape [4, 4] doesn't match the broadcast shape [1, 4, 4]
xfail("addcdiv"),
xfail("addcmul"),
xfail("clamp"),
xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
# TypeError: expected Tensor as element 0 in argument 0, but got float
xfail("item"),
xfail(
"unbind_copy"
), # Batching rule not implemented for aten::unbind_copy.int.
# RuntimeError: required rank 4 tensor to use channels_last format
xfailIf(
"to",
lambda sample: (
sample.kwargs["memory_format"] == torch.channels_last
),
),
}
),
)
def test_vmap_exhaustive(self, device, dtype, op):
# needs to be fixed
inplace_failure_list = ()
self.opinfo_vmap_test(
device,
dtype,
op,
check_has_batch_rule=False,
skip_inplace=inplace_failure_list,
)
@with_tf32_off
@ops(
op_db + additional_op_db + autograd_function_db + custom_op_db,
dtypes=OpDTypes.any_one,
)
@opsToleranceOverride(
"TestVmapOperatorsOpInfo",
"test_op_has_batch_rule",
(
tol1(
"linalg.det",
{torch.float32: tol(atol=1e-04, rtol=1e-04)},
device_type="cuda",
),
),
)
@toleranceOverride(
{
torch.float32: tol(atol=1e-04, rtol=1e-04),
torch.complex64: tol(atol=1e-04, rtol=1e-04),
}
)
@skipOps(
"TestVmapOperatorsOpInfo",
"test_op_has_batch_rule",
vmap_fail.union(
{
xfail("as_strided", "partial_views"),
skip(
"to"
), # RuntimeError: required rank 4 tensor to use channels_last format
xfail("fill"),
# Batch norm got a batched tensor as input while the running_mean or running_var,
# which will be updated in place, were not batched.
xfail("native_batch_norm"),
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
xfail("histogram"),
# `index_put` OpInfo in pytorch/pytorch has
# masked index as input which is not supported
xfail("index_put", ""),
xfail("isin"),
xfail("masked_fill"),
xfail("masked_scatter"),
xfail("masked_select"),
xfail("nanquantile"),
xfail("ormqr"),
xfail("put"),
xfail("quantile"),
xfail("renorm"),
xfail("squeeze_copy"),
xfail("resize_as_"),
xfail("take"),
xfail("tensor_split"),
xfail("transpose_copy"),
xfail("to_sparse"),
# TypeError: expected Tensor as element 0 in argument 0, but got float
xfail("item"),
xfail("tril"), # Exception not raised on error input
xfail("triu"), # Exception not raised on error input
xfail(
"unbind_copy"
), # Batching rule not implemented for aten::unbind_copy.int.
xfail("__getitem__", ""),
xfail("count_nonzero"),
xfail(
"nn.functional.dropout"
), # works, can't check against for loop because of randomness inconsistency
xfail("nn.functional.scaled_dot_product_attention"), # randomness
xfail("nn.functional.multi_head_attention_forward"), # randomness
xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
xfail("resize_"),
xfail("view_as_complex"),
xfail("fft.ihfft2"),
xfail("fft.ihfftn"),
xfail("allclose"),
xfail("argwhere"),
xfail("unique_consecutive"),
xfail("unique"),
xfail("nn.functional.ctc_loss"),
xfail("nn.functional.gaussian_nll_loss"),
xfail("histc"),
xfail("as_strided"),
xfail("as_strided_copy"),
xfail("permute_copy"),
xfail("t_copy"),
xfail("unsqueeze_copy"),
xfail("istft"),
xfail("nonzero"),
xfail("nn.functional.fractional_max_pool2d"),
xfail("stft"),
xfail("isclose"),
xfail("nn.functional.fractional_max_pool3d"),
xfail("nn.functional.bilinear"),
xfail("nn.functional.embedding_bag"),
xfail("linalg.tensorsolve"),
xfail("bernoulli", ""),
xfail("nn.functional.feature_alpha_dropout", "with_train"),
xfail("nn.functional.kl_div", ""),
xfail("multinomial", ""),
xfail("pca_lowrank", ""),
xfail("normal", ""),
xfail("nn.functional.dropout2d", ""),
xfail("normal", "number_mean"),
xfail("svd_lowrank", ""),
xfail("diagflat", ""),
xfail("special.log_ndtr"),
xfail(
"narrow"
), # Batching rule not implemented for aten::narrow.Tensor
xfail("nn.functional.triplet_margin_loss", ""),
xfail("nn.functional.pdist", ""),
xfail("nn.functional.max_unpool1d", "grad"),
xfail("nn.functional.multi_margin_loss", ""),
xfail("nn.functional.multilabel_margin_loss", ""),
xfail("nn.functional.max_unpool3d", "grad"),
xfail("nn.functional.max_unpool2d", ""),
xfail("nn.functional.max_unpool2d", "grad"),
xfail("nn.functional.margin_ranking_loss", ""),
xfail("nn.functional.max_unpool1d", ""),
xfail("nn.functional.soft_margin_loss", ""),
xfail("nn.functional.max_unpool3d", ""),
xfail("linalg.ldl_solve", "", device_type="cpu"),
xfail("chalf", ""),
xfail("clamp_max", ""),
xfail("jiterator_binary_return_by_ref", device_type="cuda"),
xfail("jiterator_unary", device_type="cuda"),
xfail("jiterator_2inputs_2outputs", device_type="cuda"),
xfail("special.airy_ai"),
xfail("clamp_min", ""),
xfail("sparse.sampled_addmm"),
xfail("sparse.mm", "reduce"),
xfail("special.chebyshev_polynomial_t"),
xfail("special.chebyshev_polynomial_v"),
xfail("special.chebyshev_polynomial_u"),
xfail("special.chebyshev_polynomial_w"),
xfail("special.shifted_chebyshev_polynomial_t"),
xfail("special.shifted_chebyshev_polynomial_v"),
xfail("special.shifted_chebyshev_polynomial_u"),
xfail("special.shifted_chebyshev_polynomial_w"),
xfail("_segment_reduce", "offsets"),
xfail("index_reduce", "prod"),
xfail("index_reduce", "mean"),
xfail("index_reduce", "amin"),
xfail("index_reduce", "amax"),
xfail("special.laguerre_polynomial_l"),
xfail("special.legendre_polynomial_p"),
xfail("special.hermite_polynomial_h"),
xfail("jiterator_binary", device_type="cuda"),
xfail("jiterator_4inputs_with_extra_args", device_type="cuda"),
xfail("_segment_reduce", "lengths"),
xfail("lu_solve", ""),
xfail("special.hermite_polynomial_he"),
xfail("nn.functional.dropout3d", ""),
xfail("as_strided_scatter", ""),
xfail("equal", ""),
xfail("linalg.lu", ""),
skip("linalg.ldl_solve", ""),
skip("_softmax_backward_data"),
# One or more of the overload doesn't have a Batch rule.
xfail("bincount"),
# RuntimeError: Expected all tensors to be on the same device,
# but found at least two devices, cuda:0 and cpu!
xfail("ge", device_type="cuda"),
xfail(
"searchsorted"
), # aten::searchsorted.Scalar hit the vmap fallback which is currently disabled
}
),
)
def test_op_has_batch_rule(self, device, dtype, op):
# needs to be fixed
inplace_failures = (
"addbmm",
"addcdiv",
"addcmul",
"addmm",
"addmv",
"addr",
"baddbmm",
"clamp",
"conj_physical",
"cumprod",
"cumsum",
"floor_divide",
"fmod",
"heaviside",
"hypot",
"igamma",
"igammac",
"index_copy",
"ldexp",
"lerp",
"neg",
"nextafter",
"polygamma",
"pow",
"remainder",
"scatter",
"square",
"sub",
"trunc",
"xlogy",
)
self.opinfo_vmap_test(
device, dtype, op, check_has_batch_rule=True, skip_inplace=inplace_failures
)
def test_linalg_svd(self, device):
# linalg_svd returns a tuple of three tensors, (U, S, Vh).
# Given the same input, it may return different tensors,
# because svd isn't unique. To test that the svd is correct, we multiply
# U @ diag(S) @ Vh and check that the output from vmap matches the
# output from a for-loop.
def compute_A(out):
U, S, Vh = out
m = U.shape[-1]
n = Vh.shape[-2]
diag_S = S.new_zeros(*S.shape[:-1], m, n)
diag_S.diagonal(offset=0, dim1=-2, dim2=-1).copy_(S)
return U @ diag_S @ Vh
opinfos = [op for op in op_db if op.name == "linalg.svd"]
assert len(opinfos) > 0
for op in opinfos:
self.opinfo_vmap_test(
device,
torch.float,
op,
check_has_batch_rule=True,
postprocess_fn=compute_A,
)
def test_linalg_eigh(self, device):
# linalg_svd returns two tensors, (Q, L).
# Given the same input, it may return different tensors,
# because the eig decomposition isn't unique.
# To test that eigh is correct, we multiply
# Q @ diag(L) @ Qh and check that the output from vmap matches the
# output from a for-loop.
def compute_A(out):
L, Q = out
n = Q.shape[-1]
diag_L = L.new_zeros(*L.shape[:-1], n, n)
diag_L.diagonal(offset=0, dim1=-2, dim2=-1).copy_(L)
Qh = Q.transpose(-2, -1).conj()
return Q @ diag_L @ Qh
opinfos = [op for op in op_db if op.name == "linalg.eigh"]
assert len(opinfos) > 0
for op in opinfos:
self.opinfo_vmap_test(
device,
torch.float,
op,
check_has_batch_rule=True,
postprocess_fn=compute_A,
)
@skipIfTorchDynamo()
def test_slogdet(self, device):
# There's no OpInfo for this
def test():
B = 2
x = torch.randn(B, 5, 5, device=device)
self.vmap_outplace_test(torch.slogdet, (x,), {}, (0,))
check_vmap_fallback(self, test, torch.slogdet)
def test_index_fill(self, device):
# There's no OpInfo for these tests
B = 2
def test1():
# negative dim
x = torch.randn(B, 5, 5, device=device)
dim = -2
index = torch.tensor([[2, 3], [0, 4]], device=device)
value = 5.0
self.vmap_outplace_test(
torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
)
def test2():
# self batched, self logical rank 1, index logical rank 1
x = torch.zeros(B, 3, device=device)
dim = 0
index = torch.tensor([[0], [1]], device=device)
for value in (1.0, torch.rand((), device=device)):
self.vmap_outplace_test(
torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None)
)
def test3():
# self batched, self logical rank 1, index logical rank 0
x = torch.zeros(B, 3, device=device)
dim = 0
index = torch.tensor([0, 1], device=device)
for value in (1.0, torch.rand((), device=device)):
self.vmap_outplace_test(
torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None)
)
def test4():
# self not batched, self logical rank 0, index logical rank 1
x = torch.zeros([], device=device)
dim = 0
index = torch.tensor([[0], [0]], device=device)
for value in (1.0, torch.rand((), device=device)):
self.vmap_outplace_test(
torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
)
def test5():
# self not batched, self logical rank 0, index logical rank 0
x = torch.zeros([], device=device)
dim = 0
index = torch.tensor([0, 0], device=device)
for value in (1.0, torch.rand((), device=device)):
self.vmap_outplace_test(
torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
)
def test6():
# self not batched, self logical rank 0, index logical rank 1
x = torch.zeros(3, device=device)
dim = 0
index = torch.tensor([[0], [1]], device=device)
for value in (1.0, torch.rand((), device=device)):
self.vmap_outplace_test(
torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
)
def test7():
# self not batched, self logical rank 0, index logical rank 0
x = torch.zeros(3, device=device)
dim = 0
index = torch.tensor([0, 1], device=device)
for value in (1.0, torch.rand((), device=device)):
self.vmap_outplace_test(
torch.index_fill, (x, dim, index, value), {}, (None, None, 0, None)
)
def test8():
# self batched, self logical rank > 1, index logical rank 0
x = torch.zeros(B, 3, 3, device=device)
dim = 0
index = torch.tensor([0, 1], device=device)
for value in (1.0, torch.rand((), device=device)):
self.vmap_outplace_test(
torch.index_fill, (x, dim, index, value), {}, (0, None, 0, None)
)
for test in (test1, test2, test3, test4, test5, test6, test7, test8):
check_vmap_fallback(self, test, torch.index_fill)
def test_fill__Tensor(self, device):
# There's no OpInfo for fill_.Tensor, so here's an extra test for it.
def test():
B = 2
args = (torch.randn(B, 3, device=device), torch.randn(B))
self.vmap_inplace_test(Tensor.fill_, args, {}, (0, 0))
args = (torch.randn(3, B, device=device), torch.randn(B))
self.vmap_inplace_test(Tensor.fill_, args, {}, (-1, 0))
args = (torch.randn(3, device=device), torch.randn(B))
self.vmap_inplace_test(Tensor.fill_, args, {}, (None, 0))
args = (torch.randn(3, B, device=device), torch.randn([]))
self.vmap_inplace_test(Tensor.fill_, args, {}, (1, None))
check_vmap_fallback(self, test, Tensor.fill_)
@tf32_on_and_off(0.005)
def test_conv_double_backward(self, device):
images = torch.randn(2, 1, 5, 5, device=device)
weight = torch.randn(2, 1, 2, 2, device=device)
bias = torch.randn(2, device=device)
ggI = torch.randn_like(images)
ggW = torch.randn_like(weight)
ggb = torch.randn_like(bias)
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
transposed = False
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
gO = torch.randn_like(
F.conv2d(images, weight, bias, stride, padding, dilation, groups)
)
args = (
ggI,
ggW,
ggb,
gO,
weight,
images,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
is_cuda_sm86 = device.startswith("cuda") and torch.cuda.get_device_capability(
0
) == (8, 6)
atol, rtol = (1e-3, 1e-3) if is_cuda_sm86 else (1e-4, 1e-4)
def test():
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out, atol=atol, rtol=rtol)
check_vmap_fallback(self, test, op)
def test_isnan(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isnan
x = torch.randn(B, N, C, H, W)
x[x > 0] = float("nan")
test(self, op, (x,), in_dims=(0))
def test_sum_scalar(self, device):
x = torch.tensor([10.0], device=device)
y = vmap(torch.sum)(x)
self.assertEqual(y, x)
y = vmap(lambda x: x.sum(0))(x)
self.assertEqual(y, x)
y = vmap(lambda x: x.sum(-1))(x)
self.assertEqual(y, x)
def test_isinf(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isinf
x = torch.randn(B, N, C, H, W)
x[x > 0] = float("inf")
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
# vfdev-5: Probably, we can remove this line. Flake8 reported as unused
# test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
for op in [torch.ones_like, torch.zeros_like]:
x = torch.randn(B, N, C, H, W)
# todo(chilli): test these better
# Not testing correctness, just that they run
vmap(op, in_dims=(0,))(
x,
)
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
op = torch.flatten
x = torch.randn(2, 3, 4, 5)
test(self, op, (x, 1, 2), in_dims=(0, None, None))
def test_group_norm(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = F.group_norm
x = torch.randn(B, N, C, H, W)
weight = torch.randn(C)
bias = torch.randn(C)
test(self, op, (x, 3, weight, bias), in_dims=(0, None, None, None))
x = torch.randn(B, N, C, H, W)
weight = torch.randn(B, C)
bias = torch.randn(B, C)
test(self, op, (x, 4, weight, bias), in_dims=(0, None, 0, 0))
def test_index_put(self, device):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
self.assertEqual(
vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base
)
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
def f(x, y, z):
x[y] = z
return x
x = torch.randn(3, 4, 5, device=device)
y = torch.zeros((3, 2), device=device).long()
z = torch.randn(3, 2, 5, device=device)
test(f, x, y, z)
# indexing innermost dim
def f(t, idx, values):
t[:, idx] = values
return t
t = torch.zeros((3, 2, 3))
values = torch.ones((3, 1, 2))
idx = torch.tensor([[1, 2]]).expand((3, 2))
test(f, t, idx, values)
# indexing middle dim
def f(t, idx, values):
t[:, idx, :] = values
return t
t = torch.zeros((3, 2, 3, 3))
values = torch.ones((3, 1, 2, 3))
idx = torch.tensor([[0, 2]]).expand((3, 2))
test(f, t, idx, values)
# indexing with slices
def f(t, values):
t[:, :2, :] = values
return t
base = f(t[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0))(t, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None))(t, values[0])[0], base)
# index_put_
tensor = torch.zeros(3, 3, 4)
value = torch.ones(3, 2)
idxs = (
torch.tensor([[0], [1], [2]]),
torch.tensor([[0]]),
torch.tensor([1, 2]),
)
expected = torch.index_put_(tensor.clone(), idxs, value)
def f(t, idx, v):
torch.index_put_(t, idx, v)
return t
self.assertEqual(
vmap(f, in_dims=(0, (None, None), 0))(tensor, idxs[1:], value), expected
)
self.assertEqual(
vmap(f, in_dims=(0, (None, None), None))(tensor, idxs[1:], value[0]),
expected,
)
# boolean mask
B = 2
x = torch.randn(1, 3, 3)
gy = torch.randn(B, 1, 3, 3)
def f(x, gy):
mask = x < 1e-09
zeros = torch.zeros([])
index_put = torch.ops.aten.index_put.default(gy, [mask], zeros)
return index_put
self.vmap_outplace_test(f, (x, gy), {}, in_dims=(None, 0))
@onlyCUDA
@parametrize("inplace", [True, False])
def test_0d_tensor_index_put(self, device, inplace):
def f(t, idx, v):
fn = torch.index_put_ if inplace else torch.index_put
return fn(t, idx, v)
N = 2
t = torch.zeros((N, 5), device="cuda")
idx = torch.tensor([1, 3])
v = torch.tensor(1, dtype=t.dtype, device="cpu")
expected = torch.tensor([[0, 1, 0, 1, 0], [0, 1, 0, 1, 0]], dtype=t.dtype)
self.assertEqual(expected, vmap(f, in_dims=(0, None, None))(t, (idx,), v))
@parametrize("training", [True, False])
@parametrize("track_running_stats", [True, False])
@parametrize("affine", [True, False])
def test_batch_norm(self, device, affine, track_running_stats, training):
if not track_running_stats and not training:
return
test = functools.partial(_vmap_test, check_propagates_grad=False)
BN = torch.nn.BatchNorm2d
ensemble_size = 10
hidden_dim = 3
weights, buffers, _, _, _ = functional_init_with_buffers(BN, [ensemble_size])(
hidden_dim, affine=affine, track_running_stats=track_running_stats
)
inputs = [torch.randn(ensemble_size, 32, hidden_dim, 16, 16, device=device)]
in_dims = [0]
def append(inp, in_dim):
inputs.append(inp)
in_dims.append(in_dim)
if track_running_stats:
running_mean, running_var, _ = buffers
append(running_mean.to(device), 0)
append(running_var.to(device), 0)
else:
append(None, None)
append(None, None)
if affine:
weight, bias = weights
append(weight.to(device), 0)
append(bias.to(device), 0)
else:
append(None, None)
append(None, None)
append(training, None)
def op(inp, running_mean, running_var, weight, bias, training):
res = F.batch_norm(inp, running_mean, running_var, weight, bias, training)
if track_running_stats:
return res, running_mean, running_var
return res
test(self, op, tuple(inputs), in_dims=tuple(in_dims))
def test_torch_return_types_returns(self, device):
t = torch.randn(3, 2, 2, device=device)
self.assertTrue(
isinstance(vmap(torch.min, (0, None))(t, 0), torch.return_types.min)
)
self.assertTrue(
isinstance(vmap(torch.max, (0, None))(t, 0), torch.return_types.max)
)
self.assertTrue(
isinstance(
vmap(torch.topk, (0, None, None))(t, 1, 0), torch.return_types.topk
)
)
self.assertTrue(
isinstance(vmap(torch.linalg.eig, (0))(t), torch.return_types.linalg_eig)
)
def test_namedtuple_returns(self, device):
Point = namedtuple("Point", ["x", "y"])
def f(x, y):
return Point(x=x, y=y)
x = torch.randn(2, 5, device=device)
y = torch.randn(2, 3, device=device)
self.assertTrue(isinstance(vmap(f)(x, y), Point))
def test_inplace_on_view(self, device):
def func(leaf):
base = leaf * leaf
view = base.transpose(0, 1)
view[2:4, 2:4] *= 2
view[0:2, 0:2].diagonal().sin_()
view = view[1:3, 1:3]
view.cos_()
return view
def push_vjp(leaf, gout):
_, vjp_fn = vjp(func, leaf)
(result,) = vjp_fn(gout)
return result
leaf = torch.randn(4, 4, device=device)
gout = torch.randn(2, 2, device=device)
args = (leaf, gout)
for (
batched_args,
in_dims,
_,
) in generate_vmap_inputs(args, {}):
if in_dims[1] is None:
# triggers some composite compliance problem
continue
self.vmap_outplace_test(push_vjp, batched_args, {}, in_dims)
def test_advanced_indexing(self, device):
def test(f, args):
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(f, args, {}):
self.assertEqual(loop_out, batched_out)
def f(x, idx):
return x[:, idx]
def f2(x, idx):
return x[idx, :]
def f3(x, idx):
return x[:, :, idx]
inps = (
torch.randn(5, 5, 5, device=device),
torch.randn(5, 5, 5, 5, device=device),
torch.randn(5, 5, 5, 5, 5, device=device),
)
idxes = (
torch.tensor([0, 1, 2], device=device),
torch.tensor([0, 1, 2], device=device).reshape(3, 1),
torch.tensor([0, 1, 2], device=device).reshape(3, 1, 1),
)
for inp, idx in itertools.product(inps, idxes):
test(f, (inp, idx))
test(f2, (inp, idx))
test(f3, (inp, idx))
def test_nested_advanced_indexing(self, device):
e = torch.rand(7, 4, device=device)
idx = torch.tensor([0, 1], device=device).view(2, 1)
# simple reference implementation for comparison
def _fake_vmap(f, in_dims=0, out_dims=0):
def w(input):
r = [f(input.select(in_dims, i)) for i in range(input.size(in_dims))]
return torch.stack(r, out_dims)
return w
def with_vmap(_vmap):
def g(idx_):
def f(e_):
return e_[idx_]
return _vmap(f, in_dims=1)(e)
r = _vmap(g)(idx)
return r
a = with_vmap(vmap)
b = with_vmap(_fake_vmap)
self.assertEqual(a, b)
@ops(
filter(lambda op: "linalg" in op.name, op_db + additional_op_db),
allowed_dtypes=(torch.float,),
)
@skipOps(
"TestVmapOperatorsOpInfo",
"test_vmap_linalg_failure_1D_input",
{
xfail("linalg.vector_norm"), # can accept vector inputs
xfail("linalg.norm"), # can accept vector inputs
xfail("linalg.norm", "subgradients_at_zero"), # can accept vector inputs
xfail("linalg.vander"), # can accept vector inputs
skip(
"linalg.multi_dot"
), # accepts list of tensor inputs, has its own special test
xfail("linalg.vecdot"),
# throws in vmap on CUDA
# IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)
# but it passes locally
xfail("linalg.diagonal"),
skip("linalg.matrix_norm", ""),
skip("linalg.ldl_solve", ""),
},
)
def test_vmap_linalg_failure_1D_input(self, device, dtype, op):
for sample in op.sample_inputs(device, dtype, requires_grad=False):
if sample.input.dim() != 2 or sample.input.shape[0] == 0:
continue
test_input = sample.input[
0
] # using the sample input avoids numerical inconsistency issues
with self.assertRaisesRegex(RuntimeError, "dimension"):
op(test_input, *sample.args, **sample.kwargs)
def op_wrapper(inp):
return op(inp, *sample.args, **sample.kwargs)
# square inputs are more likely to pass linalg checks
test_input = test_input.expand(test_input.shape[0], test_input.shape[0])
with self.assertRaisesRegex(RuntimeError, "dimension"):
return vmap(op_wrapper)(test_input)
def test_vmap_multi_dot_failure_1D_input(self):
# special exception for first and last tensors so making giving 3 items avoids special cases
inputs = (torch.randn(3, 3), torch.randn(3), torch.randn(3, 3))
with self.assertRaisesRegex(RuntimeError, "tensor 1 must be 2D but got 1D"):
torch.linalg.multi_dot(inputs)
# square inputs are more likely to pass linalg checks
inputs = tuple(i.expand(i.shape[0], i.shape[0]) for i in inputs)
with self.assertRaisesRegex(RuntimeError, "tensor 1 must be 2D but got 1D"):
return vmap(torch.linalg.multi_dot)(inputs)
def test_vmap_escaped_error(self):
escaped = None
def f(x):
nonlocal escaped
escaped = x
return x**2
x = torch.randn([3, 3, 3, 3, 3])
vmap(f)(x)
common_message = (
r"your tensor may have escaped from inside a function being vmapped.*{0}.*"
)
# Note: These are not a complete set of tests for all possible functions calling 'vmap_check_escaped'
with self.assertRaisesRegex(
RuntimeError, common_message.format("gen_vmap_plumbing")
):
escaped.sin()
with self.assertRaisesRegex(
RuntimeError, common_message.format("boxed_tensor_inputs_batch_rule")
):
escaped.sin_()
with self.assertRaisesRegex(
RuntimeError, common_message.format("gen_vmap_inplace_plumbing")
):
escaped.mul_(1)
with self.assertRaisesRegex(
RuntimeError, common_message.format("binary_cross_entropy_plumbing")
):
torch.nn.functional.binary_cross_entropy(escaped, torch.zeros([3, 3, 3, 3]))
with self.assertRaisesRegex(
RuntimeError, common_message.format("boxed_existing_bdim_all_batch_rule")
):
torch.nn.functional.adaptive_max_pool2d(escaped, output_size=(1, 1))
with self.assertRaisesRegex(
RuntimeError, common_message.format("boxed_reduction_batch_rule")
):
escaped.argmin()
a = torch.zeros([4, 4, 4, 4])
b = torch.zeros([4, 4, 4, 4], dtype=torch.long)
with self.assertRaisesRegex(
RuntimeError, common_message.format("boxed_all_tensors_have_optional_bdim")
):
torch.ops.aten.adaptive_max_pool2d_backward(escaped, a, b)
vmap(f)(torch.tensor([[0, 0], [0, 0]], dtype=torch.int))
with self.assertRaisesRegex(
RuntimeError, common_message.format("gen_vmap_plumbing_no_returns")
):
torch.ops.aten._linalg_check_errors(escaped, "linalg.inv", is_matrix=False)
def test_vmap_with_anomaly_detection(self):
with torch.autograd.set_detect_anomaly(True):
x = torch.zeros(3) - 1
def fn(x):
return x.sum()
per_sample_grad = vmap(grad(fn))(x)
self.assertEqual(per_sample_grad, torch.ones_like(x))
def bad_fn(x):
return x.sqrt().sum()
err_msg = "Function 'SqrtBackward0' returned nan values in its 0th output."
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(grad(bad_fn))(x)
def test_searchsorted_bucketize(self, device):
# OpInfo generates test with repeated samples in batch dim.
# Thus we test explicitly with different samples across a batch.
def test():
boundaries = torch.tensor(
[[1, 4, 5, 7, 9], [1, 2, 6, 8, 10]], device=device
)
v = torch.tensor(3, device=device)
self.vmap_outplace_test(torch.searchsorted, (boundaries, v), {}, (0, None))
self.vmap_outplace_test(torch.bucketize, (v, boundaries), {}, (None, 0))
boundaries = torch.tensor([[1, 4, 5, 7, 9], [1, 2, 4, 8, 9]], device=device)
v = torch.tensor([3, 4], device=device)
self.vmap_outplace_test(torch.searchsorted, (boundaries, v), {}, (0, 0))
self.vmap_outplace_test(torch.bucketize, (v, boundaries), {}, (0, 0))
test()
@markDynamoStrictTest
| TestVmapOperatorsOpInfo |
python | getsentry__sentry | src/sentry/replays/endpoints/project_replay_details.py | {
"start": 814,
"end": 1185
} | class ____(ProjectPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin"],
"POST": ["project:write", "project:admin"],
"PUT": ["project:write", "project:admin"],
"DELETE": ["project:read", "project:write", "project:admin"],
}
@region_silo_endpoint
@extend_schema(tags=["Replays"])
| ReplayDetailsPermission |
python | catalyst-team__catalyst | catalyst/contrib/datasets/mnist.py | {
"start": 11270,
"end": 12958
} | class ____(MNIST):
"""Partial MNIST dataset.
Args:
num_samples: number of examples per selected class/digit. default: 100
classes: list selected MNIST classes. default: (0, 1, 2)
**kwargs: MNIST parameters
Examples:
>>> dataset = PartialMNIST(".", download=True)
>>> len(dataset)
300
>>> sorted(set([d.item() for d in dataset.targets]))
[0, 1, 2]
>>> torch.bincount(dataset.targets)
tensor([100, 100, 100])
"""
def __init__(
self,
num_samples: int = 100,
classes: Optional[Sequence] = (0, 1, 2),
**kwargs,
):
self.num_samples = num_samples
self.classes = sorted(classes) if classes else list(range(10))
super().__init__(**kwargs)
self.data, self.targets = self._prepare_subset(
self.data, self.targets, num_samples=self.num_samples, classes=self.classes
)
@staticmethod
def _prepare_subset(
full_data: torch.Tensor,
full_targets: torch.Tensor,
num_samples: int,
classes: Sequence,
):
counts = {d: 0 for d in classes}
indexes = []
for idx, target in enumerate(full_targets):
label = target.item()
if counts.get(label, float("inf")) >= num_samples:
continue
indexes.append(idx)
counts[label] += 1
if all(counts[k] >= num_samples for k in counts):
break
data = full_data[indexes]
targets = full_targets[indexes]
return data, targets
__all__ = ["MNIST", "MnistMLDataset", "MnistQGDataset", "PartialMNIST"]
| PartialMNIST |
python | ansible__ansible | test/integration/targets/collections/test_task_resolved_plugin/callback_plugins/display_resolved_action.py | {
"start": 747,
"end": 1583
} | class ____(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'display_resolved_action'
CALLBACK_NEEDS_ENABLED = True
def __init__(self, *args, **kwargs):
super(CallbackModule, self).__init__(*args, **kwargs)
def v2_playbook_on_task_start(self, task, is_conditional):
if self.get_option("test_on_task_start"):
self._display.display(f"v2_playbook_on_task_start: {task.action} == {task.resolved_action}")
def v2_runner_item_on_ok(self, result):
self._display.display(f"v2_runner_item_on_ok: {result.task.action} == {result.task.resolved_action}")
def v2_runner_on_ok(self, result):
if not result.task.loop:
self._display.display(f"v2_runner_on_ok: {result.task.action} == {result.task.resolved_action}")
| CallbackModule |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/base.py | {
"start": 146,
"end": 6053
} | class ____(BaseReader):
"""
Knowledge base reader.
Crawls and reads articles from a knowledge base/help center with Playwright.
Tested on Zendesk and Intercom CMS, may work on others.
Can be run in headless mode but it may be blocked by Cloudflare. Run it headed to be safe.
Times out occasionally, just increase the default time out if it does.
Requires the `playwright` package.
Args:
root_url (str): the base url of the knowledge base, with no trailing slash
e.g. 'https://support.intercom.com'
link_selectors (List[str]): list of css selectors to find links to articles while crawling
e.g. ['.article-list a', '.article-list a']
article_path (str): the url path of articles on this domain so the crawler knows when to stop
e.g. '/articles'
title_selector (Optional[str]): css selector to find the title of the article
e.g. '.article-title'
subtitle_selector (Optional[str]): css selector to find the subtitle/description of the article
e.g. '.article-subtitle'
body_selector (Optional[str]): css selector to find the body of the article
e.g. '.article-body'
"""
def __init__(
self,
root_url: str,
link_selectors: List[str],
article_path: str,
title_selector: Optional[str] = None,
subtitle_selector: Optional[str] = None,
body_selector: Optional[str] = None,
max_depth: int = 100,
) -> None:
"""Initialize with parameters."""
self.root_url = root_url
self.link_selectors = link_selectors
self.article_path = article_path
self.title_selector = title_selector
self.subtitle_selector = subtitle_selector
self.body_selector = body_selector
self.max_depth = max_depth
def load_data(self) -> List[Document]:
"""Load data from the knowledge base."""
from playwright.sync_api import sync_playwright
with sync_playwright() as p:
browser = p.chromium.launch(headless=False)
# Crawl
article_urls = self.get_article_urls(
browser, self.root_url, self.root_url, self.max_depth
)
# Scrape
documents = []
for url in article_urls:
article = self.scrape_article(
browser,
url,
)
extra_info = {
"title": article["title"],
"subtitle": article["subtitle"],
"url": article["url"],
}
documents.append(Document(text=article["body"], extra_info=extra_info))
browser.close()
return documents
def scrape_article(
self,
browser: Any,
url: str,
) -> Dict[str, str]:
"""
Scrape a single article url.
Args:
browser (Any): a Playwright Chromium browser.
url (str): URL of the article to scrape.
Returns:
Dict[str, str]: a mapping of article attributes to their values.
"""
page = browser.new_page(ignore_https_errors=True)
page.set_default_timeout(60000)
page.goto(url, wait_until="domcontentloaded")
title = (
(
page.query_selector(self.title_selector).evaluate(
"node => node.innerText"
)
)
if self.title_selector
else ""
)
subtitle = (
(
page.query_selector(self.subtitle_selector).evaluate(
"node => node.innerText"
)
)
if self.subtitle_selector
else ""
)
body = (
(page.query_selector(self.body_selector).evaluate("node => node.innerText"))
if self.body_selector
else ""
)
page.close()
print("scraped:", url)
return {"title": title, "subtitle": subtitle, "body": body, "url": url}
def get_article_urls(
self,
browser: Any,
root_url: str,
current_url: str,
max_depth: int = 100,
depth: int = 0,
) -> List[str]:
"""
Recursively crawl through the knowledge base to find a list of articles.
Args:
browser (Any): a Playwright Chromium browser.
root_url (str): root URL of the knowledge base.
current_url (str): current URL that is being crawled.
max_depth (int): maximum recursion level for the crawler
depth (int): current depth level
Returns:
List[str]: a list of URLs of found articles.
"""
if depth >= max_depth:
print(f"Reached max depth ({max_depth}): {current_url}")
return []
page = browser.new_page(ignore_https_errors=True)
page.set_default_timeout(60000)
page.goto(current_url, wait_until="domcontentloaded")
# If this is a leaf node aka article page, return itself
if self.article_path in current_url:
print("Found an article: ", current_url)
page.close()
return [current_url]
# Otherwise crawl this page and find all the articles linked from it
article_urls = []
links = []
for link_selector in self.link_selectors:
ahrefs = page.query_selector_all(link_selector)
links.extend(ahrefs)
for link in links:
url = root_url + page.evaluate("(node) => node.getAttribute('href')", link)
article_urls.extend(
self.get_article_urls(browser, root_url, url, max_depth, depth + 1)
)
page.close()
return article_urls
| KnowledgeBaseWebReader |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 155434,
"end": 161421
} | class ____(Request):
"""
Get raw data for a specific metric variants in the task
:param task: Task ID
:type task: str
:param metric: Metric and variants for which to return data points
:type metric: MetricVariants
:param key: Array of x axis to return. Supported values: iter - iteration
number timestamp - event timestamp as milliseconds since epoch
:type key: ScalarKeyEnum
:param batch_size: The number of data points to return for this call. Optional,
the default value is 10000. Maximum batch size is 200000
:type batch_size: int
:param count_total: Count the total number of data points. If false, total
number of data points is not counted and null is returned
:type count_total: bool
:param scroll_id: Optional Scroll ID. Use to get more data points following a
previous call
:type scroll_id: str
"""
_service = "events"
_action = "scalar_metrics_iter_raw"
_version = "2.20"
_schema = {
"definitions": {
"metric_variants": {
"metric": {"description": "The metric name", "type": "string"},
"type": "object",
"variants": {
"description": "The names of the metric variants",
"items": {"type": "string"},
"type": "array",
},
},
"scalar_key_enum": {
"enum": ["iter", "timestamp", "iso_time"],
"type": "string",
},
},
"properties": {
"batch_size": {
"default": 10000,
"description": "The number of data points to return for this call. Optional, the default value is 10000. Maximum batch size is 200000",
"type": "integer",
},
"count_total": {
"default": False,
"description": "Count the total number of data points. If false, total number of data points is not counted and null is returned",
"type": "boolean",
},
"key": {
"$ref": "#/definitions/scalar_key_enum",
"description": "Array of x axis to return. Supported values:iter - iteration numbertimestamp - event timestamp as milliseconds since epoch",
},
"metric": {
"$ref": "#/definitions/metric_variants",
"description": "Metric and variants for which to return data points",
},
"scroll_id": {
"description": "Optional Scroll ID. Use to get more data points following a previous call",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "metric"],
"type": "object",
}
def __init__(
self,
task: str,
metric: Any,
key: Any = None,
batch_size: Optional[int] = 10000,
count_total: Optional[bool] = False,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(ScalarMetricsIterRawRequest, self).__init__(**kwargs)
self.task = task
self.metric = metric
self.key = key
self.batch_size = batch_size
self.count_total = count_total
self.scroll_id = scroll_id
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("metric")
def metric(self) -> Any:
return self._property_metric
@metric.setter
def metric(self, value: Any) -> None:
if value is None:
self._property_metric = None
return
if isinstance(value, dict):
value = MetricVariants.from_dict(value)
else:
self.assert_isinstance(value, "metric", MetricVariants)
self._property_metric = value
@schema_property("key")
def key(self) -> Any:
return self._property_key
@key.setter
def key(self, value: Any) -> None:
if value is None:
self._property_key = None
return
if isinstance(value, six.string_types):
try:
value = ScalarKeyEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "key", enum.Enum)
self._property_key = value
@schema_property("batch_size")
def batch_size(self) -> Optional[int]:
return self._property_batch_size
@batch_size.setter
def batch_size(self, value: Optional[int]) -> None:
if value is None:
self._property_batch_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "batch_size", six.integer_types)
self._property_batch_size = value
@schema_property("count_total")
def count_total(self) -> Optional[bool]:
return self._property_count_total
@count_total.setter
def count_total(self, value: Optional[bool]) -> None:
if value is None:
self._property_count_total = None
return
self.assert_isinstance(value, "count_total", (bool,))
self._property_count_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| ScalarMetricsIterRawRequest |
python | kamyu104__LeetCode-Solutions | Python/design-phone-directory.py | {
"start": 145,
"end": 1510
} | class ____(object):
def __init__(self, maxNumbers):
"""
Initialize your data structure here
@param maxNumbers - The maximum numbers that can be stored in the phone directory.
:type maxNumbers: int
"""
self.__curr = 0
self.__numbers = range(maxNumbers)
self.__used = [False] * maxNumbers
def get(self):
"""
Provide a number which is not assigned to anyone.
@return - Return an available number. Return -1 if none is available.
:rtype: int
"""
if self.__curr == len(self.__numbers):
return -1
number = self.__numbers[self.__curr]
self.__curr += 1
self.__used[number] = True
return number
def check(self, number):
"""
Check if a number is available or not.
:type number: int
:rtype: bool
"""
return 0 <= number < len(self.__numbers) and \
not self.__used[number]
def release(self, number):
"""
Recycle or release a number.
:type number: int
:rtype: void
"""
if not 0 <= number < len(self.__numbers) or \
not self.__used[number]:
return
self.__used[number] = False
self.__curr -= 1
self.__numbers[self.__curr] = number
| PhoneDirectory |
python | eth-brownie__brownie | brownie/_cli/console.py | {
"start": 14155,
"end": 22959
} | class ____(AutoSuggest):
"""
AutoSuggest subclass to display contract input hints.
If an object has an `_autosuggest` method, it is used to build the suggestion.
Otherwise, names and default values are pulled from `__code__` and `__defaults__`
respectively.
"""
def __init__(self, console, local_dict: Dict[str, Any]) -> None:
self.console = console
self.locals = local_dict
super().__init__()
def get_suggestion(self, buffer, document):
try:
text = "\n".join(self.console.buffer + [document.text])
base, _, comma_data = _parse_document(self.locals, text)
# find the active function call
del base[-1]
while base[-1] == self.locals:
del base[-1]
del comma_data[-1]
obj = base[-1]
# calculate distance from last comma
count, offset = comma_data[-1]
lines = text.count("\n") + 1
if offset[0] < lines:
distance = len(document.text)
else:
distance = len(document.text) - offset[1]
if hasattr(obj, "_autosuggest"):
inputs = obj._autosuggest(obj)
else:
inputs = [f" {i}" for i in obj.__code__.co_varnames[: obj.__code__.co_argcount]]
if obj.__defaults__:
for i in range(-1, -1 - len(obj.__defaults__), -1):
inputs[i] = f"{inputs[i]}={obj.__defaults__[i]}"
if inputs and inputs[0] in (" self", " cls"):
inputs = inputs[1:]
if not count and not inputs:
return Suggestion(")")
inputs[0] = inputs[0][1:]
remaining_inputs = inputs[count:]
remaining_inputs[0] = remaining_inputs[0][distance:]
return Suggestion(f"{','.join(remaining_inputs)})")
except Exception:
return
def _obj_from_token(obj, token):
key = token.string
if isinstance(obj, dict):
return obj[key]
if isinstance(obj, Iterable):
try:
return obj[int(key)] # type: ignore [index]
except ValueError:
pass
return getattr(obj, key)
def _parse_document(local_dict, text):
if text in _parser_cache:
if _parser_cache[text] is None:
raise SyntaxError
# return copies of lists so we can mutate them without worry
active_objects, current_text, comma_data = _parser_cache[text]
return active_objects.copy(), current_text, comma_data.copy()
last_token: Optional[tokenize.TokenInfo] = None
active_objects = [local_dict]
pending_active = []
# number of open parentheses
paren_count = 0
# is a square bracket open?
is_open_sqb = False
# number of comments at this call depth, end offset of the last comment
comma_data = [(0, (0, 0))]
token_iter = tokenize.generate_tokens(StringIO(text).readline)
while True:
try:
token = next(token_iter)
except (tokenize.TokenError, StopIteration):
break
if token.exact_type in (0, 4):
# end marker, newline
break
if token.exact_type in (5, 6, 61):
# indent, dedent, non-terminating newline
# these can be ignored
continue
if token.type == 54 and token.string not in ",.[]()":
# if token is an operator or delimiter but not a parenthesis or dot, this is
# the start of a new expression. restart evaluation from the next token.
last_token = None
active_objects[-1] = local_dict
continue
if token.exact_type == 8:
# right parenthesis `)`
paren_count -= 1
del comma_data[-1]
del active_objects[-1]
last_token = None
if active_objects[-1] != local_dict:
try:
pending_active = active_objects[-1].__annotations__["return"]
if isinstance(pending_active, str):
module = sys.modules[active_objects[-1].__module__]
pending_active = getattr(module, pending_active)
except (AttributeError, KeyError):
pending_active = None
active_objects[-1] = None
elif token.exact_type == 10:
# right square bracket `]`
if not is_open_sqb:
# no support for nested index references or multiple keys
_parser_cache[text] = None
raise SyntaxError
is_open_sqb = False
del comma_data[-1]
del active_objects[-1]
pending_active = None
if active_objects[-1] != local_dict:
try:
# try to get the actual object first
pending_active = _obj_from_token(active_objects[-1], last_token)
except (AttributeError, TypeError):
# if we can't get the object, use the return type from the annotation
try:
func = active_objects[-1].__getitem__.__func__
pending_active = func.__annotations__["return"]
if isinstance(pending_active, str):
module = sys.modules[active_objects[-1].__module__]
pending_active = getattr(module, pending_active)
except (AttributeError, KeyError):
pass
except Exception:
pass
active_objects[-1] = None
last_token = None
elif token.exact_type == 12:
# comma `,`
comma_data[-1] = (comma_data[-1][0] + 1, token.end)
last_token = None
active_objects[-1] = local_dict
pending_active = None
elif token.exact_type == 23:
# period `.`
if pending_active:
active_objects[-1] = pending_active
pending_active = None
else:
active_objects[-1] = _obj_from_token(active_objects[-1], last_token)
last_token = None
elif token.exact_type == 7:
# left parenthesis `(`
if pending_active:
active_objects[-1] = pending_active
pending_active = None
if last_token:
obj = _obj_from_token(active_objects[-1], last_token)
if inspect.isclass(obj):
obj = obj.__init__
elif (
callable(obj)
and not hasattr(obj, "_autosuggest")
and not inspect.ismethod(obj)
and not inspect.isfunction(obj)
):
# object is a callable class instance
obj = obj.__call__
# ensure we aren't looking at a decorator
if hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
active_objects[-1] = obj
last_token = None
if not hasattr(active_objects[-1], "__call__"):
raise SyntaxError
paren_count += 1
comma_data.append((0, token.end))
active_objects.append(local_dict)
elif token.exact_type == 9:
# left square bracket `[`
if is_open_sqb:
_parser_cache[text] = None
raise SyntaxError
if pending_active:
active_objects[-1] = pending_active
pending_active = None
if last_token:
active_objects[-1] = _obj_from_token(active_objects[-1], last_token)
last_token = None
if not hasattr(active_objects[-1], "__getitem__"):
raise SyntaxError
is_open_sqb = True
comma_data.append((0, token.end))
active_objects.append(local_dict)
else:
if pending_active or last_token:
_parser_cache[text] = None
raise SyntaxError
last_token = token
# if the final token is a name or number, it is the current text we are basing
# the completion suggestion on. otherwise, there is no current text.
current_text = ""
if text.endswith(" "):
active_objects[-1] = local_dict
elif last_token and last_token.type in (1, 2, 3):
current_text = last_token.string
_parser_cache[text] = (active_objects, current_text, comma_data)
return active_objects.copy(), current_text, comma_data.copy()
| ConsoleAutoSuggest |
python | fsspec__filesystem_spec | fsspec/implementations/libarchive.py | {
"start": 2362,
"end": 7098
} | class ____(AbstractArchiveFileSystem):
"""Compressed archives as a file-system (read-only)
Supports the following formats:
tar, pax , cpio, ISO9660, zip, mtree, shar, ar, raw, xar, lha/lzh, rar
Microsoft CAB, 7-Zip, WARC
See the libarchive documentation for further restrictions.
https://www.libarchive.org/
Keeps file object open while instance lives. It only works in seekable
file-like objects. In case the filesystem does not support this kind of
file object, it is recommended to cache locally.
This class is pickleable, but not necessarily thread-safe (depends on the
platform). See libarchive documentation for details.
"""
root_marker = ""
protocol = "libarchive"
cachable = False
def __init__(
self,
fo="",
mode="r",
target_protocol=None,
target_options=None,
block_size=DEFAULT_BLOCK_SIZE,
**kwargs,
):
"""
Parameters
----------
fo: str or file-like
Contains ZIP, and must exist. If a str, will fetch file using
:meth:`~fsspec.open_files`, which must return one file exactly.
mode: str
Currently, only 'r' accepted
target_protocol: str (optional)
If ``fo`` is a string, this value can be used to override the
FS protocol inferred from a URL
target_options: dict (optional)
Kwargs passed when instantiating the target FS, if ``fo`` is
a string.
"""
super().__init__(self, **kwargs)
if mode != "r":
raise ValueError("Only read from archive files accepted")
if isinstance(fo, str):
files = open_files(fo, protocol=target_protocol, **(target_options or {}))
if len(files) != 1:
raise ValueError(
f'Path "{fo}" did not resolve to exactly one file: "{files}"'
)
fo = files[0]
self.of = fo
self.fo = fo.__enter__() # the whole instance is a context
self.block_size = block_size
self.dir_cache = None
@contextmanager
def _open_archive(self):
self.fo.seek(0)
with custom_reader(self.fo, block_size=self.block_size) as arc:
yield arc
@classmethod
def _strip_protocol(cls, path):
# file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
fields = {
"name": "pathname",
"size": "size",
"created": "ctime",
"mode": "mode",
"uid": "uid",
"gid": "gid",
"mtime": "mtime",
}
if self.dir_cache is not None:
return
self.dir_cache = {}
list_names = []
with self._open_archive() as arc:
for entry in arc:
if not entry.isdir and not entry.isfile:
# Skip symbolic links, fifo entries, etc.
continue
self.dir_cache.update(
{
dirname: {"name": dirname, "size": 0, "type": "directory"}
for dirname in self._all_dirnames(set(entry.name))
}
)
f = {key: getattr(entry, fields[key]) for key in fields}
f["type"] = "directory" if entry.isdir else "file"
list_names.append(entry.name)
self.dir_cache[f["name"]] = f
# libarchive does not seem to return an entry for the directories (at least
# not in all formats), so get the directories names from the files names
self.dir_cache.update(
{
dirname: {"name": dirname, "size": 0, "type": "directory"}
for dirname in self._all_dirnames(list_names)
}
)
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise NotImplementedError
data = b""
with self._open_archive() as arc:
for entry in arc:
if entry.pathname != path:
continue
if entry.size == 0:
# empty file, so there are no blocks
break
for block in entry.get_blocks(entry.size):
data = block
break
else:
raise ValueError
return MemoryFile(fs=self, path=path, data=data)
| LibArchiveFileSystem |
python | realpython__materials | dwitter-part-1/source_code_final/dwitter/admin.py | {
"start": 115,
"end": 179
} | class ____(admin.StackedInline):
model = Profile
| ProfileInline |
python | getsentry__sentry | src/sentry/dynamic_sampling/models/projects_rebalancing.py | {
"start": 295,
"end": 477
} | class ____(ModelInput):
classes: list[RebalancedItem]
sample_rate: float
def validate(self) -> bool:
return 0.0 <= self.sample_rate <= 1.0
| ProjectsRebalancingInput |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 46419,
"end": 47728
} | class ____(Tap, RegionSelectTool):
''' *toolbar icon*: |poly_select_icon|
The polygon selection tool allows users to make selections on a
Plot by indicating a polygonal region with mouse clicks. single
clicks (or taps) add successive points to the definition of the
polygon, and a press click (or tap) indicates the selection
region is ready.
See :ref:`ug_styling_plots_selected_unselected_glyphs` for information
on styling selected and unselected glyphs.
.. note::
Selections can be comprised of multiple regions, even those
made by different selection tools. Hold down the SHIFT key
while making a selection to append the new selection to any
previous selection that might exist.
.. |poly_select_icon| image:: /_images/icons/polygon-select.svg
:height: 24px
:alt: Icon of a dashed trapezoid with an arrow pointing at the lower right representing the polygon-selection tool in the toolbar.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help="""
A shaded annotation drawn to indicate the selection region.
""")
| PolySelectTool |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP042.py | {
"start": 24,
"end": 50
} | class ____(str, Enum): ...
| A |
python | py-pdf__pypdf | tests/test_protocols.py | {
"start": 89,
"end": 411
} | class ____(PdfObjectProtocol):
pass
def test_pdfobjectprotocol():
o = IPdfObjectProtocol()
assert o.clone(None, False, None) is None
assert o._reference_clone(None, None) is None
assert o.get_object() is None
assert o.hash_value() is None
assert o.write_to_stream(None) is None
| IPdfObjectProtocol |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/scheduler.py | {
"start": 7463,
"end": 10951
} | class ____(Scheduler, ConfigurableClass):
"""Default scheduler implementation that submits runs from the long-lived ``dagster-daemon``
process. Periodically checks each running schedule for execution times that don't yet
have runs and launches them.
"""
def __init__(
self,
max_catchup_runs: int = DEFAULT_MAX_CATCHUP_RUNS,
max_tick_retries: int = 0,
inst_data: Optional[ConfigurableClassData] = None,
):
self.max_catchup_runs = check.opt_int_param(
max_catchup_runs, "max_catchup_runs", DEFAULT_MAX_CATCHUP_RUNS
)
self.max_tick_retries = check.opt_int_param(max_tick_retries, "max_tick_retries", 0)
self._inst_data = inst_data
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls):
return {
"max_catchup_runs": Field(
IntSource,
is_required=False,
default_value=DEFAULT_MAX_CATCHUP_RUNS,
description="""For partitioned schedules, controls the maximum number of past
partitions for each schedule that will be considered when looking for missing
runs . Generally this parameter will only come into play if the scheduler
falls behind or launches after experiencing downtime. This parameter will not be checked for
schedules without partition sets (for example, schedules created using the :py:func:`@schedule <dagster.schedule>` decorator) - only the most recent execution time will be considered for those schedules.
Note: No matter what this value is, the scheduler will never launch a run from a time
before the schedule was turned on, even if the schedule's ``start_date`` is earlier. If
you want to launch runs for earlier partitions, `launch a backfill </concepts/partitions-schedules-sensors/backfills>`_.
""",
),
"max_tick_retries": Field(
IntSource,
default_value=0,
is_required=False,
description=(
"For each schedule tick that raises an error, the number of times to retry the tick."
),
),
}
@classmethod
def from_config_value(
cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]
) -> Self:
return cls(inst_data=inst_data, **config_value)
def debug_info(self) -> str:
return ""
def wipe(self, instance: DagsterInstance) -> None:
pass
def _get_or_create_logs_directory(
self, instance: DagsterInstance, schedule_origin_id: str
) -> str:
check.inst_param(instance, "instance", DagsterInstance)
check.str_param(schedule_origin_id, "schedule_origin_id")
logs_directory = os.path.join(instance.schedules_directory(), "logs", schedule_origin_id)
if not os.path.isdir(logs_directory):
mkdir_p(logs_directory)
return logs_directory
def get_logs_path(self, instance: DagsterInstance, schedule_origin_id: str) -> str:
check.inst_param(instance, "instance", DagsterInstance)
check.str_param(schedule_origin_id, "schedule_origin_id")
logs_directory = self._get_or_create_logs_directory(instance, schedule_origin_id)
return os.path.join(logs_directory, "scheduler.log")
| DagsterDaemonScheduler |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/session.py | {
"start": 50079,
"end": 186465
} | class ____(_SessionClassMethods, EventTarget):
"""Manages persistence operations for ORM-mapped objects.
The :class:`_orm.Session` is **not safe for use in concurrent threads.**.
See :ref:`session_faq_threadsafe` for background.
The Session's usage paradigm is described at :doc:`/orm/session`.
"""
_is_asyncio = False
dispatch: dispatcher[Session]
identity_map: IdentityMap
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
.. seealso::
:func:`.identity_key` - helper function to produce the keys used
in this dictionary.
"""
_new: Dict[InstanceState[Any], Any]
_deleted: Dict[InstanceState[Any], Any]
bind: Optional[Union[Engine, Connection]]
__binds: Dict[_SessionBindKey, _SessionBind]
_flushing: bool
_warn_on_events: bool
_transaction: Optional[SessionTransaction]
_nested_transaction: Optional[SessionTransaction]
hash_key: int
autoflush: bool
expire_on_commit: bool
enable_baked_queries: bool
twophase: bool
join_transaction_mode: JoinTransactionMode
execution_options: _ExecuteOptions = util.EMPTY_DICT
_query_cls: Type[Query[Any]]
_close_state: _SessionCloseState
def __init__(
self,
bind: Optional[_SessionBind] = None,
*,
autoflush: bool = True,
future: Literal[True] = True,
expire_on_commit: bool = True,
autobegin: bool = True,
twophase: bool = False,
binds: Optional[Dict[_SessionBindKey, _SessionBind]] = None,
enable_baked_queries: bool = True,
info: Optional[_InfoType] = None,
query_cls: Optional[Type[Query[Any]]] = None,
autocommit: Literal[False] = False,
join_transaction_mode: JoinTransactionMode = "conditional_savepoint",
close_resets_only: Union[bool, _NoArg] = _NoArg.NO_ARG,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
):
r"""Construct a new :class:`_orm.Session`.
See also the :class:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autoflush: When ``True``, all query operations will issue a
:meth:`~.Session.flush` call to this ``Session`` before proceeding.
This is a convenience feature so that :meth:`~.Session.flush` need
not be called repeatedly in order for database queries to retrieve
results.
.. seealso::
:ref:`session_flushing` - additional background on autoflush
:param autobegin: Automatically start transactions (i.e. equivalent to
invoking :meth:`_orm.Session.begin`) when database access is
requested by an operation. Defaults to ``True``. Set to
``False`` to prevent a :class:`_orm.Session` from implicitly
beginning transactions after construction, as well as after any of
the :meth:`_orm.Session.rollback`, :meth:`_orm.Session.commit`,
or :meth:`_orm.Session.close` methods are called.
.. versionadded:: 2.0
.. seealso::
:ref:`session_autobegin_disable`
:param bind: An optional :class:`_engine.Engine` or
:class:`_engine.Connection` to
which this ``Session`` should be bound. When specified, all SQL
operations performed by this session will execute via this
connectable.
:param binds: A dictionary which may specify any number of
:class:`_engine.Engine` or :class:`_engine.Connection`
objects as the source of
connectivity for SQL operations on a per-entity basis. The keys
of the dictionary consist of any series of mapped classes,
arbitrary Python classes that are bases for mapped classes,
:class:`_schema.Table` objects and :class:`_orm.Mapper` objects.
The
values of the dictionary are then instances of
:class:`_engine.Engine`
or less commonly :class:`_engine.Connection` objects.
Operations which
proceed relative to a particular mapped class will consult this
dictionary for the closest matching entity in order to determine
which :class:`_engine.Engine` should be used for a particular SQL
operation. The complete heuristics for resolution are
described at :meth:`.Session.get_bind`. Usage looks like::
Session = sessionmaker(
binds={
SomeMappedClass: create_engine("postgresql+psycopg2://engine1"),
SomeDeclarativeBase: create_engine(
"postgresql+psycopg2://engine2"
),
some_mapper: create_engine("postgresql+psycopg2://engine3"),
some_table: create_engine("postgresql+psycopg2://engine4"),
}
)
.. seealso::
:ref:`session_partitioning`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
:meth:`.Session.get_bind`
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the
returned class. This is the only argument that is local to the
:class:`.sessionmaker` function, and is not sent directly to the
constructor for ``Session``.
:param enable_baked_queries: legacy; defaults to ``True``.
A parameter consumed
by the :mod:`sqlalchemy.ext.baked` extension to determine if
"baked queries" should be cached, as is the normal operation
of this extension. When set to ``False``, caching as used by
this particular extension is disabled.
.. versionchanged:: 1.4 The ``sqlalchemy.ext.baked`` extension is
legacy and is not used by any of SQLAlchemy's internals. This
flag therefore only affects applications that are making explicit
use of this extension within their own code.
:param execution_options: optional dictionary of execution options
that will be applied to all calls to :meth:`_orm.Session.execute`,
:meth:`_orm.Session.scalars`, and similar. Execution options
present in statements as well as options passed to methods like
:meth:`_orm.Session.execute` explicitly take precedence over
the session-wide options.
.. versionadded:: 2.1
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each :meth:`~.commit`,
so that all attribute/object access subsequent to a completed
transaction will load from the most recent database state.
.. seealso::
:ref:`session_committing`
:param future: Deprecated; this flag is always True.
.. seealso::
:ref:`migration_20_toplevel`
:param info: optional dictionary of arbitrary data to be associated
with this :class:`.Session`. Is available via the
:attr:`.Session.info` attribute. Note the dictionary is copied at
construction time so that modifications to the per-
:class:`.Session` dictionary will be local to that
:class:`.Session`.
:param query_cls: Class which should be used to create new Query
objects, as returned by the :meth:`~.Session.query` method.
Defaults to :class:`_query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a
:meth:`~.commit`, after :meth:`~.flush` has been issued for all
attached databases, the :meth:`~.TwoPhaseTransaction.prepare`
method on each database's :class:`.TwoPhaseTransaction` will be
called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param autocommit: the "autocommit" keyword is present for backwards
compatibility but must remain at its default value of ``False``.
:param join_transaction_mode: Describes the transactional behavior to
take when a given bind is a :class:`_engine.Connection` that
has already begun a transaction outside the scope of this
:class:`_orm.Session`; in other words the
:meth:`_engine.Connection.in_transaction()` method returns True.
The following behaviors only take effect when the :class:`_orm.Session`
**actually makes use of the connection given**; that is, a method
such as :meth:`_orm.Session.execute`, :meth:`_orm.Session.connection`,
etc. are actually invoked:
* ``"conditional_savepoint"`` - this is the default. if the given
:class:`_engine.Connection` is begun within a transaction but
does not have a SAVEPOINT, then ``"rollback_only"`` is used.
If the :class:`_engine.Connection` is additionally within
a SAVEPOINT, in other words
:meth:`_engine.Connection.in_nested_transaction()` method returns
True, then ``"create_savepoint"`` is used.
``"conditional_savepoint"`` behavior attempts to make use of
savepoints in order to keep the state of the existing transaction
unchanged, but only if there is already a savepoint in progress;
otherwise, it is not assumed that the backend in use has adequate
support for SAVEPOINT, as availability of this feature varies.
``"conditional_savepoint"`` also seeks to establish approximate
backwards compatibility with previous :class:`_orm.Session`
behavior, for applications that are not setting a specific mode. It
is recommended that one of the explicit settings be used.
* ``"create_savepoint"`` - the :class:`_orm.Session` will use
:meth:`_engine.Connection.begin_nested()` in all cases to create
its own transaction. This transaction by its nature rides
"on top" of any existing transaction that's opened on the given
:class:`_engine.Connection`; if the underlying database and
the driver in use has full, non-broken support for SAVEPOINT, the
external transaction will remain unaffected throughout the
lifespan of the :class:`_orm.Session`.
The ``"create_savepoint"`` mode is the most useful for integrating
a :class:`_orm.Session` into a test suite where an externally
initiated transaction should remain unaffected; however, it relies
on proper SAVEPOINT support from the underlying driver and
database.
.. tip:: When using SQLite, the SQLite driver included through
Python 3.11 does not handle SAVEPOINTs correctly in all cases
without workarounds. See the sections
:ref:`pysqlite_serializable` and :ref:`aiosqlite_serializable`
for details on current workarounds.
* ``"control_fully"`` - the :class:`_orm.Session` will take
control of the given transaction as its own;
:meth:`_orm.Session.commit` will call ``.commit()`` on the
transaction, :meth:`_orm.Session.rollback` will call
``.rollback()`` on the transaction, :meth:`_orm.Session.close` will
call ``.rollback`` on the transaction.
.. tip:: This mode of use is equivalent to how SQLAlchemy 1.4 would
handle a :class:`_engine.Connection` given with an existing
SAVEPOINT (i.e. :meth:`_engine.Connection.begin_nested`); the
:class:`_orm.Session` would take full control of the existing
SAVEPOINT.
* ``"rollback_only"`` - the :class:`_orm.Session` will take control
of the given transaction for ``.rollback()`` calls only;
``.commit()`` calls will not be propagated to the given
transaction. ``.close()`` calls will have no effect on the
given transaction.
.. tip:: This mode of use is equivalent to how SQLAlchemy 1.4 would
handle a :class:`_engine.Connection` given with an existing
regular database transaction (i.e.
:meth:`_engine.Connection.begin`); the :class:`_orm.Session`
would propagate :meth:`_orm.Session.rollback` calls to the
underlying transaction, but not :meth:`_orm.Session.commit` or
:meth:`_orm.Session.close` calls.
.. versionadded:: 2.0.0rc1
:param close_resets_only: Defaults to ``True``. Determines if
the session should reset itself after calling ``.close()``
or should pass in a no longer usable state, disabling re-use.
.. versionadded:: 2.0.22 added flag ``close_resets_only``.
A future SQLAlchemy version may change the default value of
this flag to ``False``.
.. seealso::
:ref:`session_closing` - Detail on the semantics of
:meth:`_orm.Session.close` and :meth:`_orm.Session.reset`.
""" # noqa
# considering allowing the "autocommit" keyword to still be accepted
# as long as it's False, so that external test suites, oslo.db etc
# continue to function as the argument appears to be passed in lots
# of cases including in our own test suite
if autocommit:
raise sa_exc.ArgumentError(
"autocommit=True is no longer supported"
)
self.identity_map = identity._WeakInstanceDict()
if not future:
raise sa_exc.ArgumentError(
"The 'future' parameter passed to "
"Session() may only be set to True."
)
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self._warn_on_events = False
self._transaction = None
self._nested_transaction = None
self.hash_key = _new_sessionid()
self.autobegin = autobegin
self.autoflush = autoflush
self.expire_on_commit = expire_on_commit
self.enable_baked_queries = enable_baked_queries
if execution_options:
self.execution_options = self.execution_options.union(
execution_options
)
# the idea is that at some point NO_ARG will warn that in the future
# the default will switch to close_resets_only=False.
if close_resets_only in (True, _NoArg.NO_ARG):
self._close_state = _SessionCloseState.CLOSE_IS_RESET
else:
self._close_state = _SessionCloseState.ACTIVE
if (
join_transaction_mode
and join_transaction_mode
not in JoinTransactionMode.__args__ # type: ignore
):
raise sa_exc.ArgumentError(
f"invalid selection for join_transaction_mode: "
f'"{join_transaction_mode}"'
)
self.join_transaction_mode = join_transaction_mode
self.twophase = twophase
self._query_cls = query_cls if query_cls else query.Query
if info:
self.info.update(info)
if binds is not None:
for key, bind in binds.items():
self._add_bind(key, bind)
_sessions[self.hash_key] = self
# used by sqlalchemy.engine.util.TransactionalContext
_trans_context_manager: Optional[TransactionalContext] = None

# optional hook used to procure the Connection for a given bind;
# NOTE(review): appears to override default connection acquisition when
# set — confirm against get_bind()/_connection_for_bind() callers
connection_callable: Optional[_ConnectionCallableProto] = None
def __enter__(self: _S) -> _S:
    """Enter a ``with`` block, returning this :class:`.Session`;
    :meth:`.close` is invoked on exit via :meth:`.__exit__`."""
    return self
def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
    """Close the :class:`.Session` when the ``with`` block exits,
    regardless of whether an exception occurred."""
    self.close()
@contextlib.contextmanager
def _maker_context_manager(self: _S) -> Iterator[_S]:
    """Yield this :class:`.Session` within a begun transaction, then
    commit/rollback via ``begin()`` and close the session on exit."""
    with self, self.begin():
        yield self
def in_transaction(self) -> bool:
    """Return True if this :class:`_orm.Session` has begun a transaction.

    .. versionadded:: 1.4

    .. seealso::

        :attr:`_orm.Session.is_active`
    """
    current = self._transaction
    return current is not None
def in_nested_transaction(self) -> bool:
    """Return True if this :class:`_orm.Session` has begun a nested
    (SAVEPOINT) transaction.

    .. versionadded:: 1.4
    """
    nested = self._nested_transaction
    return nested is not None
def get_transaction(self) -> Optional[SessionTransaction]:
    """Return the current root transaction in progress, if any.

    .. versionadded:: 1.4
    """
    node = self._transaction
    if node is None:
        return None
    # walk parent links up to the root of the transaction chain
    while node._parent is not None:
        node = node._parent
    return node
def get_nested_transaction(self) -> Optional[SessionTransaction]:
    """Return the current nested (SAVEPOINT) transaction in progress,
    if any.

    .. versionadded:: 1.4
    """
    current_nested = self._nested_transaction
    return current_nested
@util.memoized_property
def info(self) -> _InfoType:
    """A user-modifiable dictionary, local to this :class:`.Session`.

    May be seeded via the ``info`` argument to the :class:`.Session` /
    :class:`.sessionmaker` constructors or factory methods; thereafter
    it can be modified independently of all other :class:`.Session`
    objects.
    """
    # memoized: created once per Session on first access
    return {}
def _autobegin_t(self, begin: bool = False) -> SessionTransaction:
    """Return the current :class:`.SessionTransaction`, creating one
    if none is present.

    :param begin: when True, the new transaction's origin is BEGIN
        (explicit); otherwise AUTOBEGIN, which is refused when the
        ``autobegin`` flag is disabled.
    """
    existing = self._transaction
    if existing is not None:
        return existing

    if not (begin or self.autobegin):
        raise sa_exc.InvalidRequestError(
            "Autobegin is disabled on this Session; please call "
            "session.begin() to start a new transaction"
        )

    origin = (
        SessionTransactionOrigin.BEGIN
        if begin
        else SessionTransactionOrigin.AUTOBEGIN
    )
    # SessionTransaction's constructor installs itself as
    # self._transaction
    trans = SessionTransaction(self, origin)
    assert self._transaction is trans
    return trans
def begin(self, nested: bool = False) -> SessionTransaction:
    """Begin a transaction, or nested transaction, on this
    :class:`.Session`, if one is not already begun.

    Because the :class:`_orm.Session` features **autobegin** behavior,
    calling this method explicitly is normally only needed in order to
    control the scope of when the transactional state is begun.  An
    error is raised if the outermost transaction is requested while
    this :class:`.Session` is already inside of a transaction.

    :param nested: if True, begins a SAVEPOINT transaction, equivalent
        to calling :meth:`~.Session.begin_nested`; see
        :ref:`session_begin_nested`.
    :return: the :class:`.SessionTransaction` object, which acts as a
        Python context manager so that :meth:`.Session.begin` may be
        used in a "with" block; see :ref:`session_explicit_begin`.

    .. seealso::

        :ref:`session_autobegin`

        :ref:`unitofwork_transaction`

        :meth:`.Session.begin_nested`
    """
    current = self._transaction
    if current is None:
        current = self._autobegin_t(begin=True)
        if not nested:
            return current

    assert current is not None
    if not nested:
        raise sa_exc.InvalidRequestError(
            "A transaction is already begun on this Session."
        )

    # establish a SAVEPOINT on top of the (possibly just-created)
    # outer transaction
    savepoint = current._begin(nested=nested)
    assert self._transaction is savepoint
    self._nested_transaction = savepoint
    return savepoint  # needed for __enter__/__exit__ hook
def begin_nested(self) -> SessionTransaction:
    """Begin a "nested" transaction on this Session, e.g. SAVEPOINT.

    The target database(s) and associated drivers must support SQL
    SAVEPOINT for this method to function correctly; see
    :ref:`session_begin_nested` for documentation.

    :return: the :class:`.SessionTransaction` object, which acts as a
        context manager and so may be used in a "with" block.

    .. seealso::

        :ref:`session_begin_nested`

        :ref:`pysqlite_serializable` - special workarounds required
        with the SQLite driver for SAVEPOINT to work correctly;
        for asyncio use cases, see :ref:`aiosqlite_serializable`.
    """
    # delegate to begin() with the nested flag raised
    return self.begin(nested=True)
def rollback(self) -> None:
    """Rollback the current transaction in progress.

    This is a pass-through if no transaction is in progress; otherwise
    it always rolls back the topmost database transaction, discarding
    any nested transactions that may be in progress.

    .. seealso::

        :ref:`session_rollback`

        :ref:`unitofwork_transaction`
    """
    current = self._transaction
    if current is not None:
        current.rollback(_to_root=True)
def commit(self) -> None:
    """Flush pending changes and commit the current transaction.

    When the COMMIT operation completes, all objects are fully
    :term:`expired`; the :paramref:`.Session.expire_on_commit`
    parameter may be used to disable this behavior.  When no
    transaction is in place, an internal-only "logical" transaction is
    begun and committed, which still invokes event handlers and object
    expiration rules even though the database is normally unaffected
    unless pending flush changes were detected.

    The outermost database transaction is committed unconditionally,
    automatically releasing any SAVEPOINTs in effect.

    .. seealso::

        :ref:`session_committing`

        :ref:`unitofwork_transaction`

        :ref:`asyncio_orm_avoid_lazyloads`
    """
    # autobegin a (possibly logical-only) transaction when none exists
    current = (
        self._autobegin_t() if self._transaction is None else self._transaction
    )
    current.commit(_to_root=True)
def prepare(self) -> None:
    """Prepare the current transaction in progress for two phase commit.

    If no transaction is in progress, this method raises an
    :exc:`~sqlalchemy.exc.InvalidRequestError`.  Only root transactions
    of two phase sessions can be prepared; otherwise an
    :exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
    """
    current = (
        self._autobegin_t() if self._transaction is None else self._transaction
    )
    current.prepare()
def connection(
    self,
    bind_arguments: Optional[_BindArguments] = None,
    execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> Connection:
    r"""Return a :class:`_engine.Connection` corresponding to this
    :class:`.Session` object's transactional state.

    Either the :class:`_engine.Connection` for the current transaction
    is returned, or if no transaction is in progress, a new one is
    begun and its :class:`_engine.Connection` returned (no DBAPI
    transactional state is established until the first SQL statement
    is emitted).

    :param bind_arguments: dictionary of bind arguments; may include
        "mapper", "bind", "clause", or other custom arguments passed
        to :meth:`.Session.get_bind` for resolution.
    :param execution_options: dictionary of execution options passed
        to :meth:`_engine.Connection.execution_options` **when the
        connection is first procured only**; if the connection is
        already present a warning is emitted and they are ignored.

    .. seealso::

        :ref:`session_transaction_isolation`
    """
    if not bind_arguments:
        resolved_bind = self.get_bind()
    else:
        # an explicit "bind" entry wins; otherwise the remaining
        # arguments feed the get_bind() resolution heuristics.
        # pop() mutates the caller's dictionary, matching the
        # original behavior.
        resolved_bind = bind_arguments.pop("bind", None)
        if resolved_bind is None:
            resolved_bind = self.get_bind(**bind_arguments)
    return self._connection_for_bind(
        resolved_bind,
        execution_options=execution_options,
    )
def _connection_for_bind(
    self,
    engine: _SessionBind,
    execution_options: Optional[CoreExecuteOptionsParameter] = None,
    **kw: Any,
) -> Connection:
    """Procure a :class:`_engine.Connection` for *engine* from the
    current transaction, autobeginning one if needed."""
    # guard against use from within an unexpected transactional context
    TransactionalContext._trans_ctx_check(self)

    current = self._transaction
    if current is None:
        current = self._autobegin_t()
    return current._connection_for_bind(engine, execution_options)
# typing-only overloads for _execute_internal(): when _scalar_result is
# literally True the method returns a scalar (Any); otherwise a Result.
@overload
def _execute_internal(
    self,
    statement: Executable,
    params: Optional[_CoreSingleExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    _parent_execute_state: Optional[Any] = None,
    _add_event: Optional[Any] = None,
    _scalar_result: Literal[True] = ...,
) -> Any: ...

@overload
def _execute_internal(
    self,
    statement: Executable,
    params: Optional[_CoreAnyExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    _parent_execute_state: Optional[Any] = None,
    _add_event: Optional[Any] = None,
    _scalar_result: bool = ...,
) -> Result[Unpack[TupleAny]]: ...
def _execute_internal(
    self,
    statement: Executable,
    params: Optional[_CoreAnyExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    _parent_execute_state: Optional[Any] = None,
    _add_event: Optional[Any] = None,
    _scalar_result: bool = False,
) -> Any:
    """Shared implementation behind :meth:`.Session.execute`,
    :meth:`.Session.scalar` and :meth:`.Session.scalars`.

    Coerces the statement, merges session-wide / statement-level /
    per-call execution options, runs ``do_orm_execute`` event handlers,
    then dispatches either through the ORM compile-state plugin (for
    ORM-enabled statements) or directly to ``Connection.execute`` /
    ``Connection.scalar``.  When ``_scalar_result`` is True, the scalar
    of the result is returned instead of the :class:`_engine.Result`.
    """
    statement = coercions.expect(roles.StatementRole, statement)

    # work on a private copy of bind_arguments, as it is mutated
    # below via setdefault()
    if not bind_arguments:
        bind_arguments = {}
    else:
        bind_arguments = dict(bind_arguments)

    if (
        statement._propagate_attrs.get("compile_state_plugin", None)
        == "orm"
    ):
        compile_state_cls = CompileState._get_plugin_class_for_plugin(
            statement, "orm"
        )
        if TYPE_CHECKING:
            assert isinstance(
                compile_state_cls, context._AbstractORMCompileState
            )
    else:
        # not an ORM-enabled statement; executed as plain Core below
        compile_state_cls = None

    bind_arguments.setdefault("clause", statement)

    combined_execution_options: util.immutabledict[str, Any] = (
        util.coerce_to_immutabledict(execution_options)
    )
    if self.execution_options:
        # merge given execution options with session-wide execution
        # options.  if the statement also has execution_options,
        # maintain priority of session.execution_options ->
        # statement.execution_options -> method passed execution_options
        # by omitting from the base execution options those keys that
        # will come from the statement
        if statement._execution_options:
            combined_execution_options = util.immutabledict(
                {
                    k: v
                    for k, v in self.execution_options.items()
                    if k not in statement._execution_options
                }
            ).union(combined_execution_options)
        else:
            combined_execution_options = self.execution_options.union(
                combined_execution_options
            )

    if _parent_execute_state:
        # nested execution: continue with the events remaining from
        # the parent's event chain
        events_todo = _parent_execute_state._remaining_events()
    else:
        events_todo = self.dispatch.do_orm_execute
        if _add_event:
            events_todo = list(events_todo) + [_add_event]

    if events_todo:
        if compile_state_cls is not None:
            # for event handlers, do the orm_pre_session_exec
            # pass ahead of the event handlers, so that things like
            # .load_options, .update_delete_options etc. are populated.
            # is_pre_event=True allows the hook to hold off on things
            # it doesn't want to do twice, including autoflush as well
            # as "pre fetch" for DML, etc.
            (
                statement,
                combined_execution_options,
                params,
            ) = compile_state_cls.orm_pre_session_exec(
                self,
                statement,
                params,
                combined_execution_options,
                bind_arguments,
                True,
            )

        orm_exec_state = ORMExecuteState(
            self,
            statement,
            params,
            combined_execution_options,
            bind_arguments,
            compile_state_cls,
            events_todo,
        )
        for idx, fn in enumerate(events_todo):
            orm_exec_state._starting_event_idx = idx
            fn_result: Optional[Result[Unpack[TupleAny]]] = fn(
                orm_exec_state
            )
            if fn_result:
                # an event handler returned its own Result; short
                # circuit normal execution entirely
                if _scalar_result:
                    return fn_result.scalar()
                else:
                    return fn_result

        # event handlers may have replaced the statement, options or
        # parameters; pick up the (possibly modified) versions
        statement = orm_exec_state.statement
        combined_execution_options = orm_exec_state.local_execution_options
        params = orm_exec_state.parameters

    if compile_state_cls is not None:
        # now run orm_pre_session_exec() "for real".  if there were
        # event hooks, this will re-run the steps that interpret
        # new execution_options into load_options / update_delete_options,
        # which we assume the event hook might have updated.
        # autoflush will also be invoked in this step if enabled.
        (
            statement,
            combined_execution_options,
            params,
        ) = compile_state_cls.orm_pre_session_exec(
            self,
            statement,
            params,
            combined_execution_options,
            bind_arguments,
            False,
        )
    else:
        # Issue #9809: unconditionally autoflush for Core statements
        self._autoflush()

    bind = self.get_bind(**bind_arguments)

    conn = self._connection_for_bind(bind)

    if _scalar_result and not compile_state_cls:
        # plain Core statement wanting a scalar: use the Connection
        # fast path directly
        if TYPE_CHECKING:
            params = cast(_CoreSingleExecuteParams, params)
        return conn.scalar(
            statement,
            params or {},
            execution_options=combined_execution_options,
        )

    if compile_state_cls:
        result: Result[Unpack[TupleAny]] = (
            compile_state_cls.orm_execute_statement(
                self,
                statement,
                params or {},
                combined_execution_options,
                bind_arguments,
                conn,
            )
        )
    else:
        result = conn.execute(
            statement, params, execution_options=combined_execution_options
        )

    if _scalar_result:
        return result.scalar()
    else:
        return result
# typing-only overloads for execute(): statements typed via
# TypedReturnsRows propagate their row type into the returned Result.
@overload
def execute(
    self,
    statement: TypedReturnsRows[Unpack[_Ts]],
    params: Optional[_CoreAnyExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    _parent_execute_state: Optional[Any] = None,
    _add_event: Optional[Any] = None,
) -> Result[Unpack[_Ts]]: ...

@overload
def execute(
    self,
    statement: Executable,
    params: Optional[_CoreAnyExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    _parent_execute_state: Optional[Any] = None,
    _add_event: Optional[Any] = None,
) -> Result[Unpack[TupleAny]]: ...
def execute(
    self,
    statement: Executable,
    params: Optional[_CoreAnyExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    _parent_execute_state: Optional[Any] = None,
    _add_event: Optional[Any] = None,
) -> Result[Unpack[TupleAny]]:
    r"""Execute a SQL expression construct.

    Returns a :class:`_engine.Result` object representing results of
    the statement execution, e.g.::

        from sqlalchemy import select

        result = session.execute(select(User).where(User.id == 5))

    The API contract mirrors :meth:`_engine.Connection.execute`, the
    :term:`2.0 style` version of :class:`_engine.Connection`.

    .. versionchanged:: 1.4 the :meth:`_orm.Session.execute` method is
       now the primary point of ORM statement execution when using
       :term:`2.0 style` ORM usage.

    :param statement: an executable statement (i.e. an
        :class:`.Executable` expression such as
        :func:`_expression.select`).
    :param params: optional dictionary (single-row execution) or list
        of dictionaries ("executemany") of bound parameter values;
        keys must correspond to parameter names in the statement.
    :param execution_options: optional dictionary of execution options
        associated with this execution; options passed here take
        precedence over any present on the statement, which in turn
        take precedence over session-wide options.

        .. seealso::

            :ref:`orm_queryguide_execution_options` - ORM-specific
            execution options
    :param bind_arguments: dictionary of additional arguments passed
        to :meth:`.Session.get_bind` to determine the bind; may
        include "mapper", "bind", or other custom arguments.
    :return: a :class:`_engine.Result` object.
    """
    # all public execution methods funnel through _execute_internal;
    # forward the keyword arguments as a single mapping
    forwarded = dict(
        execution_options=execution_options,
        bind_arguments=bind_arguments,
        _parent_execute_state=_parent_execute_state,
        _add_event=_add_event,
    )
    return self._execute_internal(statement, params, **forwarded)
# typing-only overloads for scalar(): typed statements yield
# Optional[_T]; untyped statements yield Any.
@overload
def scalar(
    self,
    statement: TypedReturnsRows[_T],
    params: Optional[_CoreSingleExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    **kw: Any,
) -> Optional[_T]: ...

@overload
def scalar(
    self,
    statement: Executable,
    params: Optional[_CoreSingleExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    **kw: Any,
) -> Any: ...
def scalar(
    self,
    statement: Executable,
    params: Optional[_CoreSingleExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    **kw: Any,
) -> Any:
    """Execute a statement and return a scalar result.

    Usage and parameters are the same as those of
    :meth:`_orm.Session.execute`; the return result is a scalar Python
    value.
    """
    # identical to execute() except _scalar_result=True makes the
    # internal path return the scalar of the result
    return self._execute_internal(
        statement,
        params=params,
        _scalar_result=True,
        execution_options=execution_options,
        bind_arguments=bind_arguments,
        **kw,
    )
# typing-only overloads for scalars(): typed statements yield
# ScalarResult[_T]; untyped statements yield ScalarResult[Any].
@overload
def scalars(
    self,
    statement: TypedReturnsRows[_T],
    params: Optional[_CoreAnyExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    **kw: Any,
) -> ScalarResult[_T]: ...

@overload
def scalars(
    self,
    statement: Executable,
    params: Optional[_CoreAnyExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    **kw: Any,
) -> ScalarResult[Any]: ...
def scalars(
    self,
    statement: Executable,
    params: Optional[_CoreAnyExecuteParams] = None,
    *,
    execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
    bind_arguments: Optional[_BindArguments] = None,
    **kw: Any,
) -> ScalarResult[Any]:
    """Execute a statement and return the results as scalars.

    Usage and parameters are the same as those of
    :meth:`_orm.Session.execute`; the return result is a
    :class:`_result.ScalarResult` filtering object which returns
    single elements rather than :class:`_row.Row` objects.

    :return: a :class:`_result.ScalarResult` object

    .. versionadded:: 1.4.24 Added :meth:`_orm.Session.scalars`

    .. versionadded:: 1.4.26 Added :meth:`_orm.scoped_session.scalars`

    .. seealso::

        :ref:`orm_queryguide_select_orm_entities` - contrasts the
        behavior of :meth:`_orm.Session.execute` to
        :meth:`_orm.Session.scalars`
    """
    # run the normal execution path, then wrap the Result in its
    # scalar-filtering form; the explicit _scalar_result=False keeps
    # the static return type as Result rather than Any
    full_result = self._execute_internal(
        statement,
        params=params,
        execution_options=execution_options,
        bind_arguments=bind_arguments,
        _scalar_result=False,
        **kw,
    )
    return full_result.scalars()
def close(self) -> None:
    """Close out the transactional resources and ORM objects used by
    this :class:`_orm.Session`.

    All ORM objects are expunged, any transaction in progress is
    ended, and all :class:`_engine.Connection` objects checked out
    from associated :class:`_engine.Engine` objects are
    :term:`released`.

    .. tip::

        In the default running mode the :class:`_orm.Session` may be
        used again after calling this method; there is no distinct
        "closed" state.  Setting
        :paramref:`_orm.Session.close_resets_only` to ``False``
        instead makes the close final, forbidding further use.

    .. versionchanged:: 1.4  :meth:`.Session.close` does not
       immediately create a new :class:`.SessionTransaction`; one is
       created only when the :class:`.Session` is next used for a
       database operation.

    .. seealso::

        :ref:`session_closing`

        :meth:`_orm.Session.reset` - behaves like ``close()`` with
        :paramref:`_orm.Session.close_resets_only` set to ``True``.
    """
    self._close_impl(invalidate=False)
def reset(self) -> None:
    """Close out the transactional resources and ORM objects used by
    this :class:`_orm.Session`, resetting it to its initial state.

    Provides the "reset-only" behavior that
    :meth:`_orm.Session.close` has historically provided, and remains
    available for :class:`_orm.Session` objects that set
    :paramref:`_orm.Session.close_resets_only` to ``False``.

    .. versionadded:: 2.0.22

    .. seealso::

        :ref:`session_closing`

        :meth:`_orm.Session.close` - additionally prevents re-use when
        :paramref:`_orm.Session.close_resets_only` is ``False``.
    """
    self._close_impl(invalidate=False, is_reset=True)
def invalidate(self) -> None:
    """Close this Session, using connection invalidation.

    Variant of :meth:`.Session.close` that additionally calls
    :meth:`_engine.Connection.invalidate` on each
    :class:`_engine.Connection` currently in use for a transaction,
    for when the database is known to be in a state where the
    connections are no longer safe to use — e.g. a `gevent
    <https://www.gevent.org/>`_ ``Timeout``::

        import gevent

        try:
            sess = Session()
            sess.add(User())
            sess.commit()
        except gevent.Timeout:
            sess.invalidate()
            raise
        except:
            sess.rollback()
            raise

    Everything that :meth:`_orm.Session.close` does is also performed,
    including expunging all ORM objects.
    """
    self._close_impl(invalidate=True)
def _close_impl(self, invalidate: bool, is_reset: bool = False) -> None:
    """Shared implementation for close(), reset() and invalidate()."""
    # a non-reset close permanently disables the Session only when the
    # close state is ACTIVE (i.e. close_resets_only=False configured)
    if not is_reset and self._close_state is _SessionCloseState.ACTIVE:
        self._close_state = _SessionCloseState.CLOSED
    self.expunge_all()
    current = self._transaction
    if current is not None:
        # close the whole transaction chain from innermost outward
        for txn in current._iterate_self_and_parents():
            txn.close(invalidate)
def expunge_all(self) -> None:
    """Remove all object instances from this ``Session``.

    Equivalent to calling ``expunge(obj)`` on every object tracked by
    this ``Session``.
    """
    detaching = self.identity_map.all_states() + list(self._new)
    self.identity_map._kill()
    self.identity_map = identity._WeakInstanceDict()
    self._deleted = {}
    self._new = {}
    statelib.InstanceState._detach_states(detaching, self)
def _add_bind(self, key: _SessionBindKey, bind: _SessionBind) -> None:
    """Register *bind* for the entity/table *key* in the per-entity
    bind lookup used by get_bind()."""
    try:
        insp = inspect(key)
    except sa_exc.NoInspectionAvailable as err:
        # plain classes (e.g. declarative bases) aren't inspectable
        # but are still valid bind targets
        if isinstance(key, type):
            self.__binds[key] = bind
        else:
            raise sa_exc.ArgumentError(
                "Not an acceptable bind target: %s" % key
            ) from err
    else:
        if TYPE_CHECKING:
            assert isinstance(insp, Inspectable)

        if isinstance(insp, TableClause):
            self.__binds[insp] = bind
        elif insp_is_mapper(insp):
            # a mapper binds both its class and all its tables
            self.__binds[insp.class_] = bind
            for selectable in insp._all_tables:
                self.__binds[selectable] = bind
        else:
            raise sa_exc.ArgumentError(
                "Not an acceptable bind target: %s" % key
            )
    def bind_mapper(
        self, mapper: _EntityBindKey[_O], bind: _SessionBind
    ) -> None:
        """Associate a :class:`_orm.Mapper` or arbitrary Python class with a
        "bind", e.g. an :class:`_engine.Engine` or
        :class:`_engine.Connection`.

        The given entity is added to a lookup used by the
        :meth:`.Session.get_bind` method.

        :param mapper: a :class:`_orm.Mapper` object,
         or an instance of a mapped
         class, or any Python class that is the base of a set of mapped
         classes.

        :param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
         object.

        .. seealso::

            :ref:`session_partitioning`

            :paramref:`.Session.binds`

            :meth:`.Session.bind_table`

        """
        # thin public wrapper; all resolution happens in _add_bind()
        self._add_bind(mapper, bind)
    def bind_table(self, table: TableClause, bind: _SessionBind) -> None:
        """Associate a :class:`_schema.Table` with a "bind", e.g. an
        :class:`_engine.Engine`
        or :class:`_engine.Connection`.

        The given :class:`_schema.Table` is added to a lookup used by the
        :meth:`.Session.get_bind` method.

        :param table: a :class:`_schema.Table` object,
         which is typically the target
         of an ORM mapping, or is present within a selectable that is
         mapped.

        :param bind: an :class:`_engine.Engine` or :class:`_engine.Connection`
         object.

        .. seealso::

            :ref:`session_partitioning`

            :paramref:`.Session.binds`

            :meth:`.Session.bind_mapper`

        """
        # thin public wrapper; all resolution happens in _add_bind()
        self._add_bind(table, bind)
    def get_bind(
        self,
        mapper: Optional[_EntityBindKey[_O]] = None,
        *,
        clause: Optional[ClauseElement] = None,
        bind: Optional[_SessionBind] = None,
        _sa_skip_events: Optional[bool] = None,
        _sa_skip_for_implicit_returning: bool = False,
        **kw: Any,
    ) -> Union[Engine, Connection]:
        """Return a "bind" to which this :class:`.Session` is bound.

        The "bind" is usually an instance of :class:`_engine.Engine`,
        except in the case where the :class:`.Session` has been
        explicitly bound directly to a :class:`_engine.Connection`.

        For a multiply-bound or unbound :class:`.Session`, the
        ``mapper`` or ``clause`` arguments are used to determine the
        appropriate bind to return.

        Note that the "mapper" argument is usually present
        when :meth:`.Session.get_bind` is called via an ORM
        operation such as a :meth:`.Session.query`, each
        individual INSERT/UPDATE/DELETE operation within a
        :meth:`.Session.flush`, call, etc.

        The order of resolution is:

        1. if mapper given and :paramref:`.Session.binds` is present,
           locate a bind based first on the mapper in use, then
           on the mapped class in use, then on any base classes that are
           present in the ``__mro__`` of the mapped class, from more specific
           superclasses to more general.
        2. if clause given and ``Session.binds`` is present,
           locate a bind based on :class:`_schema.Table` objects
           found in the given clause present in ``Session.binds``.
        3. if ``Session.binds`` is present, return that.
        4. if clause given, attempt to return a bind
           linked to the :class:`_schema.MetaData` ultimately
           associated with the clause.
        5. if mapper given, attempt to return a bind
           linked to the :class:`_schema.MetaData` ultimately
           associated with the :class:`_schema.Table` or other
           selectable to which the mapper is mapped.
        6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
           is raised.

        Note that the :meth:`.Session.get_bind` method can be overridden on
        a user-defined subclass of :class:`.Session` to provide any kind
        of bind resolution scheme. See the example at
        :ref:`session_custom_partitioning`.

        :param mapper:
          Optional mapped class or corresponding :class:`_orm.Mapper` instance.
          The bind can be derived from a :class:`_orm.Mapper` first by
          consulting the "binds" map associated with this :class:`.Session`,
          and secondly by consulting the :class:`_schema.MetaData` associated
          with the :class:`_schema.Table` to which the :class:`_orm.Mapper` is
          mapped for a bind.

        :param clause:
          A :class:`_expression.ClauseElement` (i.e.
          :func:`_expression.select`,
          :func:`_expression.text`,
          etc.). If the ``mapper`` argument is not present or could not
          produce a bind, the given expression construct will be searched
          for a bound element, typically a :class:`_schema.Table`
          associated with
          bound :class:`_schema.MetaData`.

        .. seealso::

            :ref:`session_partitioning`

            :paramref:`.Session.binds`

            :meth:`.Session.bind_mapper`

            :meth:`.Session.bind_table`

        """
        # this function is documented as a subclassing hook, so we have
        # to call this method even if the return is simple
        if bind:
            return bind
        elif not self.__binds and self.bind:
            # simplest and most common case, we have a bind and no
            # per-mapper/table binds, we're done
            return self.bind

        # we don't have self.bind and either have self.__binds
        # or we don't have self.__binds (which is legacy). Look at the
        # mapper and the clause
        if mapper is None and clause is None:
            if self.bind:
                return self.bind
            else:
                raise sa_exc.UnboundExecutionError(
                    "This session is not bound to a single Engine or "
                    "Connection, and no context was provided to locate "
                    "a binding."
                )

        # look more closely at the mapper.
        if mapper is not None:
            try:
                inspected_mapper = inspect(mapper)
            except sa_exc.NoInspectionAvailable as err:
                # a plain class that failed inspection is not mapped;
                # anything else (wrong type entirely) re-raises as-is
                if isinstance(mapper, type):
                    raise exc.UnmappedClassError(mapper) from err
                else:
                    raise
        else:
            inspected_mapper = None

        # match up the mapper or clause in the __binds
        if self.__binds:
            # matching mappers and selectables to entries in the
            # binds dictionary; supported use case.
            if inspected_mapper:
                # __mro__ walk implements step 1: most-specific class wins
                for cls in inspected_mapper.class_.__mro__:
                    if cls in self.__binds:
                        return self.__binds[cls]
                if clause is None:
                    clause = inspected_mapper.persist_selectable

            if clause is not None:
                plugin_subject = clause._propagate_attrs.get(
                    "plugin_subject", None
                )

                if plugin_subject is not None:
                    for cls in plugin_subject.mapper.class_.__mro__:
                        if cls in self.__binds:
                            return self.__binds[cls]

                # step 2: scan the clause for Table objects in __binds
                for obj in visitors.iterate(clause):
                    if obj in self.__binds:
                        if TYPE_CHECKING:
                            assert isinstance(obj, Table)
                        return self.__binds[obj]

        # none of the __binds matched, but we have a fallback bind.
        # return that
        if self.bind:
            return self.bind

        # build a descriptive error naming whatever context we were given
        context = []
        if inspected_mapper is not None:
            context.append(f"mapper {inspected_mapper}")
        if clause is not None:
            context.append("SQL expression")

        raise sa_exc.UnboundExecutionError(
            f"Could not locate a bind configured on "
            f'{", ".join(context)} or this Session.'
        )
    # Typed overloads for Session.query(); the generated region below
    # must not be edited by hand -- regenerate via the named tool instead.
    @overload
    def query(self, _entity: _EntityType[_O]) -> Query[_O]: ...

    @overload
    def query(
        self, _colexpr: TypedColumnsClauseRole[_T]
    ) -> RowReturningQuery[_T]: ...

    # START OVERLOADED FUNCTIONS self.query RowReturningQuery 2-8

    # code within this block is **programmatically,
    # statically generated** by tools/generate_tuple_map_overloads.py

    @overload
    def query(
        self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], /
    ) -> RowReturningQuery[_T0, _T1]: ...

    @overload
    def query(
        self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], /
    ) -> RowReturningQuery[_T0, _T1, _T2]: ...

    @overload
    def query(
        self,
        __ent0: _TCCA[_T0],
        __ent1: _TCCA[_T1],
        __ent2: _TCCA[_T2],
        __ent3: _TCCA[_T3],
        /,
    ) -> RowReturningQuery[_T0, _T1, _T2, _T3]: ...

    @overload
    def query(
        self,
        __ent0: _TCCA[_T0],
        __ent1: _TCCA[_T1],
        __ent2: _TCCA[_T2],
        __ent3: _TCCA[_T3],
        __ent4: _TCCA[_T4],
        /,
    ) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4]: ...

    @overload
    def query(
        self,
        __ent0: _TCCA[_T0],
        __ent1: _TCCA[_T1],
        __ent2: _TCCA[_T2],
        __ent3: _TCCA[_T3],
        __ent4: _TCCA[_T4],
        __ent5: _TCCA[_T5],
        /,
    ) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4, _T5]: ...

    @overload
    def query(
        self,
        __ent0: _TCCA[_T0],
        __ent1: _TCCA[_T1],
        __ent2: _TCCA[_T2],
        __ent3: _TCCA[_T3],
        __ent4: _TCCA[_T4],
        __ent5: _TCCA[_T5],
        __ent6: _TCCA[_T6],
        /,
    ) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4, _T5, _T6]: ...

    @overload
    def query(
        self,
        __ent0: _TCCA[_T0],
        __ent1: _TCCA[_T1],
        __ent2: _TCCA[_T2],
        __ent3: _TCCA[_T3],
        __ent4: _TCCA[_T4],
        __ent5: _TCCA[_T5],
        __ent6: _TCCA[_T6],
        __ent7: _TCCA[_T7],
        /,
        *entities: _ColumnsClauseArgument[Any],
    ) -> RowReturningQuery[
        _T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7, Unpack[TupleAny]
    ]: ...

    # END OVERLOADED FUNCTIONS self.query

    @overload
    def query(
        self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any
    ) -> Query[Any]: ...

    def query(
        self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any
    ) -> Query[Any]:
        """Return a new :class:`_query.Query` object corresponding to this
        :class:`_orm.Session`.

        Note that the :class:`_query.Query` object is legacy as of
        SQLAlchemy 2.0; the :func:`_sql.select` construct is now used
        to construct ORM queries.

        .. seealso::

            :ref:`unified_tutorial`

            :ref:`queryguide_toplevel`

            :ref:`query_api_toplevel` - legacy API doc

        """
        # _query_cls defaults to Query but may be a user-supplied subclass
        return self._query_cls(entities, self, **kwargs)
    def _identity_lookup(
        self,
        mapper: Mapper[_O],
        primary_key_identity: Union[Any, Tuple[Any, ...]],
        identity_token: Any = None,
        passive: PassiveFlag = PassiveFlag.PASSIVE_OFF,
        lazy_loaded_from: Optional[InstanceState[Any]] = None,
        execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
        bind_arguments: Optional[_BindArguments] = None,
    ) -> Union[Optional[_O], LoaderCallableStatus]:
        """Locate an object in the identity map.

        Given a primary key identity, constructs an identity key and then
        looks in the session's identity map.  If present, the object may
        be run through unexpiration rules (e.g. load unloaded attributes,
        check if was deleted).

        e.g.::

            obj = session._identity_lookup(inspect(SomeClass), (1,))

        :param mapper: mapper in use
        :param primary_key_identity: the primary key we are searching for, as
         a tuple.
        :param identity_token: identity token that should be used to create
         the identity key.  Used as is, however overriding subclasses can
         repurpose this in order to interpret the value in a special way,
         such as if None then look among multiple target tokens.
        :param passive: passive load flag passed to
         :func:`.loading.get_from_identity`, which impacts the behavior if
         the object is found; the object may be validated and/or unexpired
         if the flag allows for SQL to be emitted.
        :param lazy_loaded_from: an :class:`.InstanceState` that is
         specifically asking for this identity as a related identity. Used
         for sharding schemes where there is a correspondence between an object
         and a related object being lazy-loaded (or otherwise
         relationship-loaded).

        :return: None if the object is not found in the identity map, *or*
         if the object was unexpired and found to have been deleted.
         if passive flags disallow SQL and the object is expired, returns
         PASSIVE_NO_RESULT. In all other cases the instance is returned.

        .. versionchanged:: 1.4.0 - the :meth:`.Session._identity_lookup`
           method was moved from :class:`_query.Query` to
           :class:`.Session`, to avoid having to instantiate the
           :class:`_query.Query` object.


        """
        # build the canonical (mapper, pk-tuple, token) identity key used
        # by the identity map
        key = mapper.identity_key_from_primary_key(
            primary_key_identity, identity_token=identity_token
        )

        # work around: https://github.com/python/typing/discussions/1143
        return_value = loading.get_from_identity(self, mapper, key, passive)
        return return_value
    # decorator ordering matters here: the property getter is the
    # @contextmanager-wrapped function, so each attribute access yields a
    # fresh one-shot context manager (non-memoized on purpose).
    @util.non_memoized_property
    @contextlib.contextmanager
    def no_autoflush(self) -> Iterator[Session]:
        """Return a context manager that disables autoflush.

        e.g.::

            with session.no_autoflush:

                some_object = SomeClass()
                session.add(some_object)
                # won't autoflush
                some_object.related_thing = session.query(SomeRelated).first()

        Operations that proceed within the ``with:`` block
        will not be subject to flushes occurring upon query
        access.  This is useful when initializing a series
        of objects which involve existing database queries,
        where the uncompleted object should not yet be flushed.

        """
        # save/restore the flag so nested usage composes correctly
        autoflush = self.autoflush
        self.autoflush = False
        try:
            yield self
        finally:
            self.autoflush = autoflush
    # any SAWarning raised while inside this method is tagged with the
    # message below, so users can tell an autoflush-triggered warning
    # apart from one raised by their own code
    @util.langhelpers.tag_method_for_warnings(
        "This warning originated from the Session 'autoflush' process, "
        "which was invoked automatically in response to a user-initiated "
        "operation. Consider using ``no_autoflush`` context manager if this "
        "warning happened while initializing objects.",
        sa_exc.SAWarning,
    )
    def _autoflush(self) -> None:
        """Flush pending changes if autoflush is enabled and a flush is not
        already in progress."""
        if self.autoflush and not self._flushing:
            try:
                self.flush()
            except sa_exc.StatementError as e:
                # note we are reraising StatementError as opposed to
                # raising FlushError with "chaining" to remain compatible
                # with code that catches StatementError, IntegrityError,
                # etc.
                e.add_detail(
                    "raised as a result of Query-invoked autoflush; "
                    "consider using a session.no_autoflush block if this "
                    "flush is occurring prematurely"
                )
                raise e.with_traceback(sys.exc_info()[2])
    def refresh(
        self,
        instance: object,
        attribute_names: Optional[Iterable[str]] = None,
        with_for_update: ForUpdateParameter = None,
    ) -> None:
        """Expire and refresh attributes on the given instance.

        The selected attributes will first be expired as they would when using
        :meth:`_orm.Session.expire`; then a SELECT statement will be issued to
        the database to refresh column-oriented attributes with the current
        value available in the current transaction.

        :func:`_orm.relationship` oriented attributes will also be immediately
        loaded if they were already eagerly loaded on the object, using the
        same eager loading strategy that they were loaded with originally.

        .. versionadded:: 1.4 - the :meth:`_orm.Session.refresh` method
           can also refresh eagerly loaded attributes.

        :func:`_orm.relationship` oriented attributes that would normally
        load using the ``select`` (or "lazy") loader strategy will also
        load **if they are named explicitly in the attribute_names
        collection**, emitting a SELECT statement for the attribute using the
        ``immediate`` loader strategy.  If lazy-loaded relationships are not
        named in :paramref:`_orm.Session.refresh.attribute_names`, then
        they remain as "lazy loaded" attributes and are not implicitly
        refreshed.

        .. versionchanged:: 2.0.4  The :meth:`_orm.Session.refresh` method
           will now refresh lazy-loaded :func:`_orm.relationship` oriented
           attributes for those which are named explicitly in the
           :paramref:`_orm.Session.refresh.attribute_names` collection.

        .. tip::

            While the :meth:`_orm.Session.refresh` method is capable of
            refreshing both column and relationship oriented attributes, its
            primary focus is on refreshing of local column-oriented attributes
            on a single instance. For more open ended "refresh" functionality,
            including the ability to refresh the attributes on many objects at
            once while having explicit control over relationship loader
            strategies, use the
            :ref:`populate existing <orm_queryguide_populate_existing>` feature
            instead.

        Note that a highly isolated transaction will return the same values as
        were previously read in that same transaction, regardless of changes
        in database state outside of that transaction.   Refreshing
        attributes usually only makes sense at the start of a transaction
        where database rows have not yet been accessed.

        :param attribute_names: optional.  An iterable collection of
          string attribute names indicating a subset of attributes to
          be refreshed.

        :param with_for_update: optional boolean ``True`` indicating FOR UPDATE
          should be used, or may be a dictionary containing flags to
          indicate a more specific set of FOR UPDATE flags for the SELECT;
          flags should match the parameters of
          :meth:`_query.Query.with_for_update`.
          Supersedes the :paramref:`.Session.refresh.lockmode` parameter.

        .. seealso::

            :ref:`session_expire` - introductory material

            :meth:`.Session.expire`

            :meth:`.Session.expire_all`

            :ref:`orm_queryguide_populate_existing` - allows any ORM query
            to refresh objects as they would be loaded normally.

        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE as err:
            raise exc.UnmappedInstanceError(instance) from err

        self._expire_state(state, attribute_names)

        # this autoflush previously used to occur as a secondary effect
        # of the load_on_ident below. Meaning we'd organize the SELECT
        # based on current DB pks, then flush, then if pks changed in that
        # flush, crash. this was unticketed but discovered as part of
        # #8703. So here, autoflush up front, dont autoflush inside
        # load_on_ident.
        self._autoflush()

        # an empty dict is treated as an error rather than "no flags",
        # since the caller most likely meant True
        if with_for_update == {}:
            raise sa_exc.ArgumentError(
                "with_for_update should be the boolean value "
                "True, or a dictionary with options.  "
                "A blank dictionary is ambiguous."
            )

        with_for_update = ForUpdateArg._from_argument(with_for_update)

        stmt: Select[Unpack[TupleAny]] = sql.select(object_mapper(instance))
        if (
            loading._load_on_ident(
                self,
                stmt,
                state.key,
                refresh_state=state,
                with_for_update=with_for_update,
                only_load_props=attribute_names,
                require_pk_cols=True,
                # technically unnecessary as we just did autoflush
                # above, however removes the additional unnecessary
                # call to _autoflush()
                no_autoflush=True,
                is_user_refresh=True,
            )
            is None
        ):
            # the SELECT found no row for this identity
            raise sa_exc.InvalidRequestError(
                "Could not refresh instance '%s'" % instance_str(instance)
            )
    def expire_all(self) -> None:
        """Expires all persistent instances within this Session.

        When any attributes on a persistent instance is next accessed,
        a query will be issued using the
        :class:`.Session` object's current transactional context in order to
        load all expired attributes for the given instance.   Note that
        a highly isolated transaction will return the same values as were
        previously read in that same transaction, regardless of changes
        in database state outside of that transaction.

        To expire individual objects and individual attributes
        on those objects, use :meth:`Session.expire`.

        The :class:`.Session` object's default behavior is to
        expire all state whenever the :meth:`Session.rollback`
        or :meth:`Session.commit` methods are called, so that new
        state can be loaded for the new transaction.   For this reason,
        calling :meth:`Session.expire_all` is not usually needed,
        assuming the transaction is isolated.

        .. seealso::

            :ref:`session_expire` - introductory material

            :meth:`.Session.expire`

            :meth:`.Session.refresh`

            :meth:`_orm.Query.populate_existing`

        """
        # expire every state in the identity map directly; no cascade
        # rules apply here, unlike Session.expire()
        for state in self.identity_map.all_states():
            state._expire(state.dict, self.identity_map._modified)
    def expire(
        self, instance: object, attribute_names: Optional[Iterable[str]] = None
    ) -> None:
        """Expire the attributes on an instance.

        Marks the attributes of an instance as out of date. When an expired
        attribute is next accessed, a query will be issued to the
        :class:`.Session` object's current transactional context in order to
        load all expired attributes for the given instance.   Note that
        a highly isolated transaction will return the same values as were
        previously read in that same transaction, regardless of changes
        in database state outside of that transaction.

        To expire all objects in the :class:`.Session` simultaneously,
        use :meth:`Session.expire_all`.

        The :class:`.Session` object's default behavior is to
        expire all state whenever the :meth:`Session.rollback`
        or :meth:`Session.commit` methods are called, so that new
        state can be loaded for the new transaction.   For this reason,
        calling :meth:`Session.expire` only makes sense for the specific
        case that a non-ORM SQL statement was emitted in the current
        transaction.

        :param instance: The instance to be refreshed.
        :param attribute_names: optional list of string attribute names
          indicating a subset of attributes to be expired.

        .. seealso::

            :ref:`session_expire` - introductory material

            :meth:`.Session.expire`

            :meth:`.Session.refresh`

            :meth:`_orm.Query.populate_existing`

        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE as err:
            # object is not mapped / has no instrumentation state
            raise exc.UnmappedInstanceError(instance) from err
        self._expire_state(state, attribute_names)
def _expire_state(
self,
state: InstanceState[Any],
attribute_names: Optional[Iterable[str]],
) -> None:
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(
state.manager.mapper.cascade_iterator("refresh-expire", state)
)
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(
self, state: InstanceState[Any], autoflush: Optional[bool] = None
) -> None:
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach(self)
    def expunge(self, instance: object) -> None:
        """Remove the `instance` from this ``Session``.

        This will free all internal references to the instance.  Cascading
        will be applied according to the *expunge* cascade rule.

        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE as err:
            raise exc.UnmappedInstanceError(instance) from err
        # only objects actually owned by this session may be expunged
        if state.session_id is not self.hash_key:
            raise sa_exc.InvalidRequestError(
                "Instance %s is not present in this Session" % state_str(state)
            )

        # apply the "expunge" cascade to related objects as well
        cascaded = list(
            state.manager.mapper.cascade_iterator("expunge", state)
        )
        self._expunge_states([state] + [st_ for o, m, st_, dct_ in cascaded])
    def _expunge_states(
        self, states: Iterable[InstanceState[Any]], to_transient: bool = False
    ) -> None:
        # Remove each state from whichever collection currently tracks it:
        # pending (_new), persistent (identity map + _deleted), or the
        # transaction snapshot for already-flushed deletions.
        for state in states:
            if state in self._new:
                self._new.pop(state)
            elif self.identity_map.contains_state(state):
                self.identity_map.safe_discard(state)
                self._deleted.pop(state, None)
            elif self._transaction:
                # state is "detached" from being deleted, but still present
                # in the transaction snapshot
                self._transaction._deleted.pop(state, None)
        # ``to_transient`` additionally strips the identity key so the
        # objects return to the transient state
        statelib.InstanceState._detach_states(
            states, self, to_transient=to_transient
        )
    def _register_persistent(self, states: Set[InstanceState[Any]]) -> None:
        """Register all persistent objects from a flush.

        This is used both for pending objects moving to the persistent
        state as well as already persistent objects.

        """
        # resolve the event hook once, outside the loop
        pending_to_persistent = self.dispatch.pending_to_persistent or None

        for state in states:
            mapper = _state_mapper(state)

            # prevent against last minute dereferences of the object
            obj = state.obj()
            if obj is not None:
                instance_key = mapper._identity_key_from_state(state)

                # reject identity keys that are entirely NULL, or
                # partially NULL when partial PKs are not allowed
                if (
                    _none_set.intersection(instance_key[1])
                    and not mapper.allow_partial_pks
                    or _none_set.issuperset(instance_key[1])
                ):
                    raise exc.FlushError(
                        "Instance %s has a NULL identity key. If this is an "
                        "auto-generated value, check that the database table "
                        "allows generation of new primary key values, and "
                        "that the mapped Column object is configured to "
                        "expect these generated values. Ensure also that "
                        "this flush() is not occurring at an inappropriate "
                        "time, such as within a load() event."
                        % state_str(state)
                    )

                if state.key is None:
                    state.key = instance_key
                elif state.key != instance_key:
                    # primary key switch. use safe_discard() in case another
                    # state has already replaced this one in the identity
                    # map (see test/orm/test_naturalpks.py ReversePKsTest)
                    self.identity_map.safe_discard(state)
                    trans = self._transaction
                    assert trans is not None
                    # preserve the original key across multiple switches
                    # within the same transaction, for rollback
                    if state in trans._key_switches:
                        orig_key = trans._key_switches[state][0]
                    else:
                        orig_key = state.key
                    trans._key_switches[state] = (
                        orig_key,
                        instance_key,
                    )
                    state.key = instance_key

                # there can be an existing state in the identity map
                # that is replaced when the primary keys of two instances
                # are swapped; see test/orm/test_naturalpks.py -> test_reverse
                old = self.identity_map.replace(state)
                if (
                    old is not None
                    and mapper._identity_key_from_state(old) == instance_key
                    and old.obj() is not None
                ):
                    util.warn(
                        "Identity map already had an identity for %s, "
                        "replacing it with newly flushed object. Are there "
                        "load operations occurring inside of an event handler "
                        "within the flush?" % (instance_key,)
                    )
                state._orphaned_outside_of_session = False

        # mark all states committed / clean in one pass
        statelib.InstanceState._commit_all_states(
            ((state, state.dict) for state in states), self.identity_map
        )

        self._register_altered(states)

        if pending_to_persistent is not None:
            for state in states.intersection(self._new):
                pending_to_persistent(self, state)

        # remove from new last, might be the last strong ref
        for state in set(states).intersection(self._new):
            self._new.pop(state)
def _register_altered(self, states: Iterable[InstanceState[Any]]) -> None:
if self._transaction:
for state in states:
if state in self._new:
self._transaction._new[state] = True
else:
self._transaction._dirty[state] = True
    def _remove_newly_deleted(
        self, states: Iterable[InstanceState[Any]]
    ) -> None:
        """Move states that were just DELETEd by a flush into the
        'deleted' state, firing the persistent_to_deleted event."""
        # resolve the event hook once, outside the loop
        persistent_to_deleted = self.dispatch.persistent_to_deleted or None
        for state in states:
            if self._transaction:
                # record in the snapshot so rollback can restore the row
                self._transaction._deleted[state] = True

            if persistent_to_deleted is not None:
                # get a strong reference before we pop out of
                # self._deleted
                obj = state.obj()  # noqa

            self.identity_map.safe_discard(state)
            self._deleted.pop(state, None)
            state._deleted = True
            # can't call state._detach() here, because this state
            # is still in the transaction snapshot and needs to be
            # tracked as part of that
            if persistent_to_deleted is not None:
                persistent_to_deleted(self, state)
    def add(self, instance: object, *, _warn: bool = True) -> None:
        """Place an object into this :class:`_orm.Session`.

        Objects that are in the :term:`transient` state when passed to the
        :meth:`_orm.Session.add` method will move to the
        :term:`pending` state, until the next flush, at which point they
        will move to the :term:`persistent` state.

        Objects that are in the :term:`detached` state when passed to the
        :meth:`_orm.Session.add` method will move to the :term:`persistent`
        state directly.

        If the transaction used by the :class:`_orm.Session` is rolled back,
        objects which were transient when they were passed to
        :meth:`_orm.Session.add` will be moved back to the
        :term:`transient` state, and will no longer be present within this
        :class:`_orm.Session`.

        .. seealso::

            :meth:`_orm.Session.add_all`

            :ref:`session_adding` - at :ref:`session_basics`

        """
        # ``_warn=False`` is used by add_all() so the warning is emitted
        # only once per batch
        if _warn and self._warn_on_events:
            self._flush_warning("Session.add()")

        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE as err:
            raise exc.UnmappedInstanceError(instance) from err

        self._save_or_update_state(state)
    def add_all(self, instances: Iterable[object]) -> None:
        """Add the given collection of instances to this :class:`_orm.Session`.

        See the documentation for :meth:`_orm.Session.add` for a general
        behavioral description.

        .. seealso::

            :meth:`_orm.Session.add`

            :ref:`session_adding` - at :ref:`session_basics`

        """
        # warn once for the whole batch, then suppress the per-object
        # warning inside add()
        if self._warn_on_events:
            self._flush_warning("Session.add_all()")

        for instance in instances:
            self.add(instance, _warn=False)
    def _save_or_update_state(self, state: InstanceState[Any]) -> None:
        """Attach ``state`` to this session and apply the save-update
        cascade to its related objects."""
        state._orphaned_outside_of_session = False
        self._save_or_update_impl(state)

        mapper = _state_mapper(state)
        # halt_on avoids re-cascading into states already in this session
        for o, m, st_, dct_ in mapper.cascade_iterator(
            "save-update", state, halt_on=self._contains_state
        ):
            self._save_or_update_impl(st_)
    def delete(self, instance: object) -> None:
        """Mark an instance as deleted.

        The object is assumed to be either :term:`persistent` or
        :term:`detached` when passed; after the method is called, the
        object will remain in the :term:`persistent` state until the next
        flush proceeds.  During this time, the object will also be a member
        of the :attr:`_orm.Session.deleted` collection.

        When the next flush proceeds, the object will move to the
        :term:`deleted` state, indicating a ``DELETE`` statement was emitted
        for its row within the current transaction.   When the transaction
        is successfully committed,
        the deleted object is moved to the :term:`detached` state and is
        no longer present within this :class:`_orm.Session`.

        .. seealso::

            :ref:`session_deleting` - at :ref:`session_basics`

            :meth:`.Session.delete_all` - multiple instance version

        """
        if self._warn_on_events:
            self._flush_warning("Session.delete()")

        # head=True: this is the head of the delete cascade
        self._delete_impl(object_state(instance), instance, head=True)
    def delete_all(self, instances: Iterable[object]) -> None:
        """Calls :meth:`.Session.delete` on multiple instances.

        .. seealso::

            :meth:`.Session.delete` - main documentation on delete

        .. versionadded:: 2.1

        """
        # warn once for the whole batch, then go straight to the internal
        # implementation rather than delete(), which would warn per object
        if self._warn_on_events:
            self._flush_warning("Session.delete_all()")

        for instance in instances:
            self._delete_impl(object_state(instance), instance, head=True)
    def _delete_impl(
        self, state: InstanceState[Any], obj: object, head: bool
    ) -> None:
        # ``head=True`` means this is the object the user passed; cascaded
        # deletes recurse here with head=False and are tolerant of
        # non-persisted states.
        if state.key is None:
            if head:
                raise sa_exc.InvalidRequestError(
                    "Instance '%s' is not persisted" % state_str(state)
                )
            else:
                return

        to_attach = self._before_attach(state, obj)

        if state in self._deleted:
            # already marked for deletion; nothing further to do
            return

        self.identity_map.add(state)

        if to_attach:
            self._after_attach(state, obj)

        if head:
            # grab the cascades before adding the item to the deleted list
            # so that autoflush does not delete the item
            # the strong reference to the instance itself is significant here
            cascade_states = list(
                state.manager.mapper.cascade_iterator("delete", state)
            )
        else:
            cascade_states = None

        self._deleted[state] = obj

        if head:
            if TYPE_CHECKING:
                assert cascade_states is not None
            for o, m, st_, dct_ in cascade_states:
                self._delete_impl(st_, o, False)
    def get(
        self,
        entity: _EntityBindKey[_O],
        ident: _PKIdentityArgument,
        *,
        options: Optional[Sequence[ORMOption]] = None,
        populate_existing: bool = False,
        with_for_update: ForUpdateParameter = None,
        identity_token: Optional[Any] = None,
        execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
        bind_arguments: Optional[_BindArguments] = None,
    ) -> Optional[_O]:
        """Return an instance based on the given primary key identifier,
        or ``None`` if not found.

        E.g.::

            my_user = session.get(User, 5)

            some_object = session.get(VersionedFoo, (5, 10))

            some_object = session.get(VersionedFoo, {"id": 5, "version_id": 10})

        .. versionadded:: 1.4 Added :meth:`_orm.Session.get`, which is moved
           from the now legacy :meth:`_orm.Query.get` method.

        :meth:`_orm.Session.get` is special in that it provides direct
        access to the identity map of the :class:`.Session`.
        If the given primary key identifier is present
        in the local identity map, the object is returned
        directly from this collection and no SQL is emitted,
        unless the object has been marked fully expired.
        If not present,
        a SELECT is performed in order to locate the object.

        :meth:`_orm.Session.get` also will perform a check if
        the object is present in the identity map and
        marked as expired - a SELECT
        is emitted to refresh the object as well as to
        ensure that the row is still present.
        If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.

        :param entity: a mapped class or :class:`.Mapper` indicating the
         type of entity to be loaded.

        :param ident: A scalar, tuple, or dictionary representing the
         primary key.  For a composite (e.g. multiple column) primary key,
         a tuple or dictionary should be passed.

         For a single-column primary key, the scalar calling form is typically
         the most expedient.  If the primary key of a row is the value "5",
         the call looks like::

            my_object = session.get(SomeClass, 5)

         The tuple form contains primary key values typically in
         the order in which they correspond to the mapped
         :class:`_schema.Table`
         object's primary key columns, or if the
         :paramref:`_orm.Mapper.primary_key` configuration parameter were
         used, in
         the order used for that parameter. For example, if the primary key
         of a row is represented by the integer
         digits "5, 10" the call would look like::

             my_object = session.get(SomeClass, (5, 10))

         The dictionary form should include as keys the mapped attribute names
         corresponding to each element of the primary key.  If the mapped class
         has the attributes ``id``, ``version_id`` as the attributes which
         store the object's primary key value, the call would look like::

            my_object = session.get(SomeClass, {"id": 5, "version_id": 10})

        :param options: optional sequence of loader options which will be
         applied to the query, if one is emitted.

        :param populate_existing: causes the method to unconditionally emit
         a SQL query and refresh the object with the newly loaded data,
         regardless of whether or not the object is already present.

        :param with_for_update: optional boolean ``True`` indicating FOR UPDATE
         should be used, or may be a dictionary containing flags to
         indicate a more specific set of FOR UPDATE flags for the SELECT;
         flags should match the parameters of
         :meth:`_query.Query.with_for_update`.
         Supersedes the :paramref:`.Session.refresh.lockmode` parameter.

        :param execution_options: optional dictionary of execution options,
         which will be associated with the query execution if one is emitted.
         This dictionary can provide a subset of the options that are
         accepted by :meth:`_engine.Connection.execution_options`, and may
         also provide additional options understood only in an ORM context.

         .. versionadded:: 1.4.29

         .. seealso::

            :ref:`orm_queryguide_execution_options` - ORM-specific execution
            options

        :param bind_arguments: dictionary of additional arguments to determine
         the bind.  May include "mapper", "bind", or other custom arguments.
         Contents of this dictionary are passed to the
         :meth:`.Session.get_bind` method.

         .. versionadded:: 2.0.0rc1

        :return: The object instance, or ``None``.

        """  # noqa: E501
        # all work is performed by _get_impl(), parameterized here with
        # the primary-key-identity loader
        return self._get_impl(
            entity,
            ident,
            loading._load_on_pk_identity,
            options=options,
            populate_existing=populate_existing,
            with_for_update=with_for_update,
            identity_token=identity_token,
            execution_options=execution_options,
            bind_arguments=bind_arguments,
        )
def get_one(
self,
entity: _EntityBindKey[_O],
ident: _PKIdentityArgument,
*,
options: Optional[Sequence[ORMOption]] = None,
populate_existing: bool = False,
with_for_update: ForUpdateParameter = None,
identity_token: Optional[Any] = None,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
) -> _O:
"""Return exactly one instance based on the given primary key
identifier, or raise an exception if not found.
Raises :class:`_exc.NoResultFound` if the query selects no rows.
For a detailed documentation of the arguments see the
method :meth:`.Session.get`.
.. versionadded:: 2.0.22
:return: The object instance.
.. seealso::
:meth:`.Session.get` - equivalent method that instead
returns ``None`` if no row was found with the provided primary
key
"""
instance = self.get(
entity,
ident,
options=options,
populate_existing=populate_existing,
with_for_update=with_for_update,
identity_token=identity_token,
execution_options=execution_options,
bind_arguments=bind_arguments,
)
if instance is None:
raise sa_exc.NoResultFound(
"No row was found when one was required"
)
return instance
    def _get_impl(
        self,
        entity: _EntityBindKey[_O],
        primary_key_identity: _PKIdentityArgument,
        db_load_fn: Callable[..., _O],
        *,
        options: Optional[Sequence[ExecutableOption]] = None,
        populate_existing: bool = False,
        with_for_update: ForUpdateParameter = None,
        identity_token: Optional[Any] = None,
        execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
        bind_arguments: Optional[_BindArguments] = None,
    ) -> Optional[_O]:
        """Shared implementation for :meth:`.Session.get` and
        :meth:`.Session.get_one`.

        Normalizes ``primary_key_identity`` (composite value, scalar,
        tuple, or dict form) into a list ordered like the mapper's
        primary key, consults the identity map when a fresh SQL load is
        not mandated, and otherwise performs the load via
        ``db_load_fn``.

        :param db_load_fn: callable invoked to emit the SELECT when the
         identity map cannot satisfy the request.
        :return: the instance, or ``None`` if not found.
        """
        # convert composite types to individual args
        if (
            is_composite_class(primary_key_identity)
            and type(primary_key_identity)
            in descriptor_props._composite_getters
        ):
            getter = descriptor_props._composite_getters[
                type(primary_key_identity)
            ]
            primary_key_identity = getter(primary_key_identity)
        mapper: Optional[Mapper[_O]] = inspect(entity)
        if mapper is None or not mapper.is_mapper:
            raise sa_exc.ArgumentError(
                "Expected mapped class or mapper, got: %r" % entity
            )
        is_dict = isinstance(primary_key_identity, dict)
        if not is_dict:
            # scalar / tuple form: coerce to list and verify arity
            # against the mapper's primary key
            primary_key_identity = util.to_list(
                primary_key_identity, default=[None]
            )
            if len(primary_key_identity) != len(mapper.primary_key):
                raise sa_exc.InvalidRequestError(
                    "Incorrect number of values in identifier to formulate "
                    "primary key for session.get(); primary key columns "
                    "are %s" % ",".join("'%s'" % c for c in mapper.primary_key)
                )
        if is_dict:
            # dict form: translate synonym names to the underlying
            # attribute names, then order values by the mapper's
            # identity key properties
            pk_synonyms = mapper._pk_synonyms
            if pk_synonyms:
                correct_keys = set(pk_synonyms).intersection(
                    primary_key_identity
                )
                if correct_keys:
                    # copy first so the caller's dict is not mutated
                    primary_key_identity = dict(primary_key_identity)
                    for k in correct_keys:
                        primary_key_identity[pk_synonyms[k]] = (
                            primary_key_identity[k]
                        )
            try:
                primary_key_identity = list(
                    primary_key_identity[prop.key]
                    for prop in mapper._identity_key_props
                )
            except KeyError as err:
                raise sa_exc.InvalidRequestError(
                    "Incorrect names of values in identifier to formulate "
                    "primary key for session.get(); primary key attribute "
                    "names are %s (synonym names are also accepted)"
                    % ",".join(
                        "'%s'" % prop.key
                        for prop in mapper._identity_key_props
                    )
                ) from err
        if (
            not populate_existing
            and not mapper.always_refresh
            and with_for_update is None
        ):
            # a fresh SQL load is not mandated, so try the identity
            # map first
            instance = self._identity_lookup(
                mapper,
                primary_key_identity,
                identity_token=identity_token,
                execution_options=execution_options,
                bind_arguments=bind_arguments,
            )
            if instance is not None:
                # reject calls for id in identity map but class
                # mismatch.
                if not isinstance(instance, mapper.class_):
                    return None
                return instance
            # TODO: this was being tested before, but this is not possible
            assert instance is not LoaderCallableStatus.PASSIVE_CLASS_MISMATCH
        load_options = context.QueryContext.default_load_options
        if populate_existing:
            load_options += {"_populate_existing": populate_existing}
        statement = sql.select(mapper)
        if with_for_update is not None:
            statement._for_update_arg = ForUpdateArg._from_argument(
                with_for_update
            )
        if options:
            statement = statement.options(*options)
        if self.execution_options:
            # session-level execution options form the base; per-call
            # options take precedence via union()
            execution_options = self.execution_options.union(execution_options)
        return db_load_fn(
            self,
            statement,
            primary_key_identity,
            load_options=load_options,
            identity_token=identity_token,
            execution_options=execution_options,
            bind_arguments=bind_arguments,
        )
    def merge(
        self,
        instance: _O,
        *,
        load: bool = True,
        options: Optional[Sequence[ORMOption]] = None,
    ) -> _O:
        """Copy the state of a given instance into a corresponding instance
        within this :class:`.Session`.

        :meth:`.Session.merge` examines the primary key attributes of the
        source instance, and attempts to reconcile it with an instance of the
        same primary key in the session.   If not found locally, it attempts
        to load the object from the database based on primary key, and if
        none can be located, creates a new instance.  The state of each
        attribute on the source instance is then copied to the target
        instance.  The resulting target instance is then returned by the
        method; the original source instance is left unmodified, and
        un-associated with the :class:`.Session` if not already.

        This operation cascades to associated instances if the association is
        mapped with ``cascade="merge"``.

        See :ref:`unitofwork_merging` for a detailed discussion of merging.

        :param instance: Instance to be merged.
        :param load: Boolean, when False, :meth:`.merge` switches into
         a "high performance" mode which causes it to forego emitting history
         events as well as all database access.  This flag is used for
         cases such as transferring graphs of objects into a :class:`.Session`
         from a second level cache, or to transfer just-loaded objects
         into the :class:`.Session` owned by a worker thread or process
         without re-querying the database.

         The ``load=False`` use case adds the caveat that the given
         object has to be in a "clean" state, that is, has no pending changes
         to be flushed - even if the incoming object is detached from any
         :class:`.Session`.   This is so that when
         the merge operation populates local attributes and
         cascades to related objects and
         collections, the values can be "stamped" onto the
         target object as is, without generating any history or attribute
         events, and without the need to reconcile the incoming data with
         any existing related objects or collections that might not
         be loaded.  The resulting objects from ``load=False`` are always
         produced as "clean", so it is only appropriate that the given objects
         should be "clean" as well, else this suggests a mis-use of the
         method.
        :param options: optional sequence of loader options which will be
         applied to the :meth:`_orm.Session.get` method when the merge
         operation loads the existing version of the object from the database.

         .. versionadded:: 1.4.24

        .. seealso::

            :func:`.make_transient_to_detached` - provides for an alternative
            means of "merging" a single object into the :class:`.Session`

            :meth:`.Session.merge_all` - multiple instance version

        """
        if self._warn_on_events:
            # merge() was invoked from within a flush event handler
            self._flush_warning("Session.merge()")
        if load:
            # flush current contents if we expect to load data
            self._autoflush()
        # autoflush stays disabled for the duration of the recursive
        # merge so intermediate states are not flushed mid-operation
        with self.no_autoflush:
            return self._merge(
                object_state(instance),
                attributes.instance_dict(instance),
                load=load,
                options=options,
                _recursive={},
                _resolve_conflict_map={},
            )
    def merge_all(
        self,
        instances: Iterable[_O],
        *,
        load: bool = True,
        options: Optional[Sequence[ORMOption]] = None,
    ) -> Sequence[_O]:
        """Calls :meth:`.Session.merge` on multiple instances.

        The ``load`` and ``options`` parameters are applied to each
        instance in turn; see :meth:`.Session.merge` for their meaning.

        .. seealso::

            :meth:`.Session.merge` - main documentation on merge

        .. versionadded:: 2.1

        """
        if self._warn_on_events:
            # merge_all() was invoked from within a flush event handler
            self._flush_warning("Session.merge_all()")
        if load:
            # flush current contents if we expect to load data
            self._autoflush()
        # note each merge receives fresh recursion / conflict maps,
        # same as calling Session.merge() in a loop
        return [
            self._merge(
                object_state(instance),
                attributes.instance_dict(instance),
                load=load,
                options=options,
                _recursive={},
                _resolve_conflict_map={},
            )
            for instance in instances
        ]
    def _merge(
        self,
        state: InstanceState[_O],
        state_dict: _InstanceDict,
        *,
        options: Optional[Sequence[ORMOption]] = None,
        load: bool,
        _recursive: Dict[Any, object],
        _resolve_conflict_map: Dict[_IdentityKeyType[Any], object],
    ) -> _O:
        """Recursive implementation for :meth:`.Session.merge`.

        :param _recursive: maps source states already visited during
         this merge operation to their merged counterparts, guarding
         against cycles in the object graph.
        :param _resolve_conflict_map: maps identity keys to merged
         objects, so that distinct source objects sharing the same
         primary key resolve to a single target instance.
        """
        mapper: Mapper[_O] = _state_mapper(state)
        if state in _recursive:
            # already merged earlier in this operation (object graph cycle)
            return cast(_O, _recursive[state])
        new_instance = False
        key = state.key
        merged: Optional[_O]
        if key is None:
            # transient source object
            if state in self._new:
                util.warn(
                    "Instance %s is already pending in this Session yet is "
                    "being merged again; this is probably not what you want "
                    "to do" % state_str(state)
                )
            if not load:
                raise sa_exc.InvalidRequestError(
                    "merge() with load=False option does not support "
                    "objects transient (i.e. unpersisted) objects.  flush() "
                    "all changes on mapped instances before merging with "
                    "load=False."
                )
            key = mapper._identity_key_from_state(state)
            # the derived key can be used for lookup only when populated
            # (or partially populated, if the mapper allows partial pks)
            key_is_persistent = LoaderCallableStatus.NEVER_SET not in key[
                1
            ] and (
                not _none_set.intersection(key[1])
                or (
                    mapper.allow_partial_pks
                    and not _none_set.issuperset(key[1])
                )
            )
        else:
            key_is_persistent = True
        merged = self.identity_map.get(key)
        if merged is None:
            if key_is_persistent and key in _resolve_conflict_map:
                # another source object with the same identity was
                # already merged in this operation
                merged = cast(_O, _resolve_conflict_map[key])
            elif not load:
                if state.modified:
                    raise sa_exc.InvalidRequestError(
                        "merge() with load=False option does not support "
                        "objects marked as 'dirty'.  flush() all changes on "
                        "mapped instances before merging with load=False."
                    )
                merged = mapper.class_manager.new_instance()
                merged_state = attributes.instance_state(merged)
                merged_state.key = key
                self._update_impl(merged_state)
                new_instance = True
            elif key_is_persistent:
                # load the existing row by primary key
                merged = self.get(
                    mapper.class_,
                    key[1],
                    identity_token=key[2],
                    options=options,
                )
        if merged is None:
            # no existing target found anywhere; create a fresh instance
            merged = mapper.class_manager.new_instance()
            merged_state = attributes.instance_state(merged)
            merged_dict = attributes.instance_dict(merged)
            new_instance = True
            self._save_or_update_state(merged_state)
        else:
            merged_state = attributes.instance_state(merged)
            merged_dict = attributes.instance_dict(merged)
        _recursive[state] = merged
        _resolve_conflict_map[key] = merged
        # check that we didn't just pull the exact same
        # state out.
        if state is not merged_state:
            # version check if applicable
            if mapper.version_id_col is not None:
                existing_version = mapper._get_state_attr_by_column(
                    state,
                    state_dict,
                    mapper.version_id_col,
                    passive=PassiveFlag.PASSIVE_NO_INITIALIZE,
                )
                merged_version = mapper._get_state_attr_by_column(
                    merged_state,
                    merged_dict,
                    mapper.version_id_col,
                    passive=PassiveFlag.PASSIVE_NO_INITIALIZE,
                )
                if (
                    existing_version
                    is not LoaderCallableStatus.PASSIVE_NO_RESULT
                    and merged_version
                    is not LoaderCallableStatus.PASSIVE_NO_RESULT
                    and existing_version != merged_version
                ):
                    raise exc.StaleDataError(
                        "Version id '%s' on merged state %s "
                        "does not match existing version '%s'. "
                        "Leave the version attribute unset when "
                        "merging to update the most recent version."
                        % (
                            existing_version,
                            state_str(merged_state),
                            merged_version,
                        )
                    )
            merged_state.load_path = state.load_path
            merged_state.load_options = state.load_options
            # since we are copying load_options, we need to copy
            # the callables_ that would have been generated by those
            # load_options.
            # assumes that the callables we put in state.callables_
            # are not instance-specific (which they should not be)
            merged_state._copy_callables(state)
            # copy each mapped attribute, cascading per-property
            for prop in mapper.iterate_properties:
                prop.merge(
                    self,
                    state,
                    state_dict,
                    merged_state,
                    merged_dict,
                    load,
                    _recursive,
                    _resolve_conflict_map,
                )
        if not load:
            # remove any history
            merged_state._commit_all(merged_dict, self.identity_map)
            merged_state.manager.dispatch._sa_event_merge_wo_load(
                merged_state, None
            )
        if new_instance:
            merged_state.manager.dispatch.load(merged_state, None)
        return merged
def _validate_persistent(self, state: InstanceState[Any]) -> None:
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session"
% state_str(state)
)
    def _save_impl(self, state: InstanceState[Any]) -> None:
        """Register a transient *state* as pending (to be INSERTed)."""
        if state.key is not None:
            raise sa_exc.InvalidRequestError(
                "Object '%s' already has an identity - "
                "it can't be registered as pending" % state_str(state)
            )
        obj = state.obj()
        to_attach = self._before_attach(state, obj)
        if state not in self._new:
            self._new[state] = obj
            # arrival ordering; len() is taken after insertion, so the
            # first pending state gets insert_order 1
            state.insert_order = len(self._new)
        if to_attach:
            self._after_attach(state, obj)
    def _update_impl(
        self, state: InstanceState[Any], revert_deletion: bool = False
    ) -> None:
        """Place *state* into the persistent set of this Session.

        :param revert_deletion: when True, the state is being restored
         from a rolled-back deletion; the identity map entry is replaced
         rather than added, and a ``deleted_to_persistent`` event is
         emitted if the state was already attached.
        """
        if state.key is None:
            raise sa_exc.InvalidRequestError(
                "Instance '%s' is not persisted" % state_str(state)
            )
        if state._deleted:
            if revert_deletion:
                if not state._attached:
                    return
                del state._deleted
            else:
                raise sa_exc.InvalidRequestError(
                    "Instance '%s' has been deleted.  "
                    "Use the make_transient() "
                    "function to send this object back "
                    "to the transient state." % state_str(state)
                )
        obj = state.obj()
        # check for late gc
        if obj is None:
            return
        to_attach = self._before_attach(state, obj)
        self._deleted.pop(state, None)
        if revert_deletion:
            self.identity_map.replace(state)
        else:
            self.identity_map.add(state)
        if to_attach:
            self._after_attach(state, obj)
        elif revert_deletion:
            # already attached; still announce the state transition
            self.dispatch.deleted_to_persistent(self, state)
def _save_or_update_impl(self, state: InstanceState[Any]) -> None:
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
    def enable_relationship_loading(self, obj: object) -> None:
        """Associate an object with this :class:`.Session` for related
        object loading.

        .. warning::

            :meth:`.enable_relationship_loading` exists to serve special
            use cases and is not recommended for general use.

        Accesses of attributes mapped with :func:`_orm.relationship`
        will attempt to load a value from the database using this
        :class:`.Session` as the source of connectivity.  The values
        will be loaded based on foreign key and primary key values
        present on this object - if not present, then those relationships
        will be unavailable.

        The object will be attached to this session, but will
        **not** participate in any persistence operations; its state
        for almost all purposes will remain either "transient" or
        "detached", except for the case of relationship loading.

        Also note that backrefs will often not work as expected.
        Altering a relationship-bound attribute on the target object
        may not fire off a backref event, if the effective value
        is what was already loaded from a foreign-key-holding value.

        The :meth:`.Session.enable_relationship_loading` method is
        similar to the ``load_on_pending`` flag on :func:`_orm.relationship`.
        Unlike that flag, :meth:`.Session.enable_relationship_loading` allows
        an object to remain transient while still being able to load
        related items.

        To make a transient object associated with a :class:`.Session`
        via :meth:`.Session.enable_relationship_loading` pending, add
        it to the :class:`.Session` using :meth:`.Session.add` normally.
        If the object instead represents an existing identity in the database,
        it should be merged using :meth:`.Session.merge`.

        :meth:`.Session.enable_relationship_loading` does not improve
        behavior when the ORM is used normally - object references should be
        constructed at the object level, not at the foreign key level, so
        that they are present in an ordinary way before flush()
        proceeds.  This method is not intended for general use.

        .. seealso::

            :paramref:`_orm.relationship.load_on_pending` - this flag
            allows per-relationship loading of many-to-ones on items that
            are pending.

            :func:`.make_transient_to_detached` - allows for an object to
            be added to a :class:`.Session` without SQL emitted, which then
            will unexpire attributes on access.

        """
        try:
            state = attributes.instance_state(obj)
        except exc.NO_STATE as err:
            raise exc.UnmappedInstanceError(obj) from err
        to_attach = self._before_attach(state, obj)
        # flag allowing relationship loaders to proceed even though the
        # object is transient / pending
        state._load_pending = True
        if to_attach:
            self._after_attach(state, obj)
def _before_attach(self, state: InstanceState[Any], obj: object) -> bool:
self._autobegin_t()
if state.session_id == self.hash_key:
return False
if state.session_id and state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')"
% (state_str(state), state.session_id, self.hash_key)
)
self.dispatch.before_attach(self, state)
return True
def _after_attach(self, state: InstanceState[Any], obj: object) -> None:
state.session_id = self.hash_key
if state.modified and state._strong_obj is None:
state._strong_obj = obj
self.dispatch.after_attach(self, state)
if state.key:
self.dispatch.detached_to_persistent(self, state)
else:
self.dispatch.transient_to_pending(self, state)
def __contains__(self, instance: object) -> bool:
"""Return True if the instance is associated with this session.
The instance may be pending or persistent within the Session for a
result of True.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
raise exc.UnmappedInstanceError(instance) from err
return self._contains_state(state)
def __iter__(self) -> Iterator[object]:
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(
list(self._new.values()) + list(self.identity_map.values())
)
def _contains_state(self, state: InstanceState[Any]) -> bool:
return state in self._new or self.identity_map.contains_state(state)
    def flush(self, objects: Optional[Sequence[Any]] = None) -> None:
        """Flush all the object changes to the database.

        Writes out all pending object creations, deletions and modifications
        to the database as INSERTs, DELETEs, UPDATEs, etc.  Operations are
        automatically ordered by the Session's unit of work dependency
        solver.

        Database operations will be issued in the current transactional
        context and do not affect the state of the transaction, unless an
        error occurs, in which case the entire transaction is rolled back.
        You may flush() as often as you like within a transaction to move
        changes from Python to the database's transaction buffer.

        :param objects: Optional; restricts the flush operation to operate
          only on elements that are in the given collection.

          This feature is for an extremely narrow set of use cases where
          particular objects may need to be operated upon before the
          full flush() occurs.  It is not intended for general use.

          .. deprecated:: 2.1

        """
        if self._flushing:
            # guard against reentrant flush, e.g. from within a flush event
            raise sa_exc.InvalidRequestError("Session is already flushing")
        if self._is_clean():
            return
        try:
            # flag is reset in all cases, including error exits
            self._flushing = True
            self._flush(objects)
        finally:
            self._flushing = False
def _flush_warning(self, method: Any) -> None:
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead." % method
)
def _is_clean(self) -> bool:
return (
not self.identity_map.check_modified()
and not self._deleted
and not self._new
)
    # have this here since it otherwise causes issues with the proxy
    # method generation
    @deprecated_params(
        objects=(
            "2.1",
            "The `objects` parameter of `Session.flush` is deprecated",
        )
    )
    def _flush(self, objects: Optional[Sequence[object]] = None) -> None:
        """Carry out the unit-of-work flush.

        Collects new, dirty and deleted states, registers them with a
        :class:`.UOWTransaction`, and executes it inside a
        subtransaction which is committed on success or rolled back on
        failure.

        :param objects: optional collection restricting which objects
         are considered for the flush (deprecated).
        """
        dirty = self._dirty_states
        if not dirty and not self._deleted and not self._new:
            self.identity_map._modified.clear()
            return
        flush_context = UOWTransaction(self)
        if self.dispatch.before_flush:
            self.dispatch.before_flush(self, flush_context, objects)
            # re-establish "dirty states" in case the listeners
            # added
            dirty = self._dirty_states
        deleted = set(self._deleted)
        new = set(self._new)
        dirty = set(dirty).difference(deleted)
        # create the set of all objects we want to operate upon
        if objects:
            # specific list passed in
            objset = set()
            for o in objects:
                try:
                    state = attributes.instance_state(o)
                except exc.NO_STATE as err:
                    raise exc.UnmappedInstanceError(o) from err
                objset.add(state)
        else:
            objset = None
        # store objects whose fate has been decided
        processed = set()
        # put all saves/updates into the flush context.  detect top-level
        # orphans and throw them into deleted.
        if objset:
            proc = new.union(dirty).intersection(objset).difference(deleted)
        else:
            proc = new.union(dirty).difference(deleted)
        for state in proc:
            is_orphan = _state_mapper(state)._is_orphan(state)
            is_persistent_orphan = is_orphan and state.has_identity
            if (
                is_orphan
                and not is_persistent_orphan
                and state._orphaned_outside_of_session
            ):
                # transient orphan; drop it from the session entirely
                self._expunge_states([state])
            else:
                _reg = flush_context.register_object(
                    state, isdelete=is_persistent_orphan
                )
                assert _reg, "Failed to add object to the flush context!"
                processed.add(state)
        # put all remaining deletes into the flush context.
        if objset:
            proc = deleted.intersection(objset).difference(processed)
        else:
            proc = deleted.difference(processed)
        for state in proc:
            _reg = flush_context.register_object(state, isdelete=True)
            assert _reg, "Failed to add object to the flush context!"
        if not flush_context.has_work:
            return
        flush_context.transaction = transaction = self._autobegin_t()._begin()
        try:
            self._warn_on_events = True
            try:
                flush_context.execute()
            finally:
                self._warn_on_events = False
            self.dispatch.after_flush(self, flush_context)
            flush_context.finalize_flush_changes()
            if not objects and self.identity_map._modified:
                # changes accumulated by inner-flush event handlers on
                # previously-clean instances are discarded, with a warning
                len_ = len(self.identity_map._modified)
                statelib.InstanceState._commit_all_states(
                    [
                        (state, state.dict)
                        for state in self.identity_map._modified
                    ],
                    instance_dict=self.identity_map,
                )
                util.warn(
                    "Attribute history events accumulated on %d "
                    "previously clean instances "
                    "within inner-flush event handlers have been "
                    "reset, and will not result in database updates. "
                    "Consider using set_committed_value() within "
                    "inner-flush event handlers to avoid this warning." % len_
                )
            # useful assertions:
            # if not objects:
            #    assert not self.identity_map._modified
            # else:
            #    assert self.identity_map._modified == \
            #            self.identity_map._modified.difference(objects)
            self.dispatch.after_flush_postexec(self, flush_context)
            transaction.commit()
        except:
            with util.safe_reraise():
                transaction.rollback(_capture_exception=True)
    def bulk_save_objects(
        self,
        objects: Iterable[object],
        return_defaults: bool = False,
        update_changed_only: bool = True,
        preserve_order: bool = True,
    ) -> None:
        """Perform a bulk save of the given list of objects.

        .. legacy::

            This method is a legacy feature as of the 2.0 series of
            SQLAlchemy.   For modern bulk INSERT and UPDATE, see
            the sections :ref:`orm_queryguide_bulk_insert` and
            :ref:`orm_queryguide_bulk_update`.

            For general INSERT and UPDATE of existing ORM mapped objects,
            prefer standard :term:`unit of work` data management patterns,
            introduced in the :ref:`unified_tutorial` at
            :ref:`tutorial_orm_data_manipulation`.  SQLAlchemy 2.0
            now uses :ref:`engine_insertmanyvalues` with modern dialects
            which solves previous issues of bulk INSERT slowness.

        :param objects: a sequence of mapped object instances.  The mapped
         objects are persisted as is, and are **not** associated with the
         :class:`.Session` afterwards.

         For each object, whether the object is sent as an INSERT or an
         UPDATE is dependent on the same rules used by the :class:`.Session`
         in traditional operation; if the object has the
         :attr:`.InstanceState.key`
         attribute set, then the object is assumed to be "detached" and
         will result in an UPDATE.  Otherwise, an INSERT is used.

         In the case of an UPDATE, statements are grouped based on which
         attributes have changed, and are thus to be the subject of each
         SET clause.  If ``update_changed_only`` is False, then all
         attributes present within each object are applied to the UPDATE
         statement, which may help in allowing the statements to be grouped
         together into a larger executemany(), and will also reduce the
         overhead of checking history on attributes.

        :param return_defaults: when True, rows that are missing values which
         generate defaults, namely integer primary key defaults and sequences,
         will be inserted **one at a time**, so that the primary key value
         is available.  In particular this will allow joined-inheritance
         and other multi-table mappings to insert correctly without the need
         to provide primary key values ahead of time; however,
         :paramref:`.Session.bulk_save_objects.return_defaults` **greatly
         reduces the performance gains** of the method overall.  It is strongly
         advised to please use the standard :meth:`_orm.Session.add_all`
         approach.

        :param update_changed_only: when True, UPDATE statements are rendered
         based on those attributes in each state that have logged changes.
         When False, all attributes present are rendered into the SET clause
         with the exception of primary key attributes.

        :param preserve_order: when True, the order of inserts and updates
         matches exactly the order in which the objects are given.   When
         False, common types of objects are grouped into inserts
         and updates, to allow for more batching opportunities.

        .. seealso::

            :doc:`queryguide/dml`

            :meth:`.Session.bulk_insert_mappings`

            :meth:`.Session.bulk_update_mappings`

        """
        obj_states: Iterable[InstanceState[Any]]
        obj_states = (attributes.instance_state(obj) for obj in objects)
        if not preserve_order:
            # the purpose of this sort is just so that common mappers
            # and persistence states are grouped together, so that groupby
            # will return a single group for a particular type of mapper.
            # it's not trying to be deterministic beyond that.
            obj_states = sorted(
                obj_states,
                key=lambda state: (id(state.mapper), state.key is not None),
            )

        def grouping_key(
            state: InstanceState[_O],
        ) -> Tuple[Mapper[_O], bool]:
            # (mapper, has-identity); has-identity selects UPDATE vs INSERT
            return (state.mapper, state.key is not None)

        # consecutive states sharing a grouping key are dispatched as a
        # single bulk operation
        for (mapper, isupdate), states in itertools.groupby(
            obj_states, grouping_key
        ):
            self._bulk_save_mappings(
                mapper,
                states,
                isupdate=isupdate,
                isstates=True,
                return_defaults=return_defaults,
                update_changed_only=update_changed_only,
                render_nulls=False,
            )
    def bulk_insert_mappings(
        self,
        mapper: Mapper[Any],
        mappings: Iterable[Dict[str, Any]],
        return_defaults: bool = False,
        render_nulls: bool = False,
    ) -> None:
        """Perform a bulk insert of the given list of mapping dictionaries.

        .. legacy::

            This method is a legacy feature as of the 2.0 series of
            SQLAlchemy.   For modern bulk INSERT and UPDATE, see
            the sections :ref:`orm_queryguide_bulk_insert` and
            :ref:`orm_queryguide_bulk_update`.  The 2.0 API shares
            implementation details with this method and adds new features
            as well.

        :param mapper: a mapped class, or the actual :class:`_orm.Mapper`
         object,
         representing the single kind of object represented within the mapping
         list.

        :param mappings: a sequence of dictionaries, each one containing the
         state of the mapped row to be inserted, in terms of the attribute
         names on the mapped class.   If the mapping refers to multiple tables,
         such as a joined-inheritance mapping, each dictionary must contain all
         keys to be populated into all tables.

        :param return_defaults: when True, the INSERT process will be altered
         to ensure that newly generated primary key values will be fetched.
         The rationale for this parameter is typically to enable
         :ref:`Joined Table Inheritance <joined_inheritance>` mappings to
         be bulk inserted.

         .. note:: for backends that don't support RETURNING, the
            :paramref:`_orm.Session.bulk_insert_mappings.return_defaults`
            parameter can significantly decrease performance as INSERT
            statements can no longer be batched.   See
            :ref:`engine_insertmanyvalues`
            for background on which backends are affected.

        :param render_nulls: When True, a value of ``None`` will result
         in a NULL value being included in the INSERT statement, rather
         than the column being omitted from the INSERT.   This allows all
         the rows being INSERTed to have the identical set of columns which
         allows the full set of rows to be batched to the DBAPI.  Normally,
         each column-set that contains a different combination of NULL values
         than the previous row must omit a different series of columns from
         the rendered INSERT statement, which means it must be emitted as a
         separate statement.   By passing this flag, the full set of rows
         are guaranteed to be batchable into one batch; the cost however is
         that server-side defaults which are invoked by an omitted column will
         be skipped, so care must be taken to ensure that these are not
         necessary.

         .. warning::

            When this flag is set, **server side default SQL values will
            not be invoked** for those columns that are inserted as NULL;
            the NULL value will be sent explicitly.   Care must be taken
            to ensure that no server-side default functions need to be
            invoked for the operation as a whole.

        .. seealso::

            :doc:`queryguide/dml`

            :meth:`.Session.bulk_save_objects`

            :meth:`.Session.bulk_update_mappings`

        """
        # delegate to the shared bulk implementation in INSERT mode
        self._bulk_save_mappings(
            mapper,
            mappings,
            isupdate=False,
            isstates=False,
            return_defaults=return_defaults,
            update_changed_only=False,
            render_nulls=render_nulls,
        )
    def bulk_update_mappings(
        self, mapper: Mapper[Any], mappings: Iterable[Dict[str, Any]]
    ) -> None:
        """Perform a bulk update of the given list of mapping dictionaries.

        .. legacy::

            This method is a legacy feature as of the 2.0 series of
            SQLAlchemy.   For modern bulk INSERT and UPDATE, see
            the sections :ref:`orm_queryguide_bulk_insert` and
            :ref:`orm_queryguide_bulk_update`.  The 2.0 API shares
            implementation details with this method and adds new features
            as well.

        :param mapper: a mapped class, or the actual :class:`_orm.Mapper`
         object,
         representing the single kind of object represented within the mapping
         list.

        :param mappings: a sequence of dictionaries, each one containing the
         state of the mapped row to be updated, in terms of the attribute names
         on the mapped class.   If the mapping refers to multiple tables, such
         as a joined-inheritance mapping, each dictionary may contain keys
         corresponding to all tables.   All those keys which are present and
         are not part of the primary key are applied to the SET clause of the
         UPDATE statement; the primary key values, which are required, are
         applied to the WHERE clause.

        .. seealso::

            :doc:`queryguide/dml`

            :meth:`.Session.bulk_insert_mappings`

            :meth:`.Session.bulk_save_objects`

        """
        # delegate to the shared bulk implementation in UPDATE mode
        self._bulk_save_mappings(
            mapper,
            mappings,
            isupdate=True,
            isstates=False,
            return_defaults=False,
            update_changed_only=False,
            render_nulls=False,
        )
    def _bulk_save_mappings(
        self,
        mapper: Mapper[_O],
        mappings: Union[Iterable[InstanceState[_O]], Iterable[Dict[str, Any]]],
        *,
        isupdate: bool,
        isstates: bool,
        return_defaults: bool,
        update_changed_only: bool,
        render_nulls: bool,
    ) -> None:
        """Shared implementation for the ``bulk_*`` persistence methods.

        Runs the bulk INSERT or UPDATE within its own subtransaction,
        committing on success and rolling back on error.  ``_flushing``
        is set for the duration of the operation.

        :param isupdate: selects UPDATE (True) or INSERT (False) mode.
        :param isstates: True when ``mappings`` contains
         :class:`.InstanceState` objects rather than plain dictionaries.
        """
        mapper = _class_to_mapper(mapper)
        self._flushing = True
        transaction = self._autobegin_t()._begin()
        try:
            if isupdate:
                bulk_persistence._bulk_update(
                    mapper,
                    mappings,
                    transaction,
                    isstates=isstates,
                    update_changed_only=update_changed_only,
                )
            else:
                bulk_persistence._bulk_insert(
                    mapper,
                    mappings,
                    transaction,
                    isstates=isstates,
                    return_defaults=return_defaults,
                    render_nulls=render_nulls,
                )
            transaction.commit()
        except:
            with util.safe_reraise():
                transaction.rollback(_capture_exception=True)
        finally:
            # flag is cleared on every exit path
            self._flushing = False
def is_modified(
self, instance: object, include_collections: bool = True
) -> bool:
r"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously flushed or committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may
report ``False`` when tested with this method. This is because
the object may have received change events via attribute mutation,
thus placing it in :attr:`.Session.dirty`, but ultimately the state
is the same as that loaded from the database, resulting in no net
change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if (
not include_collections
and hasattr(attr.impl, "get_collection")
) or not hasattr(attr.impl, "get_history"):
continue
(added, unchanged, deleted) = attr.impl.get_history(
state, dict_, passive=PassiveFlag.NO_CHANGE
)
if added or deleted:
return True
else:
return False
@property
def is_active(self) -> bool:
"""True if this :class:`.Session` not in "partial rollback" state.
.. versionchanged:: 1.4 The :class:`_orm.Session` no longer begins
a new transaction immediately, so this attribute will be False
when the :class:`_orm.Session` is first instantiated.
"partial rollback" state typically indicates that the flush process
of the :class:`_orm.Session` has failed, and that the
:meth:`_orm.Session.rollback` method must be emitted in order to
fully roll back the transaction.
If this :class:`_orm.Session` is not in a transaction at all, the
:class:`_orm.Session` will autobegin when it is first used, so in this
case :attr:`_orm.Session.is_active` will return True.
Otherwise, if this :class:`_orm.Session` is within a transaction,
and that transaction has not been rolled back internally, the
:attr:`_orm.Session.is_active` will also return True.
.. seealso::
:ref:`faq_session_rollback`
:meth:`_orm.Session.in_transaction`
"""
return self._transaction is None or self._transaction.is_active
@property
def _dirty_states(self) -> Iterable[InstanceState[Any]]:
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self) -> IdentitySet:
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return IdentitySet(
[
state.obj()
for state in self._dirty_states
if state not in self._deleted
]
)
@property
def deleted(self) -> IdentitySet:
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(list(self._deleted.values()))
@property
def new(self) -> IdentitySet:
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(list(self._new.values()))
_S = TypeVar("_S", bound="Session")
| Session |
python | ray-project__ray | doc/source/ray-core/doc_code/runtime_env_example.py | {
"start": 1019,
"end": 1208
} | class ____:
def g(self):
pass
ray.get(f_job.remote())
a = Actor_job.remote()
ray.get(a.g.remote())
ray.shutdown()
ray.init()
@ray.remote
def f():
pass
@ray.remote
| Actor_job |
python | plotly__plotly.py | plotly/graph_objs/histogram/selected/_marker.py | {
"start": 233,
"end": 3021
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram.selected"
_path_str = "histogram.selected.marker"
_valid_props = {"color", "opacity"}
@property
def color(self):
"""
Sets the marker color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def opacity(self):
"""
Sets the marker opacity of selected points.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
"""
def __init__(self, arg=None, color=None, opacity=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.selected.Marker`
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.selected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.selected.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("opacity", arg, opacity)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | walkccc__LeetCode | solutions/3500. Minimum Cost to Divide Array Into Subarrays/3500.py | {
"start": 0,
"end": 588
} | class ____:
def minimumCost(self, nums: list[int], cost: list[int], k: int) -> int:
n = len(nums)
prefixNums = list(itertools.accumulate(nums, initial=0))
prefixCost = list(itertools.accumulate(cost, initial=0))
# dp[i] := the minimum cost to divide nums[i..n - 1] into subarrays
dp = [math.inf] * n + [0]
for i in range(n - 1, -1, -1):
for j in range(i, n):
dp[i] = min(dp[i],
prefixNums[j + 1] * (prefixCost[j + 1] - prefixCost[i]) +
k * (prefixCost[n] - prefixCost[i]) + dp[j + 1])
return dp[0]
| Solution |
python | getsentry__sentry | src/sentry/replays/models.py | {
"start": 783,
"end": 1470
} | class ____(DefaultFieldsModel):
__relocation_scope__ = RelocationScope.Excluded
range_start = models.DateTimeField()
range_end = models.DateTimeField()
environments = ArrayField(models.TextField(), default=list)
organization_id = BoundedBigIntegerField(db_index=True)
project_id = BoundedBigIntegerField(db_index=True)
status = models.CharField(choices=DeletionJobStatus.choices, default=DeletionJobStatus.PENDING)
query = models.TextField()
offset = models.IntegerField(default=0)
class Meta:
app_label = "replays"
db_table = "replays_replaydeletionjob"
# Based heavily on EventAttachment
@region_silo_model
| ReplayDeletionJobModel |
python | walkccc__LeetCode | solutions/1186. Maximum Subarray Sum with One Deletion/1186-2.py | {
"start": 0,
"end": 318
} | class ____:
# Similar to 53. Maximum Subarray
def maximumSum(self, arr: list[int]) -> int:
ans = -math.inf
zero = -math.inf # no deletion
one = -math.inf # <= 1 deletion
for a in arr:
one = max(a, one + a, zero)
zero = max(a, zero + a)
ans = max(ans, one)
return ans
| Solution |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 30366,
"end": 31522
} | class ____:
"""Descriptor for getting and setting name properties."""
def __set_name__(self, owner: StylesBase, name: str) -> None:
self.name = name
def __get__(self, obj: StylesBase, objtype: type[StylesBase] | None) -> str:
"""Get the name property.
Args:
obj: The ``Styles`` object.
objtype: The ``Styles`` class.
Returns:
The name.
"""
return obj.get_rule(self.name, "") # type: ignore[return-value]
def __set__(self, obj: StylesBase, name: str | None):
"""Set the name property.
Args:
obj: The ``Styles`` object.
name: The name to set the property to.
Raises:
StyleTypeError: If the value is not a ``str``.
"""
_rich_traceback_omit = True
if name is None:
if obj.clear_rule(self.name):
obj.refresh(layout=True)
else:
if not isinstance(name, str):
raise StyleTypeError(f"{self.name} must be a str")
if obj.set_rule(self.name, name):
obj.refresh(layout=True)
| NameProperty |
python | apache__airflow | providers/apache/kafka/tests/unit/apache/kafka/hooks/test_base.py | {
"start": 1395,
"end": 3826
} | class ____:
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_get_conn(self, mock_get_connection, hook):
config = {"bootstrap.servers": MagicMock()}
mock_get_connection.return_value.extra_dejson = config
assert hook.get_conn == config
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_get_conn_value_error(self, mock_get_connection, hook):
mock_get_connection.return_value.extra_dejson = {}
with pytest.raises(ValueError, match="must be provided"):
hook.get_conn()
@mock.patch("airflow.providers.apache.kafka.hooks.base.AdminClient")
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_test_connection(self, mock_get_connection, admin_client, hook):
config = {"bootstrap.servers": MagicMock()}
mock_get_connection.return_value.extra_dejson = config
connection = hook.test_connection()
admin_client.assert_called_once_with(config)
mock_admin_instance = admin_client.return_value
mock_admin_instance.list_topics.assert_called_once_with(timeout=TIMEOUT)
assert connection == (True, "Connection successful.")
@mock.patch(
"airflow.providers.apache.kafka.hooks.base.AdminClient",
return_value=MagicMock(list_topics=MagicMock(return_value=[])),
)
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_test_connection_no_topics(self, mock_get_connection, admin_client, hook):
config = {"bootstrap.servers": MagicMock()}
mock_get_connection.return_value.extra_dejson = config
connection = hook.test_connection()
admin_client.assert_called_once_with(config)
mock_admin_instance = admin_client.return_value
mock_admin_instance.list_topics.assert_called_once_with(timeout=TIMEOUT)
assert connection == (False, "Failed to establish connection.")
@mock.patch("airflow.providers.apache.kafka.hooks.base.AdminClient")
@mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection")
def test_test_connection_exception(self, mock_get_connection, admin_client, hook):
config = {"bootstrap.servers": MagicMock()}
mock_get_connection.return_value.extra_dejson = config
admin_client.return_value.list_topics.side_effect = [ValueError("some error")]
connection = hook.test_connection()
assert connection == (False, "some error")
| TestKafkaBaseHook |
python | numba__numba | numba/core/compiler_machinery.py | {
"start": 2704,
"end": 2828
} | class ____(CompilerPass):
""" Base class for analysis passes (no modification made to state)
"""
pass
| AnalysisPass |
python | kamyu104__LeetCode-Solutions | Python/digit-count-in-range.py | {
"start": 32,
"end": 617
} | class ____(object):
def digitsCount(self, d, low, high):
"""
:type d: int
:type low: int
:type high: int
:rtype: int
"""
def digitsCount(n, k):
pivot, result = 1, 0
while n >= pivot:
result += (n//(10*pivot))*pivot + \
min(pivot, max(n%(10*pivot) - k*pivot + 1, 0))
if k == 0:
result -= pivot
pivot *= 10
return result+1
return digitsCount(high, d) - digitsCount(low-1, d)
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run.py | {
"start": 8937,
"end": 20943
} | class ____(BaseJobConfiguration):
"""
Configuration class used by the Cloud Run Worker to create a Cloud Run Job.
An instance of this class is passed to the Cloud Run worker's `run` method
for each flow run. It contains all information necessary to execute
the flow run as a Cloud Run Job.
Attributes:
region: The region where the Cloud Run Job resides.
credentials: The GCP Credentials used to connect to Cloud Run.
job_body: The job body used to create the Cloud Run Job.
timeout: Max allowed duration the job may be active before Cloud Run
will actively try to mark it failed and kill associated containers
(maximum of 3600 seconds, 1 hour).
keep_job: Whether to delete the Cloud Run Job after it completes.
prefect_api_key_secret: A GCP secret containing a Prefect API Key.
prefect_api_auth_string_secret: A GCP secret containing a Prefect API authorization string.
"""
region: str = Field(
default="us-central1", description="The region where the Cloud Run Job resides."
)
credentials: Optional[GcpCredentials] = Field(
title="GCP Credentials",
default_factory=GcpCredentials,
description="The GCP Credentials used to connect to Cloud Run. "
"If not provided credentials will be inferred from "
"the local environment.",
)
prefect_api_key_secret: Optional[SecretKeySelector] = Field(
title="Prefect API Key Secret",
default=None,
description=(
"A GCP secret containing a Prefect API Key. This key will be used "
"to authenticate Cloud Run tasks with Prefect Cloud. If not provided, the "
"PREFECT_API_KEY environment variable will be used if the worker has one."
),
)
prefect_api_auth_string_secret: Optional[SecretKeySelector] = Field(
title="Prefect API Auth String Secret",
default=None,
description=(
"A GCP secret containing a Prefect API authorization string. This "
"string will be used to authenticate Cloud Run tasks with Prefect Cloud. "
"If not provided, the PREFECT_API_AUTH_STRING environment variable will be "
"used if the worker has one."
),
)
job_body: Dict[str, Any] = Field(
json_schema_extra=dict(template=_get_default_job_body_template())
)
timeout: Optional[int] = Field(
default=600,
gt=0,
le=3600,
title="Job Timeout",
description=(
"Max allowed duration the Job may be active before Cloud Run will "
"actively try to mark it failed and kill associated containers (maximum of 3600 seconds, 1 hour)."
),
)
keep_job: Optional[bool] = Field(
default=False,
title="Keep Job After Completion",
description="Keep the completed Cloud Run Job on Google Cloud Platform.",
)
@property
def project(self) -> str:
"""property for accessing the project from the credentials."""
return self.credentials.project
@property
def job_name(self) -> str:
"""property for accessing the name from the job metadata."""
return self.job_body["metadata"]["name"]
def _get_flow_run_logger(
self,
flow_run: "FlowRun",
work_pool: "WorkPool | None" = None,
worker_name: str | None = None,
) -> PrefectLogAdapter:
extra = {
"work_pool_name": (work_pool.name if work_pool else "<unknown>"),
"worker_name": worker_name if worker_name else "<unknown>",
"work_pool_id": str(work_pool.id if work_pool else "unknown"),
}
return flow_run_logger(flow_run=flow_run).getChild(
"worker",
extra=extra,
)
def prepare_for_flow_run(
self,
flow_run: "FlowRun",
deployment: Optional["DeploymentResponse"] = None,
flow: Optional["Flow"] = None,
work_pool: Optional["WorkPool"] = None,
worker_name: Optional[str] = None,
):
"""
Prepares the job configuration for a flow run.
Ensures that necessary values are present in the job body and that the
job body is valid.
Args:
flow_run: The flow run to prepare the job configuration for
deployment: The deployment associated with the flow run used for
preparation.
flow: The flow associated with the flow run used for preparation.
"""
super().prepare_for_flow_run(flow_run, deployment, flow, work_pool, worker_name)
self._populate_envs()
self._warn_about_plaintext_credentials(flow_run, worker_name, work_pool)
self._populate_or_format_command()
self._format_args_if_present()
self._populate_image_if_not_present()
self._populate_name_if_not_present()
def _populate_envs(self):
"""Populate environment variables. BaseWorker.prepare_for_flow_run handles
putting the environment variables in the `env` attribute. This method
moves them into the jobs body"""
# Create a copy of the environment variables to avoid modifying the original
env_copy = self.env.copy()
# Remove Prefect API credentials from environment if secrets are provided
if self.prefect_api_key_secret:
env_copy.pop("PREFECT_API_KEY", None)
if self.prefect_api_auth_string_secret:
env_copy.pop("PREFECT_API_AUTH_STRING", None)
# Set regular environment variables
envs = [{"name": k, "value": v} for k, v in env_copy.items()]
# Add secret-based environment variables
if self.prefect_api_key_secret:
envs.append(
{
"name": "PREFECT_API_KEY",
"valueFrom": {
"secretKeyRef": {
"name": self.prefect_api_key_secret.secret,
"key": self.prefect_api_key_secret.version,
}
},
}
)
if self.prefect_api_auth_string_secret:
envs.append(
{
"name": "PREFECT_API_AUTH_STRING",
"valueFrom": {
"secretKeyRef": {
"name": self.prefect_api_auth_string_secret.secret,
"key": self.prefect_api_auth_string_secret.version,
}
},
}
)
self.job_body["spec"]["template"]["spec"]["template"]["spec"]["containers"][0][
"env"
] = envs
def _warn_about_plaintext_credentials(
self,
flow_run: "FlowRun",
worker_name: str | None = None,
work_pool: "WorkPool | None" = None,
):
"""
Warns about plaintext credentials when no secrets are configured.
"""
if "PREFECT_API_KEY" in self.env and not self.prefect_api_key_secret:
self._get_flow_run_logger(
flow_run=flow_run,
worker_name=worker_name,
work_pool=work_pool,
).warning(
"PREFECT_API_KEY is provided as a plaintext environment variable. "
"For better security, consider providing it as a secret using "
"'prefect_api_key_secret' in your base job template."
)
if (
"PREFECT_API_AUTH_STRING" in self.env
and not self.prefect_api_auth_string_secret
):
self._get_flow_run_logger(
flow_run=flow_run,
worker_name=worker_name,
work_pool=work_pool,
).warning(
"PREFECT_API_AUTH_STRING is provided as a plaintext environment variable. "
"For better security, consider providing it as a secret using "
"'prefect_api_auth_string_secret' in your base job template."
)
def _populate_name_if_not_present(self):
"""Adds the flow run name to the job if one is not already provided."""
try:
if "name" not in self.job_body["metadata"]:
base_job_name = slugify_name(self.name)
job_name = f"{base_job_name}-{uuid4().hex}"
self.job_body["metadata"]["name"] = job_name
except KeyError:
raise ValueError("Unable to verify name due to invalid job body template.")
def _populate_image_if_not_present(self):
"""Adds the latest prefect image to the job if one is not already provided."""
try:
if (
"image"
not in self.job_body["spec"]["template"]["spec"]["template"]["spec"][
"containers"
][0]
):
self.job_body["spec"]["template"]["spec"]["template"]["spec"][
"containers"
][0]["image"] = f"docker.io/{get_prefect_image_name()}"
except KeyError:
raise ValueError("Unable to verify image due to invalid job body template.")
def _populate_or_format_command(self):
"""
Ensures that the command is present in the job manifest. Populates the command
with the `prefect -m prefect.engine` if a command is not present.
"""
try:
command = self.job_body["spec"]["template"]["spec"]["template"]["spec"][
"containers"
][0].get("command")
if command is None:
self.job_body["spec"]["template"]["spec"]["template"]["spec"][
"containers"
][0]["command"] = shlex.split(self._base_flow_run_command())
elif isinstance(command, str):
self.job_body["spec"]["template"]["spec"]["template"]["spec"][
"containers"
][0]["command"] = shlex.split(command)
except KeyError:
raise ValueError(
"Unable to verify command due to invalid job body template."
)
def _format_args_if_present(self):
try:
args = self.job_body["spec"]["template"]["spec"]["template"]["spec"][
"containers"
][0].get("args")
if args is not None and isinstance(args, str):
self.job_body["spec"]["template"]["spec"]["template"]["spec"][
"containers"
][0]["args"] = shlex.split(args)
except KeyError:
raise ValueError("Unable to verify args due to invalid job body template.")
@field_validator("job_body")
@classmethod
def _ensure_job_includes_all_required_components(cls, value: Dict[str, Any]):
"""
Ensures that the job body includes all required components.
"""
patch = JsonPatch.from_diff(value, _get_base_job_body())
missing_paths = sorted([op["path"] for op in patch if op["op"] == "add"])
if missing_paths:
raise ValueError(
"Job is missing required attributes at the following paths: "
f"{', '.join(missing_paths)}"
)
return value
@field_validator("job_body")
@classmethod
def _ensure_job_has_compatible_values(cls, value: Dict[str, Any]):
"""Ensure that the job body has compatible values."""
patch = JsonPatch.from_diff(value, _get_base_job_body())
incompatible = sorted(
[
f"{op['path']} must have value {op['value']!r}"
for op in patch
if op["op"] == "replace"
]
)
if incompatible:
raise ValueError(
"Job has incompatible values for the following attributes: "
f"{', '.join(incompatible)}"
)
return value
| CloudRunWorkerJobConfiguration |
python | getsentry__sentry | tests/sentry/notifications/notification_action/metric_alert_registry/test_sentry_app_metric_alert_handler.py | {
"start": 1226,
"end": 8679
} | class ____(MetricAlertHandlerBase):
def setUp(self) -> None:
super().setUp()
self.sentry_app = self.create_sentry_app(
name="foo",
organization=self.organization,
is_alertable=True,
verify_install=False,
)
self.action = self.create_action(
type=Action.Type.SENTRY_APP,
integration_id=None,
config={
"target_identifier": str(self.sentry_app.id),
"target_type": ActionTarget.SENTRY_APP.value,
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_ID,
},
)
self.handler = SentryAppMetricAlertHandler()
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.handlers.sentry_app_metric_alert_handler.send_incident_alert_notification"
)
@freeze_time("2021-01-01 00:00:00")
def test_send_alert(self, mock_send_incident_alert_notification: mock.MagicMock) -> None:
notification_context = NotificationContext.from_action_model(self.action)
assert self.group_event.occurrence is not None
assert self.group_event.occurrence.priority is not None
alert_context = AlertContext.from_workflow_engine_models(
self.detector,
self.evidence_data,
self.group_event.group.status,
DetectorPriorityLevel(self.group_event.occurrence.priority),
)
metric_issue_context = MetricIssueContext.from_group_event(
self.group,
self.evidence_data,
DetectorPriorityLevel(self.group_event.occurrence.priority),
)
open_period_context = OpenPeriodContext.from_group(self.group)
notification_uuid = str(uuid.uuid4())
self.handler.send_alert(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
trigger_status=TriggerStatus.ACTIVE,
project=self.detector.project,
organization=self.detector.project.organization,
notification_uuid=notification_uuid,
)
mock_send_incident_alert_notification.assert_called_once_with(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
organization=self.detector.project.organization,
notification_uuid=notification_uuid,
incident_serialized_response=get_incident_serializer(self.open_period),
)
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.SentryAppMetricAlertHandler.send_alert"
)
@freeze_time("2021-01-01 00:00:00")
def test_invoke_legacy_registry(self, mock_send_alert: mock.MagicMock) -> None:
self.handler.invoke_legacy_registry(self.event_data, self.action, self.detector)
assert mock_send_alert.call_count == 1
(
notification_context,
alert_context,
metric_issue_context,
open_period_context,
organization,
notification_uuid,
) = self.unpack_kwargs(mock_send_alert)
self.assert_notification_context(
notification_context,
integration_id=None,
target_identifier=None,
target_display=None,
sentry_app_config=None,
sentry_app_id=str(self.sentry_app.id),
)
self.assert_alert_context(
alert_context,
name=self.detector.name,
action_identifier_id=self.detector.id,
threshold_type=AlertRuleThresholdType.ABOVE,
detection_type=AlertRuleDetectionType.STATIC,
comparison_delta=None,
alert_threshold=self.evidence_data.conditions[0]["comparison"],
)
self.assert_metric_issue_context(
metric_issue_context,
open_period_identifier=self.open_period.id,
snuba_query=self.snuba_query,
new_status=IncidentStatus.CRITICAL,
title=self.group_event.group.title,
metric_value=123.45,
group=self.group_event.group,
subscription=self.subscription,
)
self.assert_open_period_context(
open_period_context,
id=self.open_period.id,
date_started=self.group_event.group.first_seen,
date_closed=None,
)
assert organization == self.detector.project.organization
assert isinstance(notification_uuid, str)
@mock.patch(
"sentry.notifications.notification_action.metric_alert_registry.handlers.sentry_app_metric_alert_handler.SentryAppMetricAlertHandler.send_alert"
)
@freeze_time("2021-01-01 00:00:00")
def test_invoke_legacy_registry_with_activity(self, mock_send_alert: mock.MagicMock) -> None:
# Create an Activity instance with evidence data and priority
activity_data = asdict(self.evidence_data)
activity = Activity(
project=self.project,
group=self.group,
type=ActivityType.SET_RESOLVED.value,
data=activity_data,
)
activity.save()
# Create event data with Activity instead of GroupEvent
event_data_with_activity = WorkflowEventData(
event=activity,
workflow_env=self.workflow.environment,
group=self.group,
)
self.handler.invoke_legacy_registry(event_data_with_activity, self.action, self.detector)
assert mock_send_alert.call_count == 1
(
notification_context,
alert_context,
metric_issue_context,
open_period_context,
organization,
notification_uuid,
) = self.unpack_kwargs(mock_send_alert)
# Verify that the same data is extracted from Activity.data as from GroupEvent.occurrence.evidence_data
self.assert_notification_context(
notification_context,
integration_id=None,
target_identifier=None,
target_display=None,
sentry_app_config=None,
sentry_app_id=str(self.sentry_app.id),
)
self.assert_alert_context(
alert_context,
name=self.detector.name,
action_identifier_id=self.detector.id,
threshold_type=AlertRuleThresholdType.BELOW,
detection_type=AlertRuleDetectionType.STATIC,
comparison_delta=None,
alert_threshold=self.evidence_data.conditions[2]["comparison"],
)
self.assert_metric_issue_context(
metric_issue_context,
open_period_identifier=self.open_period.id,
snuba_query=self.snuba_query,
new_status=IncidentStatus.CLOSED,
metric_value=123.45,
group=self.group,
title=self.group.title,
subscription=self.subscription,
)
self.assert_open_period_context(
open_period_context,
id=self.open_period.id,
date_started=self.group.first_seen,
date_closed=None,
)
assert organization == self.detector.project.organization
assert isinstance(notification_uuid, str)
| TestSentryAppMetricAlertHandler |
python | pytorch__pytorch | torch/_inductor/codecache.py | {
"start": 26293,
"end": 26416
} | class ____(Exception):
"""
Exception to indicate that the FxGraphCache should be bypassed.
"""
| BypassFxGraphCache |
python | apache__airflow | airflow-ctl/src/airflowctl/api/client.py | {
"start": 2935,
"end": 6374
} | class ____:
"""Credentials for the API."""
api_url: str | None
api_token: str | None
api_environment: str
def __init__(
self,
api_url: str | None = None,
api_token: str | None = None,
client_kind: ClientKind | None = None,
api_environment: str = "production",
):
self.api_url = api_url
self.api_token = api_token
self.api_environment = os.getenv("AIRFLOW_CLI_ENVIRONMENT") or api_environment
self.client_kind = client_kind
@property
def input_cli_config_file(self) -> str:
"""Generate path for the CLI config file."""
return f"{self.api_environment}.json"
def save(self):
"""Save the credentials to keyring and URL to disk as a file."""
default_config_dir = os.environ.get("AIRFLOW_HOME", os.path.expanduser("~/airflow"))
os.makedirs(default_config_dir, exist_ok=True)
with open(os.path.join(default_config_dir, self.input_cli_config_file), "w") as f:
json.dump({"api_url": self.api_url}, f)
try:
if os.getenv("AIRFLOW_CLI_DEBUG_MODE") == "true":
with open(
os.path.join(default_config_dir, f"debug_creds_{self.input_cli_config_file}"), "w"
) as f:
json.dump({f"api_token_{self.api_environment}": self.api_token}, f)
else:
keyring.set_password("airflowctl", f"api_token_{self.api_environment}", self.api_token)
except NoKeyringError as e:
log.error(e)
except TypeError as e:
# This happens when the token is None, which is not allowed by keyring
if self.api_token is None and self.client_kind == ClientKind.CLI:
raise AirflowCtlCredentialNotFoundException("No API token found. Please login first.") from e
def load(self) -> Credentials:
"""Load the credentials from keyring and URL from disk file."""
default_config_dir = os.environ.get("AIRFLOW_HOME", os.path.expanduser("~/airflow"))
config_path = os.path.join(default_config_dir, self.input_cli_config_file)
try:
with open(config_path) as f:
credentials = json.load(f)
self.api_url = credentials["api_url"]
if os.getenv("AIRFLOW_CLI_DEBUG_MODE") == "true":
debug_creds_path = os.path.join(
default_config_dir, f"debug_creds_{self.input_cli_config_file}"
)
with open(debug_creds_path) as df:
debug_credentials = json.load(df)
self.api_token = debug_credentials.get(f"api_token_{self.api_environment}")
else:
self.api_token = keyring.get_password("airflowctl", f"api_token_{self.api_environment}")
except FileNotFoundError:
if self.client_kind == ClientKind.AUTH:
# Saving the URL set from the Auth Commands if Kind is AUTH
self.save()
elif self.client_kind == ClientKind.CLI:
raise AirflowCtlCredentialNotFoundException(
f"No credentials found in {default_config_dir} for environment {self.api_environment}."
)
else:
raise AirflowCtlException(f"Unknown client kind: {self.client_kind}")
return self
| Credentials |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column.py | {
"start": 86872,
"end": 88680
} | class ____(
_DenseColumn,
collections.namedtuple(
'_NumericColumn',
['key', 'shape', 'default_value', 'dtype', 'normalizer_fn'])):
"""see `numeric_column`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of numerical column must be a Tensor. '
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.cast(input_tensor, dtypes.float32)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(self.shape)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing numeric feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in this
function.
Returns:
Dense `Tensor` created within `_transform_feature`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
| _NumericColumn |
python | gevent__gevent | src/gevent/tests/test__all__.py | {
"start": 2236,
"end": 11547
} | class ____(object):
modname = None
stdlib_has_all = False
stdlib_all = None
stdlib_name = None
stdlib_module = None
@classmethod
def setUpClass(cls):
modname = cls.modname
if modname.endswith(PLATFORM_SPECIFIC_SUFFIXES):
raise unittest.SkipTest("Module %s is platform specific" % modname)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
try:
cls.module = importlib.import_module(modname)
except ImportError:
if modname in modules.OPTIONAL_MODULES:
msg = "Unable to import %s" % modname
raise unittest.SkipTest(msg)
raise
cls.__implements__ = getattr(cls.module, '__implements__', None)
cls.__imports__ = getattr(cls.module, '__imports__', [])
cls.__extensions__ = getattr(cls.module, '__extensions__', [])
cls.stdlib_name = MAPPING.get(modname)
if cls.stdlib_name is not None:
try:
cls.stdlib_module = __import__(cls.stdlib_name)
except ImportError:
pass
else:
cls.stdlib_has_all = True
cls.stdlib_all = getattr(cls.stdlib_module, '__all__', None)
if cls.stdlib_all is None:
cls.stdlib_has_all = False
cls.stdlib_all = [
name
for name in dir(cls.stdlib_module)
if not name.startswith('_')
and not isinstance(getattr(cls.stdlib_module, name), types.ModuleType)
]
def skipIfNoAll(self):
if not hasattr(self.module, '__all__'):
self.assertTrue(
self.modname in NO_ALL or self.modname.startswith('gevent.monkey.'),
"Module has no all"
)
self.skipTest("%s Needs __all__" % self.modname)
def test_all(self):
# Check that __all__ is present in the gevent module,
# and only includes things that actually exist and can be
# imported from it.
self.skipIfNoAll()
names = {}
six.exec_("from %s import *" % self.modname, names)
names.pop('__builtins__', None)
self.maxDiff = None
# It should match both as a set
self.assertEqual(set(names), set(self.module.__all__))
# and it should not contain duplicates.
self.assertEqual(sorted(names), sorted(self.module.__all__))
def test_all_formula(self):
self.skipIfNoAll()
# Check __all__ = __implements__ + __extensions__ + __imported__
# This is disabled because it was previously being skipped entirely
# back when we had to call things manually. In that time, it drifted
# out of sync. It should be enabled again and problems corrected.
all_calculated = (
tuple(self.__implements__ or ())
+ tuple(self.__imports__ or ())
+ tuple(self.__extensions__ or ())
)
try:
self.assertEqual(sorted(all_calculated),
sorted(self.module.__all__))
except AssertionError:
self.skipTest("Module %s fails the all formula; fix it" % self.modname)
except TypeError:
# TypeError: '<' not supported between instances of 'type' and 'str'
raise AssertionError(
"Unable to sort %r from all %s in %s" % (
all_calculated, self.module.__all__, self.module
)
)
def test_implements_presence_justified(self):
# Check that __implements__ is present only if the module is modeled
# after a module from stdlib (like gevent.socket).
if self.modname in ALLOW_IMPLEMENTS:
return
if self.__implements__ is not None and self.stdlib_module is None:
raise AssertionError(
'%s (%r) has __implements__ (%s) but no stdlib counterpart module exists (%s)'
% (self.modname, self.module, self.__implements__, self.stdlib_name))
@skip_if_no_stdlib_counterpart
def test_implements_subset_of_stdlib_all(self):
# Check that __implements__ + __imports__ is a subset of the
# corresponding standard module __all__ or dir()
for name in tuple(self.__implements__ or ()) + tuple(self.__imports__):
if name in self.stdlib_all:
continue
if name in COULD_BE_MISSING.get(self.stdlib_name, ()):
continue
if name in dir(self.stdlib_module): # like thread._local which is not in thread.__all__
continue
raise AssertionError('%r is not found in %r.__all__ nor in dir(%r)' % (name, self.stdlib_module, self.stdlib_module))
@skip_if_no_stdlib_counterpart
def test_implements_actually_implements(self):
# Check that the module actually implements the entries from
# __implements__
for name in self.__implements__ or ():
item = getattr(self.module, name)
try:
stdlib_item = getattr(self.stdlib_module, name)
self.assertIsNot(item, stdlib_item)
except AttributeError:
if name not in COULD_BE_MISSING.get(self.stdlib_name, []):
raise
@skip_if_no_stdlib_counterpart
def test_imports_actually_imports(self):
# Check that the module actually imports the entries from
# __imports__
for name in self.__imports__:
item = getattr(self.module, name)
stdlib_item = getattr(self.stdlib_module, name)
self.assertIs(item, stdlib_item)
@skip_if_no_stdlib_counterpart
def test_extensions_actually_extend(self):
# Check that the module actually defines new entries in
# __extensions__
if self.modname in EXTRA_EXTENSIONS:
return
for name in self.__extensions__:
try:
if hasattr(self.stdlib_module, name):
raise AssertionError("'%r' is not an extension, it is found in %r" % (
name, self.stdlib_module
))
except TypeError as ex:
# TypeError: attribute name must be string, not 'type'
raise AssertionError(
"Got TypeError (%r) getting %r (of %s) from %s/%s" % (
ex, name, self.__extensions__, self.stdlib_module, self.modname
)
)
@skip_if_no_stdlib_counterpart
def test_completeness(self): # pylint:disable=too-many-branches
# Check that __all__ (or dir()) of the corresponsing stdlib is
# a subset of __all__ of this module
missed = []
for name in self.stdlib_all:
if name not in getattr(self.module, '__all__', []):
missed.append(name)
# handle stuff like ssl.socket and ssl.socket_error which have no reason to be in gevent.ssl.__all__
if not self.stdlib_has_all:
for name in missed[:]:
if hasattr(self.module, name):
missed.remove(name)
# remove known misses
not_implemented = NOT_IMPLEMENTED.get(self.stdlib_name)
if not_implemented is not None:
result = []
for name in missed:
if name in not_implemented:
# We often don't want __all__ to be set because we wind up
# documenting things that we just copy in from the stdlib.
# But if we implement it, don't print a warning
if getattr(self.module, name, _MISSING) is _MISSING:
debug('IncompleteImplWarning: %s.%s' % (self.modname, name))
else:
result.append(name)
missed = result
if missed:
if self.stdlib_has_all:
msg = '''The following items
in %r.__all__
are missing from %r:
%r''' % (self.stdlib_module, self.module, missed)
else:
msg = '''The following items
in dir(%r)
are missing from %r:
%r''' % (self.stdlib_module, self.module, missed)
raise AssertionError(msg)
def _create_tests():
for _, modname in modules.walk_modules(include_so=False, recursive=True,
check_optional=False):
if modname.endswith(PLATFORM_SPECIFIC_SUFFIXES):
continue
if modname.endswith('__main__'):
# gevent.monkey.__main__ especially is a problem.
# These aren't meant to be imported anyway.
continue
orig_modname = modname
modname_no_period = orig_modname.replace('.', '_')
cls = type(
'Test_' + modname_no_period,
(AbstractTestMixin, unittest.TestCase),
{
'__module__': __name__,
'modname': orig_modname
}
)
globals()[cls.__name__] = cls
_create_tests()
if __name__ == "__main__":
unittest.main()
| AbstractTestMixin |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 1993,
"end": 2559
} | class ____:
def disappearing_comment():
return (
( # xx -x xxxxxxx xx xxx xxxxxxx.
'{{xxx_xxxxxxxxxx_xxxxxxxx}} xxx xxxx'
' {} {{xxxx}} >&2'
.format(
"{xxxx} {xxxxxx}"
if xxxxx.xx_xxxxxxxxxx
else ( # Disappearing Comment
"--xxxxxxx --xxxxxx=x --xxxxxx-xxxxx=xxxxxx"
" --xxxxxx-xxxx=xxxxxxxxxxx.xxx"
)
)
),
(x, y, z),
)
| A |
python | django__django | tests/custom_lookups/tests.py | {
"start": 6234,
"end": 6903
} | class ____(models.lookups.Lookup):
"""
InMonth matches if the column's month is the same as value's month.
"""
lookup_name = "inmonth"
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
# We need to be careful so that we get the params in right
# places.
params = lhs_params + rhs_params + lhs_params + rhs_params
return (
"%s >= date_trunc('month', %s) and "
"%s < date_trunc('month', %s) + interval '1 months'" % (lhs, rhs, lhs, rhs),
params,
)
| InMonth |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_deprecation.py | {
"start": 632,
"end": 812
} | class ____(MockClass1):
"""Inherit from deprecated class but does not call super().__init__."""
def __init__(self, a):
self.a = a
@deprecated("a message")
| MockClass5 |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 227315,
"end": 227661
} | class ____(VegaLiteSchema):
"""ConditionalPredicateValueDefnumberArraynullExprRef schema wrapper."""
_schema = {
"$ref": "#/definitions/ConditionalPredicate<(ValueDef<(number[]|null)>|ExprRef)>"
}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalPredicateValueDefnumberArraynullExprRef |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 1316,
"end": 3385
} | class ____(enum.IntEnum):
HasGil = 0
NoGil = 1
# For 'cdef func() nogil:' functions, as the GIL may be held while
# calling this function (thus contained 'nogil' blocks may be valid).
NoGilScope = 2
def relative_position(pos):
return (pos[0].get_filenametable_entry(), pos[1])
def embed_position(pos, docstring):
if not Options.embed_pos_in_docstring:
return docstring
pos_line = 'File: %s (starting at line %s)' % relative_position(pos)
if docstring is None:
# unicode string
return EncodedString(pos_line)
# make sure we can encode the filename in the docstring encoding
# otherwise make the docstring a unicode string
encoding = docstring.encoding
if encoding is not None:
try:
pos_line.encode(encoding)
except UnicodeEncodeError:
encoding = None
if not docstring:
# reuse the string encoding of the original docstring
doc = EncodedString(pos_line)
else:
doc = EncodedString(pos_line + '\n' + docstring)
doc.encoding = encoding
return doc
def write_func_call(func, codewriter_class):
def f(*args, **kwds):
if len(args) > 1 and isinstance(args[1], codewriter_class):
# here we annotate the code with this function call
# but only if new code is generated
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
node.__class__.__name__,
func.__name__,
node.pos[1:],
)
insertion_point = code.insertion_point()
start = code.buffer.stream.tell()
code.call_level += 4
res = func(*args, **kwds)
code.call_level -= 4
if start != code.buffer.stream.tell():
code.putln(marker.replace('->', '<-', 1))
insertion_point.putln(marker)
return res
else:
return func(*args, **kwds)
return f
| NoGilState |
python | scikit-learn__scikit-learn | sklearn/linear_model/_stochastic_gradient.py | {
"start": 75040,
"end": 93800
} | class ____(OutlierMixin, BaseSGD):
"""Solves linear One-Class SVM using Stochastic Gradient Descent.
This implementation is meant to be used with a kernel approximation
technique (e.g. `sklearn.kernel_approximation.Nystroem`) to obtain results
similar to `sklearn.svm.OneClassSVM` which uses a Gaussian kernel by
default.
Read more in the :ref:`User Guide <sgd_online_one_class_svm>`.
.. versionadded:: 1.0
Parameters
----------
nu : float, default=0.5
The nu parameter of the One Class SVM: an upper bound on the
fraction of training errors and a lower bound of the fraction of
support vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. Defaults to True.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
`partial_fit`. Defaults to 1000.
Values must be in the range `[1, inf)`.
tol : float or None, default=1e-3
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol). Defaults to 1e-3.
Values must be in the range `[0.0, inf)`.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
verbose : int, default=0
The verbosity level.
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
learning_rate : {'constant', 'optimal', 'invscaling', 'adaptive'}, default='optimal'
The learning rate schedule to use with `fit`. (If using `partial_fit`,
learning rate must be controlled directly).
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where t0 is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
- 'adaptive': eta = eta0, as long as the training keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
eta0 : float, default=0.01
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.0, but note that eta0 is not used
by the default learning rate 'optimal'.
Values must be in the range `(0.0, inf)`.
power_t : float, default=0.5
The exponent for inverse scaling learning rate.
Values must be in the range `[0.0, inf)`.
.. deprecated:: 1.8
Negative values for `power_t` are deprecated in version 1.8 and will raise
an error in 1.10. Use values in the range [0.0, inf) instead.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : ndarray of shape (1, n_features)
Weights assigned to the features.
offset_ : ndarray of shape (1,)
Offset used to define the decision function from the raw scores.
We have the relation: decision_function = score_samples - offset.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples + 1)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
Notes
-----
This estimator has a linear complexity in the number of training samples
and is thus better suited than the `sklearn.svm.OneClassSVM`
implementation for datasets with a large number of training samples (say
> 10,000).
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> clf = linear_model.SGDOneClassSVM(random_state=42, tol=None)
>>> clf.fit(X)
SGDOneClassSVM(random_state=42, tol=None)
>>> print(clf.predict([[4, 4]]))
[1]
"""
loss_functions = {"hinge": (Hinge, 1.0)}
_parameter_constraints: dict = {
**BaseSGD._parameter_constraints,
"nu": [Interval(Real, 0.0, 1.0, closed="right")],
"learning_rate": [
StrOptions({"constant", "optimal", "invscaling", "adaptive"}),
Hidden(StrOptions({"pa1", "pa2"})),
],
"power_t": [Interval(Real, None, None, closed="neither")],
}
def __init__(
self,
nu=0.5,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
random_state=None,
learning_rate="optimal",
eta0=0.01,
power_t=0.5,
warm_start=False,
average=False,
):
self.nu = nu
super().__init__(
loss="hinge",
penalty="l2",
l1_ratio=0,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
shuffle=shuffle,
verbose=verbose,
epsilon=DEFAULT_EPSILON,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0,
power_t=power_t,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
warm_start=warm_start,
average=average,
)
def _fit_one_class(self, X, alpha, sample_weight, learning_rate, max_iter):
"""Uses SGD implementation with X and y=np.ones(n_samples)."""
# The One-Class SVM uses the SGD implementation with
# y=np.ones(n_samples).
n_samples = X.shape[0]
y = np.ones(n_samples, dtype=X.dtype, order="C")
dataset, offset_decay = make_dataset(X, y, sample_weight)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
# early stopping is set to False for the One-Class SVM. thus
# validation_mask and validation_score_cb will be set to values
# associated to early_stopping=False in _make_validation_split and
# _make_validation_score_cb respectively.
validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0)
validation_score_cb = self._make_validation_score_cb(
validation_mask, X, y, sample_weight
)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
tol = self.tol if self.tol is not None else -np.inf
one_class = 1
# There are no class weights for the One-Class SVM and they are
# therefore set to 1.
pos_weight = 1
neg_weight = 1
if self.average:
coef = self._standard_coef
intercept = self._standard_intercept
average_coef = self._average_coef
average_intercept = self._average_intercept
else:
coef = self.coef_
intercept = 1 - self.offset_
average_coef = None # Not used
average_intercept = [0] # Not used
_plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype)
coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd(
coef,
intercept[0],
average_coef,
average_intercept[0],
self._loss_function_,
penalty_type,
alpha,
self.l1_ratio,
dataset,
validation_mask,
self.early_stopping,
validation_score_cb,
int(self.n_iter_no_change),
max_iter,
tol,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
neg_weight,
pos_weight,
learning_rate_type,
self.eta0,
self.power_t,
one_class,
self.t_,
offset_decay,
self.average,
)
self.t_ += self.n_iter_ * n_samples
if self.average > 0:
self._average_intercept = np.atleast_1d(average_intercept)
self._standard_intercept = np.atleast_1d(intercept)
if self.average <= self.t_ - 1.0:
# made enough updates for averaging to be taken into account
self.coef_ = average_coef
self.offset_ = 1 - np.atleast_1d(average_intercept)
else:
self.coef_ = coef
self.offset_ = 1 - np.atleast_1d(intercept)
else:
self.offset_ = 1 - np.atleast_1d(intercept)
def _partial_fit(
self,
X,
alpha,
loss,
learning_rate,
max_iter,
sample_weight,
coef_init,
offset_init,
):
first_call = getattr(self, "coef_", None) is None
X = validate_data(
self,
X,
None,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
accept_large_sparse=False,
reset=first_call,
)
n_features = X.shape[1]
# Allocate datastructures from input arguments
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# We use intercept = 1 - offset where intercept is the intercept of
# the SGD implementation and offset is the offset of the One-Class SVM
# optimization problem.
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(
n_classes=1,
n_features=n_features,
input_dtype=X.dtype,
coef_init=coef_init,
intercept_init=offset_init,
one_class=1,
)
elif n_features != self.coef_.shape[-1]:
raise ValueError(
"Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1])
)
if self.average and getattr(self, "_average_coef", None) is None:
self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C")
self._average_intercept = np.zeros(1, dtype=X.dtype, order="C")
self._loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
self._fit_one_class(
X,
alpha=alpha,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter,
)
return self
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y=None, sample_weight=None):
"""Fit linear One-Class SVM with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns a fitted instance of self.
"""
if not hasattr(self, "coef_"):
self._more_validate_params(for_partial_fit=True)
alpha = self.nu / 2
return self._partial_fit(
X,
alpha,
loss=self.loss,
learning_rate=self.learning_rate,
max_iter=1,
sample_weight=sample_weight,
coef_init=None,
offset_init=None,
)
def _fit(
self,
X,
alpha,
loss,
learning_rate,
coef_init=None,
offset_init=None,
sample_weight=None,
):
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if offset_init is None:
offset_init = self.offset_
else:
self.coef_ = None
self.offset_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(
X,
alpha,
loss,
learning_rate,
self.max_iter,
sample_weight,
coef_init,
offset_init,
)
if (
self.tol is not None
and self.tol > -np.inf
and self.n_iter_ == self.max_iter
):
warnings.warn(
(
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit."
),
ConvergenceWarning,
)
if self.power_t < 0:
warnings.warn(
"Negative values for `power_t` are deprecated in version 1.8 "
"and will raise an error in 1.10. "
"Use values in the range [0.0, inf) instead.",
FutureWarning,
)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
"""Fit linear One-Class SVM with Stochastic Gradient Descent.
This solves an equivalent optimization problem of the
One-Class SVM primal optimization problem and returns a weight vector
w and an offset rho such that the decision function is given by
<w, x> - rho.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
offset_init : array, shape (n_classes,)
The initial offset to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : object
Returns a fitted instance of self.
"""
self._more_validate_params()
alpha = self.nu / 2
self._fit(
X,
alpha=alpha,
loss=self.loss,
learning_rate=self.learning_rate,
coef_init=coef_init,
offset_init=offset_init,
sample_weight=sample_weight,
)
return self
def decision_function(self, X):
"""Signed distance to the separating hyperplane.
Signed distance is positive for an inlier and negative for an
outlier.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
dec : array-like, shape (n_samples,)
Decision function values of the samples.
"""
check_is_fitted(self, "coef_")
X = validate_data(self, X, accept_sparse="csr", reset=False)
decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_
return decisions.ravel()
def score_samples(self, X):
"""Raw scoring function of the samples.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
score_samples : array-like, shape (n_samples,)
Unshiffted scoring function values of the samples.
"""
score_samples = self.decision_function(X) + self.offset_
return score_samples
def predict(self, X):
"""Return labels (1 inlier, -1 outlier) of the samples.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
y : array, shape (n_samples,)
Labels of the samples.
"""
y = (self.decision_function(X) >= 0).astype(np.int32)
y[y == 0] = -1 # for consistency with outlier detectors
return y
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
| SGDOneClassSVM |
python | simonw__sqlite-utils | sqlite_utils/db.py | {
"start": 4302,
"end": 5839
} | class ____:
pass
DEFAULT = Default()
COLUMN_TYPE_MAPPING = {
float: "REAL",
int: "INTEGER",
bool: "INTEGER",
str: "TEXT",
dict: "TEXT",
tuple: "TEXT",
list: "TEXT",
bytes.__class__: "BLOB",
bytes: "BLOB",
memoryview: "BLOB",
datetime.datetime: "TEXT",
datetime.date: "TEXT",
datetime.time: "TEXT",
datetime.timedelta: "TEXT",
decimal.Decimal: "REAL",
None.__class__: "TEXT",
uuid.UUID: "TEXT",
# SQLite explicit types
"TEXT": "TEXT",
"INTEGER": "INTEGER",
"FLOAT": "FLOAT",
"REAL": "REAL",
"BLOB": "BLOB",
"text": "TEXT",
"str": "TEXT",
"integer": "INTEGER",
"int": "INTEGER",
"float": "REAL",
"real": "REAL",
"blob": "BLOB",
"bytes": "BLOB",
}
# If numpy is available, add more types
if np:
try:
COLUMN_TYPE_MAPPING.update(
{
np.int8: "INTEGER",
np.int16: "INTEGER",
np.int32: "INTEGER",
np.int64: "INTEGER",
np.uint8: "INTEGER",
np.uint16: "INTEGER",
np.uint32: "INTEGER",
np.uint64: "INTEGER",
np.float16: "REAL",
np.float32: "REAL",
np.float64: "REAL",
}
)
except AttributeError:
# https://github.com/simonw/sqlite-utils/issues/632
pass
# If pandas is available, add more types
if pd:
COLUMN_TYPE_MAPPING.update({pd.Timestamp: "TEXT"}) # type: ignore
| Default |
python | ZoranPandovski__al-go-rithms | games/Python/paddleball.py | {
"start": 1831,
"end": 3535
} | class ____:
"""This is the user controlled paddle class."""
def __init__(self, canvas, color):
self.canvas = canvas #canvas = var with functions inside
self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color) #creating paddle
self.canvas.move(self.id, 200, 300)
self.x = 0
self.canvas_width = self.canvas.winfo_width()
self.started = False
self.canvas.bind_all('<KeyPress-Left>', self.turn_left) #sensing whether right of left key pressed and go in that direction by calling function turn_left or turn_right.
self.canvas.bind_all('<KeyRelease-Left>', self.stop_left)#Senses when key released and stops
self.canvas.bind_all('<KeyPress-Right>', self.turn_right)#to turn right when right key pressed
self.canvas.bind_all('<KeyRelease-Right>', self.stop_right)#stops when released
self.canvas.bind_all('<Button-1>', self.start_game) #game starts when canvas is clicked
def draw(self):
self.canvas.move(self.id, self.x, 0)
pos = self.canvas.coords(self.id)
if pos[0] <= 0:
self.x = 0
elif pos[2] >= self.canvas_width:
self.x = 0
def turn_left(self, evt):
pos = self.canvas.coords(self.id)
if pos[0] <= 0:
self.x = 0
else:
self.x = -3
def stop_left(self, evt):
self.x = 0
def turn_right(self, evt):
pos = self.canvas.coords(self.id)
if pos[2] >= 500:
self.x = 0
else:
self.x = 3
def stop_right(self, evt):
self.x = 0
def start_game(self, evt):
self.started = True
| Paddle |
python | django-haystack__django-haystack | test_haystack/whoosh_tests/test_whoosh_backend.py | {
"start": 2300,
"end": 2734
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
month = indexes.CharField(indexed=False)
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return MockModel
def prepare_text(self, obj):
return "Indexed!\n%s" % obj.pk
def prepare_month(self, obj):
return "%02d" % obj.pub_date.month
| WhooshMaintainTypeMockSearchIndex |
python | kamyu104__LeetCode-Solutions | Python/maximum-product-after-k-increments.py | {
"start": 1308,
"end": 1699
} | class ____(object):
def maximumProduct(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
MOD = 10**9+7
min_heap = nums
heapq.heapify(min_heap)
while k:
heapq.heappush(min_heap, heapq.heappop(min_heap)+1)
k -= 1
return reduce(lambda x, y: x*y%MOD, min_heap)
| Solution3 |
python | ipython__ipython | tests/test_interactivshell.py | {
"start": 4870,
"end": 7576
} | class ____(unittest.TestCase):
def rl_hist_entries(self, rl, n):
"""Get last n readline history entries as a list"""
return [
rl.get_history_item(rl.get_current_history_length() - x)
for x in range(n - 1, -1, -1)
]
@mock_input
def test_inputtransformer_syntaxerror(self):
ip = get_ipython()
ip.input_transformers_post.append(syntax_error_transformer)
try:
# raise Exception
with tt.AssertPrints("4", suppress=False):
yield "print(2*2)"
with tt.AssertPrints("SyntaxError: input contains", suppress=False):
yield "print(2345) # syntaxerror"
with tt.AssertPrints("16", suppress=False):
yield "print(4*4)"
finally:
ip.input_transformers_post.remove(syntax_error_transformer)
def test_repl_not_plain_text(self):
ip = get_ipython()
formatter = ip.display_formatter
assert formatter.active_types == ["text/plain"]
# terminal may have arbitrary mimetype handler to open external viewer
# or inline images.
assert formatter.ipython_display_formatter.enabled
class Test(object):
def __repr__(self):
return "<Test %i>" % id(self)
def _repr_html_(self):
return "<html>"
# verify that HTML repr isn't computed
obj = Test()
data, _ = formatter.format(obj)
self.assertEqual(data, {"text/plain": repr(obj)})
class Test2(Test):
def _ipython_display_(self):
from IPython.display import display, HTML
display(HTML("<custom>"))
# verify that mimehandlers are called
called = False
def handler(data, metadata):
print("Handler called")
nonlocal called
called = True
ip.display_formatter.active_types.append("text/html")
ip.display_formatter.formatters["text/html"].enabled = True
ip.mime_renderers["text/html"] = handler
try:
obj = Test()
display(obj)
finally:
ip.display_formatter.formatters["text/html"].enabled = False
del ip.mime_renderers["text/html"]
assert called == True
def syntax_error_transformer(lines):
"""Transformer that throws SyntaxError if 'syntaxerror' is in the code."""
for line in lines:
pos = line.find("syntaxerror")
if pos >= 0:
e = SyntaxError('input contains "syntaxerror"')
e.text = line
e.offset = pos + 1
raise e
return lines
| InteractiveShellTestCase |
python | fluentpython__example-code | 01-data-model/vector2d.py | {
"start": 24,
"end": 509
} | class ____:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __repr__(self):
return 'Vector(%r, %r)' % (self.x, self.y)
def __abs__(self):
return hypot(self.x, self.y)
def __bool__(self):
return bool(abs(self))
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return Vector(x, y)
def __mul__(self, scalar):
return Vector(self.x * scalar, self.y * scalar)
| Vector |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 19386,
"end": 19726
} | class ____(GroupType):
type_id = 2002
slug = "profile_image_decode_main_thread"
description = "Image Decoding on Main Thread"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.MOBILE.value
default_priority = PriorityLevel.LOW
released = True
@dataclass(frozen=True)
| ProfileImageDecodeGroupType |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_date04.py | {
"start": 342,
"end": 2116
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_date04.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [51761152, 51762688]
worksheet.set_column("A:A", 12)
dates = [
date(2013, 1, 1),
date(2013, 1, 2),
date(2013, 1, 3),
date(2013, 1, 4),
date(2013, 1, 5),
date(2013, 1, 6),
date(2013, 1, 7),
date(2013, 1, 8),
date(2013, 1, 9),
date(2013, 1, 10),
]
values = [10, 30, 20, 40, 20, 60, 50, 40, 30, 30]
worksheet.write_column("A1", dates, date_format)
worksheet.write_column("B1", values)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$10",
"values": "=Sheet1!$B$1:$B$10",
}
)
chart.set_x_axis(
{
"date_axis": True,
"minor_unit": 1,
"major_unit": 1,
"minor_unit_type": "months",
"major_unit_type": "years",
"num_format": "dd/mm/yyyy",
"num_format_linked": True,
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | openai__openai-python | src/openai/types/beta/threads/text_content_block_param.py | {
"start": 221,
"end": 407
} | class ____(TypedDict, total=False):
text: Required[str]
"""Text content to be sent to the model"""
type: Required[Literal["text"]]
"""Always `text`."""
| TextContentBlockParam |
python | ansible__ansible | test/integration/targets/no_log/action_plugins/action_sets_no_log.py | {
"start": 84,
"end": 319
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
return dict(changed=False, failed=False, msg="action result should be masked", _ansible_no_log="yeppers") # ensure that a truthy non-bool works here
| ActionModule |
python | mlflow__mlflow | tests/db/test_schema.py | {
"start": 998,
"end": 4695
} | class ____(NamedTuple):
table: str
columns: str
_CREATE_TABLE_REGEX = re.compile(
r"""
CREATE TABLE (?P<table>\S+?) \(
(?P<columns>.+?)
\)
""".strip(),
flags=re.DOTALL,
)
def parse_create_tables(schema):
return [
_CreateTable(
table=m.group("table"),
columns=set(m.group("columns").splitlines()),
)
for m in _CREATE_TABLE_REGEX.finditer(schema)
]
def schema_equal(schema_a, schema_b):
create_tables_a = parse_create_tables(schema_a)
create_tables_b = parse_create_tables(schema_b)
assert create_tables_a != []
assert create_tables_b != []
return create_tables_a == create_tables_b
def get_schema_path(db_uri):
return Path(__file__).parent / "schemas" / (get_database_dialect(db_uri) + ".sql")
def iter_parameter_sets():
a = """
CREATE TABLE table (
col VARCHAR(10)
)
"""
b = """
CREATE TABLE table (
col VARCHAR(10)
)
"""
yield pytest.param(a, b, True, id="identical schemas")
a = """
CREATE TABLE table1 (
col VARCHAR(10)
)
"""
b = """
CREATE TABLE table2 (
col VARCHAR(10)
)
"""
yield pytest.param(a, b, False, id="different table names")
a = """
CREATE TABLE table (
col1 VARCHAR(10)
)
"""
b = """
CREATE TABLE table (
col2 VARCHAR(10)
)
"""
yield pytest.param(a, b, False, id="different column names")
@pytest.mark.parametrize(("a", "b", "expected"), iter_parameter_sets())
def test_schema_equal(a, b, expected):
assert schema_equal(a, b) is expected
def initialize_database():
with mlflow.start_run():
pass
def get_schema_update_command(dialect):
this_script = Path(__file__).relative_to(Path.cwd())
docker_compose_yml = this_script.parent / "compose.yml"
return f"docker compose -f {docker_compose_yml} run --rm mlflow-{dialect} python {this_script}"
def test_schema_is_up_to_date():
initialize_database()
tracking_uri = get_tracking_uri()
schema_path = get_schema_path(tracking_uri)
existing_schema = schema_path.read_text()
latest_schema = dump_schema(tracking_uri)
dialect = get_database_dialect(tracking_uri)
update_command = get_schema_update_command(dialect)
message = (
f"{schema_path.relative_to(Path.cwd())} is not up-to-date. "
f"Please run this command to update it: {update_command}"
)
diff = "".join(
difflib.ndiff(
existing_schema.splitlines(keepends=True), latest_schema.splitlines(keepends=True)
)
)
rel_path = schema_path.relative_to(Path.cwd())
message = f"""
=================================== EXPECTED ===================================
{latest_schema}
==================================== ACTUAL ====================================
{existing_schema}
===================================== DIFF =====================================
{diff}
================================== HOW TO FIX ==================================
Manually copy & paste the expected schema in {rel_path} or run the following command:
{update_command}
"""
assert schema_equal(existing_schema, latest_schema), message
def main():
tracking_uri = get_tracking_uri()
assert tracking_uri, f"Environment variable {MLFLOW_TRACKING_URI} must be set"
get_database_dialect(tracking_uri) # Ensure `tracking_uri` is a database URI
mlflow.set_tracking_uri(tracking_uri)
initialize_database()
schema_path = get_schema_path(tracking_uri)
existing_schema = schema_path.read_text()
latest_schema = dump_schema(tracking_uri)
if not schema_equal(existing_schema, latest_schema):
schema_path.write_text(latest_schema)
if __name__ == "__main__":
main()
| _CreateTable |
python | huggingface__transformers | src/transformers/models/stablelm/modeling_stablelm.py | {
"start": 35218,
"end": 35324
} | class ____(GenericForSequenceClassification, StableLmPreTrainedModel): ...
| StableLmForSequenceClassification |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v1.py | {
"start": 1354,
"end": 5618
} | class ____(object):
"""Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
"""
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
# checks that clipnorm >= 0 and clipvalue >= 0
if kwargs[k] < 0:
raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
# Set this to False, indicating `apply_gradients` does not take the
# `experimental_aggregate_gradients` argument.
_HAS_AGGREGATE_GRAD = False
def _create_all_weights(self, params):
"""Creates and sets all optimizer weights.
Args:
params: list or tuple of `Variable` objects that will be minimized
using this optimizer.
Returns:
Specific weight values that are used in `get_updates`
"""
raise NotImplementedError
def get_updates(self, loss, params):
raise NotImplementedError
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Args:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
grads = backend.gradients(loss, params)
if any(g is None for g in grads):
raise ValueError('An operation has `None` for gradient. '
'Please make sure that all of your ops have a '
'gradient defined (i.e. are differentiable). '
'Common ops without gradient: '
'backend.argmax, backend.round, backend.eval.')
if hasattr(self, 'clipnorm'):
grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, 'clipvalue'):
grads = [
clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def set_weights(self, weights):
"""Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
Args:
weights: a list of Numpy arrays. The number of arrays and their shape
must match number of the dimensions of the weights of the optimizer
(i.e. it should match the output of `get_weights`).
Raises:
ValueError: in case of incompatible weight shapes.
"""
params = self.weights
if len(params) != len(weights):
raise ValueError('Length of the specified weight list (' +
str(len(weights)) +
') does not match the number of weights '
'of the optimizer (' + str(len(params)) + ')')
weight_value_tuples = []
param_values = backend.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError('Optimizer weight shape ' + str(pv.shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current value of the weights of the optimizer.
Returns:
A list of numpy arrays.
"""
return backend.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
| Optimizer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 5688,
"end": 5897
} | class ____(ArgumentError):
"""raised when a constraint refers to a string column name that
is not present in the table being constrained.
.. versionadded:: 2.0
"""
| ConstraintColumnNotFoundError |
python | cython__cython | tests/run/ext_auto_richcmp.py | {
"start": 307,
"end": 1443
} | class ____(X):
"""
>>> a = ClassEq(1)
>>> b = ClassEq(2)
>>> c = ClassEq(1)
>>> a == a
True
>>> a != a
False
>>> a == b
False
>>> a != b
True
>>> a == c
True
>>> a != c
False
>>> b == c
False
>>> b != c
True
>>> c == a
True
>>> c != a
False
>>> b == a
False
>>> b != a
True
>>> a < b # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError...
>>> a > b # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError...
>>> a <= b # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError...
>>> a >= b # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError...
>>> print(a.__eq__.__doc__)
EQ
"""
def __eq__(self, other):
"""EQ"""
assert 1 <= self.x <= 2
assert isinstance(self, ClassEq), type(self)
if isinstance(other, X):
return self.x == x_of(other)
elif isinstance(other, int):
return self.x < other
return NotImplemented
@cython.cclass
| ClassEq |
python | walkccc__LeetCode | solutions/846. Hand of Straights/846.py | {
"start": 0,
"end": 351
} | class ____:
def isNStraightHand(self, hand: list[int], groupSize: int) -> bool:
count = collections.Counter(hand)
for start in sorted(count):
value = count[start]
if value > 0:
for i in range(start, start + groupSize):
count[i] -= value
if count[i] < 0:
return False
return True
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.