language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
jmcnamara__XlsxWriter
xlsxwriter/styles.py
{ "start": 553, "end": 24923 }
class ____(xmlwriter.XMLwriter): """ A class for writing the Excel XLSX Styles file. """ ########################################################################### # # Public API. # ########################################################################### def __init__(self) -> None: """ Constructor. """ super().__init__() self.xf_formats = [] self.palette = [] self.font_count = 0 self.num_formats = [] self.border_count = 0 self.fill_count = 0 self.custom_colors = [] self.dxf_formats = [] self.has_hyperlink = False self.hyperlink_font_id = 0 self.has_comments = False ########################################################################### # # Private API. # ########################################################################### def _assemble_xml_file(self) -> None: # Assemble and write the XML file. # Write the XML declaration. self._xml_declaration() # Add the style sheet. self._write_style_sheet() # Write the number formats. self._write_num_fmts() # Write the fonts. self._write_fonts() # Write the fills. self._write_fills() # Write the borders element. self._write_borders() # Write the cellStyleXfs element. self._write_cell_style_xfs() # Write the cellXfs element. self._write_cell_xfs() # Write the cellStyles element. self._write_cell_styles() # Write the dxfs element. self._write_dxfs() # Write the tableStyles element. self._write_table_styles() # Write the colors element. self._write_colors() # Close the style sheet tag. self._xml_end_tag("styleSheet") # Close the file. self._xml_close() def _set_style_properties(self, properties) -> None: # Pass in the Format objects and other properties used in the styles. 
self.xf_formats = properties[0] self.palette = properties[1] self.font_count = properties[2] self.num_formats = properties[3] self.border_count = properties[4] self.fill_count = properties[5] self.custom_colors = properties[6] self.dxf_formats = properties[7] self.has_comments = properties[8] ########################################################################### # # XML methods. # ########################################################################### def _write_style_sheet(self) -> None: # Write the <styleSheet> element. xmlns = "http://schemas.openxmlformats.org/spreadsheetml/2006/main" attributes = [("xmlns", xmlns)] self._xml_start_tag("styleSheet", attributes) def _write_num_fmts(self) -> None: # Write the <numFmts> element. if not self.num_formats: return attributes = [("count", len(self.num_formats))] self._xml_start_tag("numFmts", attributes) # Write the numFmts elements. for index, num_format in enumerate(self.num_formats, 164): self._write_num_fmt(index, num_format) self._xml_end_tag("numFmts") def _write_num_fmt(self, num_fmt_id, format_code) -> None: # Write the <numFmt> element. format_codes = { 0: "General", 1: "0", 2: "0.00", 3: "#,##0", 4: "#,##0.00", 5: "($#,##0_);($#,##0)", 6: "($#,##0_);[Red]($#,##0)", 7: "($#,##0.00_);($#,##0.00)", 8: "($#,##0.00_);[Red]($#,##0.00)", 9: "0%", 10: "0.00%", 11: "0.00E+00", 12: "# ?/?", 13: "# ??/??", 14: "m/d/yy", 15: "d-mmm-yy", 16: "d-mmm", 17: "mmm-yy", 18: "h:mm AM/PM", 19: "h:mm:ss AM/PM", 20: "h:mm", 21: "h:mm:ss", 22: "m/d/yy h:mm", 37: "(#,##0_);(#,##0)", 38: "(#,##0_);[Red](#,##0)", 39: "(#,##0.00_);(#,##0.00)", 40: "(#,##0.00_);[Red](#,##0.00)", 41: '_(* #,##0_);_(* (#,##0);_(* "-"_);_(_)', 42: '_($* #,##0_);_($* (#,##0);_($* "-"_);_(_)', 43: '_(* #,##0.00_);_(* (#,##0.00);_(* "-"??_);_(_)', 44: '_($* #,##0.00_);_($* (#,##0.00);_($* "-"??_);_(_)', 45: "mm:ss", 46: "[h]:mm:ss", 47: "mm:ss.0", 48: "##0.0E+0", 49: "@", } # Set the format code for built-in number formats. 
if num_fmt_id < 164: format_code = format_codes.get(num_fmt_id, "General") attributes = [ ("numFmtId", num_fmt_id), ("formatCode", format_code), ] self._xml_empty_tag("numFmt", attributes) def _write_fonts(self) -> None: # Write the <fonts> element. if self.has_comments: # Add extra font for comments. attributes = [("count", self.font_count + 1)] else: attributes = [("count", self.font_count)] self._xml_start_tag("fonts", attributes) # Write the font elements for xf_format objects that have them. for xf_format in self.xf_formats: if xf_format.has_font: self._write_font(xf_format) if self.has_comments: self._write_comment_font() self._xml_end_tag("fonts") def _write_font(self, xf_format, is_dxf_format=False) -> None: # Write the <font> element. self._xml_start_tag("font") # The condense and extend elements are mainly used in dxf formats. if xf_format.font_condense: self._write_condense() if xf_format.font_extend: self._write_extend() if xf_format.bold: self._xml_empty_tag("b") if xf_format.italic: self._xml_empty_tag("i") if xf_format.font_strikeout: self._xml_empty_tag("strike") if xf_format.font_outline: self._xml_empty_tag("outline") if xf_format.font_shadow: self._xml_empty_tag("shadow") # Handle the underline variants. if xf_format.underline: self._write_underline(xf_format.underline) if xf_format.font_script == 1: self._write_vert_align("superscript") if xf_format.font_script == 2: self._write_vert_align("subscript") if not is_dxf_format: self._xml_empty_tag("sz", [("val", xf_format.font_size)]) if xf_format.theme == -1: # Ignore for excel2003_style. 
pass elif xf_format.theme: self._write_color([("theme", xf_format.theme)]) elif xf_format.color_indexed: self._write_color([("indexed", xf_format.color_indexed)]) elif xf_format.font_color: color = xf_format.font_color if not color._is_automatic: self._write_color(color._attributes()) elif not is_dxf_format: self._write_color([("theme", 1)]) if not is_dxf_format: self._xml_empty_tag("name", [("val", xf_format.font_name)]) if xf_format.font_family: self._xml_empty_tag("family", [("val", xf_format.font_family)]) if xf_format.font_charset: self._xml_empty_tag("charset", [("val", xf_format.font_charset)]) if xf_format.font_scheme in ("major", "minor"): self._xml_empty_tag("scheme", [("val", xf_format.font_scheme)]) if xf_format.hyperlink: self.has_hyperlink = True if self.hyperlink_font_id == 0: self.hyperlink_font_id = xf_format.font_index self._xml_end_tag("font") def _write_comment_font(self) -> None: # Write the <font> element for comments. self._xml_start_tag("font") self._xml_empty_tag("sz", [("val", 8)]) self._write_color([("indexed", 81)]) self._xml_empty_tag("name", [("val", "Tahoma")]) self._xml_empty_tag("family", [("val", 2)]) self._xml_end_tag("font") def _write_underline(self, underline) -> None: # Write the underline font element. if underline == 2: attributes = [("val", "double")] elif underline == 33: attributes = [("val", "singleAccounting")] elif underline == 34: attributes = [("val", "doubleAccounting")] else: # Default to single underline. attributes = [] self._xml_empty_tag("u", attributes) def _write_vert_align(self, val) -> None: # Write the <vertAlign> font sub-element. attributes = [("val", val)] self._xml_empty_tag("vertAlign", attributes) def _write_color(self, attributes) -> None: # Write the <color> element. self._xml_empty_tag("color", attributes) def _write_fills(self) -> None: # Write the <fills> element. attributes = [("count", self.fill_count)] self._xml_start_tag("fills", attributes) # Write the default fill element. 
self._write_default_fill("none") self._write_default_fill("gray125") # Write the fill elements for xf_format objects that have them. for xf_format in self.xf_formats: if xf_format.has_fill: self._write_fill(xf_format) self._xml_end_tag("fills") def _write_default_fill(self, pattern_type) -> None: # Write the <fill> element for the default fills. self._xml_start_tag("fill") self._xml_empty_tag("patternFill", [("patternType", pattern_type)]) self._xml_end_tag("fill") def _write_fill(self, xf_format, is_dxf_format=False) -> None: # Write the <fill> element. pattern = xf_format.pattern bg_color = xf_format.bg_color fg_color = xf_format.fg_color # Colors for dxf formats are handled differently from normal formats # since the normal xf_format reverses the meaning of BG and FG for # solid fills. if is_dxf_format: bg_color = xf_format.dxf_bg_color fg_color = xf_format.dxf_fg_color patterns = ( "none", "solid", "mediumGray", "darkGray", "lightGray", "darkHorizontal", "darkVertical", "darkDown", "darkUp", "darkGrid", "darkTrellis", "lightHorizontal", "lightVertical", "lightDown", "lightUp", "lightGrid", "lightTrellis", "gray125", "gray0625", ) # Special handling for pattern only case. if not fg_color and not bg_color and patterns[pattern]: self._write_default_fill(patterns[pattern]) return self._xml_start_tag("fill") # The "none" pattern is handled differently for dxf formats. if is_dxf_format and pattern <= 1: self._xml_start_tag("patternFill") else: self._xml_start_tag("patternFill", [("patternType", patterns[pattern])]) if fg_color: if not fg_color._is_automatic: self._xml_empty_tag("fgColor", fg_color._attributes()) if bg_color: if not bg_color._is_automatic: self._xml_empty_tag("bgColor", bg_color._attributes()) else: if not is_dxf_format and pattern <= 1: self._xml_empty_tag("bgColor", [("indexed", 64)]) self._xml_end_tag("patternFill") self._xml_end_tag("fill") def _write_borders(self) -> None: # Write the <borders> element. 
attributes = [("count", self.border_count)] self._xml_start_tag("borders", attributes) # Write the border elements for xf_format objects that have them. for xf_format in self.xf_formats: if xf_format.has_border: self._write_border(xf_format) self._xml_end_tag("borders") def _write_border(self, xf_format, is_dxf_format=False) -> None: # Write the <border> element. attributes = [] # Diagonal borders add attributes to the <border> element. if xf_format.diag_type == 1: attributes.append(("diagonalUp", 1)) elif xf_format.diag_type == 2: attributes.append(("diagonalDown", 1)) elif xf_format.diag_type == 3: attributes.append(("diagonalUp", 1)) attributes.append(("diagonalDown", 1)) # Ensure that a default diag border is set if the diag type is set. if xf_format.diag_type and not xf_format.diag_border: xf_format.diag_border = 1 # Write the start border tag. self._xml_start_tag("border", attributes) # Write the <border> sub elements. self._write_sub_border("left", xf_format.left, xf_format.left_color) self._write_sub_border("right", xf_format.right, xf_format.right_color) self._write_sub_border("top", xf_format.top, xf_format.top_color) self._write_sub_border("bottom", xf_format.bottom, xf_format.bottom_color) # Condition DXF formats don't allow diagonal borders. if not is_dxf_format: self._write_sub_border( "diagonal", xf_format.diag_border, xf_format.diag_color ) if is_dxf_format: self._write_sub_border("vertical", None, None) self._write_sub_border("horizontal", None, None) self._xml_end_tag("border") def _write_sub_border(self, border_type, style, color) -> None: # Write the <border> sub elements such as <right>, <top>, etc. 
attributes = [] if not style: self._xml_empty_tag(border_type) return border_styles = ( "none", "thin", "medium", "dashed", "dotted", "thick", "double", "hair", "mediumDashed", "dashDot", "mediumDashDot", "dashDotDot", "mediumDashDotDot", "slantDashDot", ) attributes.append(("style", border_styles[style])) self._xml_start_tag(border_type, attributes) if color and not color._is_automatic: self._xml_empty_tag("color", color._attributes()) else: self._xml_empty_tag("color", [("auto", 1)]) self._xml_end_tag(border_type) def _write_cell_style_xfs(self) -> None: # Write the <cellStyleXfs> element. count = 1 if self.has_hyperlink: count = 2 attributes = [("count", count)] self._xml_start_tag("cellStyleXfs", attributes) style_format = self.xf_formats[0] self._write_xf(style_format, XFormatType.STYLE) if self.has_hyperlink: self._write_style_xf(True, self.hyperlink_font_id) self._xml_end_tag("cellStyleXfs") def _write_cell_xfs(self) -> None: # Write the <cellXfs> element. formats = self.xf_formats # Workaround for when the last xf_format is used for the comment font # and shouldn't be used for cellXfs. last_format = formats[-1] if last_format.font_only: formats.pop() attributes = [("count", len(formats))] self._xml_start_tag("cellXfs", attributes) # Write the xf elements. cell_type = XFormatType.DEFAULT for xf_format in formats: self._write_xf(xf_format, cell_type) cell_type = XFormatType.USER self._xml_end_tag("cellXfs") def _write_style_xf(self, has_hyperlink=False, font_id=0) -> None: # Write the style <xf> element. 
num_fmt_id = 0 fill_id = 0 border_id = 0 attributes = [ ("numFmtId", num_fmt_id), ("fontId", font_id), ("fillId", fill_id), ("borderId", border_id), ] if has_hyperlink: attributes.append(("applyNumberFormat", 0)) attributes.append(("applyFill", 0)) attributes.append(("applyBorder", 0)) attributes.append(("applyAlignment", 0)) attributes.append(("applyProtection", 0)) self._xml_start_tag("xf", attributes) self._xml_empty_tag("alignment", [("vertical", "top")]) self._xml_empty_tag("protection", [("locked", 0)]) self._xml_end_tag("xf") else: self._xml_empty_tag("xf", attributes) def _write_xf(self, xf_format, xf_type: XFormatType) -> None: # Write the <xf> element. xf_id = xf_format.xf_id font_id = xf_format.font_index fill_id = xf_format.fill_index border_id = xf_format.border_index num_fmt_id = xf_format.num_format_index has_checkbox = xf_format.checkbox has_alignment = False has_protection = False attributes = [ ("numFmtId", num_fmt_id), ("fontId", font_id), ("fillId", fill_id), ("borderId", border_id), ] if xf_type != XFormatType.STYLE: attributes.append(("xfId", xf_id)) if xf_format.quote_prefix: attributes.append(("quotePrefix", 1)) if xf_format.num_format_index > 0: attributes.append(("applyNumberFormat", 1)) # Add applyFont attribute if XF format uses a font element. if xf_format.font_index > 0 and not xf_format.hyperlink: attributes.append(("applyFont", 1)) # Add applyFill attribute if XF format uses a fill element. if xf_format.fill_index > 0: attributes.append(("applyFill", 1)) # Add applyBorder attribute if XF format uses a border element. if xf_format.border_index > 0: attributes.append(("applyBorder", 1)) # Check if XF format has alignment properties set. (apply_align, align) = xf_format._get_align_properties() # Check if an alignment sub-element should be written. if apply_align and align: has_alignment = True # We can also have applyAlignment without a sub-element. 
if (apply_align or xf_format.hyperlink) and xf_type == XFormatType.USER: attributes.append(("applyAlignment", 1)) # Check for cell protection properties. protection = xf_format._get_protection_properties() if protection or xf_format.hyperlink: attributes.append(("applyProtection", 1)) if not xf_format.hyperlink: has_protection = True # Write XF with sub-elements if required. if has_alignment or has_protection or has_checkbox: self._xml_start_tag("xf", attributes) if has_alignment: self._xml_empty_tag("alignment", align) if has_protection: self._xml_empty_tag("protection", protection) if has_checkbox: self._write_xf_format_extensions() self._xml_end_tag("xf") else: self._xml_empty_tag("xf", attributes) def _write_cell_styles(self) -> None: # Write the <cellStyles> element. count = 1 if self.has_hyperlink: count = 2 attributes = [("count", count)] self._xml_start_tag("cellStyles", attributes) if self.has_hyperlink: self._write_cell_style("Hyperlink", 1, 8) self._write_cell_style() self._xml_end_tag("cellStyles") def _write_cell_style(self, name="Normal", xf_id=0, builtin_id=0) -> None: # Write the <cellStyle> element. attributes = [ ("name", name), ("xfId", xf_id), ("builtinId", builtin_id), ] self._xml_empty_tag("cellStyle", attributes) def _write_dxfs(self) -> None: # Write the <dxfs> element. formats = self.dxf_formats count = len(formats) attributes = [("count", len(formats))] if count: self._xml_start_tag("dxfs", attributes) # Write the font elements for xf_format objects that have them. 
for dxf_format in self.dxf_formats: self._xml_start_tag("dxf") if dxf_format.has_dxf_font: self._write_font(dxf_format, True) if dxf_format.num_format_index: self._write_num_fmt( dxf_format.num_format_index, dxf_format.num_format ) if dxf_format.has_dxf_fill: self._write_fill(dxf_format, True) if dxf_format.has_dxf_border: self._write_border(dxf_format, True) if dxf_format.checkbox: self._write_dxf_format_extensions() self._xml_end_tag("dxf") self._xml_end_tag("dxfs") else: self._xml_empty_tag("dxfs", attributes) def _write_table_styles(self) -> None: # Write the <tableStyles> element. count = 0 default_table_style = "TableStyleMedium9" default_pivot_style = "PivotStyleLight16" attributes = [ ("count", count), ("defaultTableStyle", default_table_style), ("defaultPivotStyle", default_pivot_style), ] self._xml_empty_tag("tableStyles", attributes) def _write_colors(self) -> None: # Write the <colors> element. custom_colors = self.custom_colors if not custom_colors: return self._xml_start_tag("colors") self._write_mru_colors(custom_colors) self._xml_end_tag("colors") def _write_mru_colors(self, custom_colors) -> None: # Write the <mruColors> element for the most recently used colors. # Write the custom custom_colors in reverse order. custom_colors.reverse() # Limit the mruColors to the last 10. if len(custom_colors) > 10: custom_colors = custom_colors[0:10] self._xml_start_tag("mruColors") # Write the custom custom_colors in reverse order. for color in custom_colors: # For backwards compatibility convert possible self._write_color(color._attributes()) self._xml_end_tag("mruColors") def _write_condense(self) -> None: # Write the <condense> element. attributes = [("val", 0)] self._xml_empty_tag("condense", attributes) def _write_extend(self) -> None: # Write the <extend> element. attributes = [("val", 0)] self._xml_empty_tag("extend", attributes) def _write_xf_format_extensions(self) -> None: # Write the xfComplement <extLst> elements. 
schema = "http://schemas.microsoft.com/office/spreadsheetml" attributes = [ ("uri", "{C7286773-470A-42A8-94C5-96B5CB345126}"), ( "xmlns:xfpb", schema + "/2022/featurepropertybag", ), ] self._xml_start_tag("extLst") self._xml_start_tag("ext", attributes) self._xml_empty_tag("xfpb:xfComplement", [("i", "0")]) self._xml_end_tag("ext") self._xml_end_tag("extLst") def _write_dxf_format_extensions(self) -> None: # Write the DXFComplement <extLst> elements. schema = "http://schemas.microsoft.com/office/spreadsheetml" attributes = [ ("uri", "{0417FA29-78FA-4A13-93AC-8FF0FAFDF519}"), ( "xmlns:xfpb", schema + "/2022/featurepropertybag", ), ] self._xml_start_tag("extLst") self._xml_start_tag("ext", attributes) self._xml_empty_tag("xfpb:DXFComplement", [("i", "0")]) self._xml_end_tag("ext") self._xml_end_tag("extLst")
Styles
python
joerick__pyinstrument
test/fake_time_util.py
{ "start": 1912, "end": 2400 }
class ____: def __init__(self, clock: "MockClock") -> None: self.trio_clock = clock def get_time(self): return self.trio_clock.current_time() def sleep(self, duration): self.trio_clock.jump(duration) @contextlib.contextmanager def fake_time_trio(): from trio.testing import MockClock trio_clock = MockClock(autojump_threshold=0) fake_clock = FakeClockTrio(trio_clock) with fake_time(fake_clock): yield fake_clock
FakeClockTrio
python
ipython__ipython
tests/test_interactiveshell.py
{ "start": 25270, "end": 25579 }
class ____(ast.NodeTransformer): """Negates all number literals in an AST.""" def visit_Num(self, node): node.value = -node.value return node def visit_Constant(self, node): if isinstance(node.value, int): return self.visit_Num(node) return node
Negator
python
getsentry__sentry
tests/snuba/tagstore/test_tagstore_backend.py
{ "start": 54048, "end": 55467 }
class ____(BaseSemverTest): KEY = SEMVER_BUILD_ALIAS def test_semver_package(self) -> None: env_2 = self.create_environment() project_2 = self.create_project() self.create_release(version="test@1.0.0.0+123", additional_projects=[project_2]) self.create_release(version="test@1.0.0.0+456") self.create_release(version="test@1.2.0.0", environments=[self.environment]) self.create_release(version="test@1.2.1.0+124", environments=[self.environment]) self.create_release(version="test@2.0.0.0+456", environments=[self.environment, env_2]) self.create_release(version="test@2.0.1.0+457a", additional_projects=[project_2]) self.create_release(version="test@2.0.1.0+789", additional_projects=[project_2]) # This shouldn't appear for any semver autocomplete self.create_release(version="test@abc123", additional_projects=[project_2]) self.run_test(None, ["123", "124", "456", "457a", "789"]) self.run_test("", ["123", "124", "456", "457a", "789"]) self.run_test("1", ["123", "124"]) self.run_test("123", ["123"]) self.run_test("4", ["456", "457a"]) self.run_test("1", ["123"], project=project_2) self.run_test("1", ["124"], self.environment) self.run_test("4", ["456", "457a"]) self.run_test("4", ["456"], env_2)
GetTagValuePaginatorForProjectsSemverBuildTest
python
tensorflow__tensorflow
tensorflow/core/function/trace_type/default_types_test.py
{ "start": 953, "end": 1681 }
class ____(trace.TraceType): def __init__(self, obj): self._object = obj def is_subtype_of(self, other): return self._object == 2 and other._object == 3 def most_specific_common_supertype(self, others): if not others: return self if self._object == 2 and isinstance(others[0]._object, int): return MockSupertypes2With3(3) else: return None def placeholder_value(self, placeholder_context=None): raise NotImplementedError def __eq__(self, other) -> bool: return isinstance(other, type(self)) and self._object == other._object def __hash__(self) -> int: return self._object_hash def __repr__(self) -> str: return 'MockSupertypes2With3'
MockSupertypes2With3
python
rapidsai__cudf
python/cudf/cudf/core/udf/masked_typing.py
{ "start": 19974, "end": 20277 }
class ____(AbstractTemplate): key = "MaskedType.replace" def generic(self, args, kws): return nb_signature( MaskedType(managed_udf_string), MaskedType(string_view), MaskedType(string_view), recvr=self.this, )
MaskedStringViewReplace
python
keon__algorithms
algorithms/tree/construct_tree_postorder_preorder.py
{ "start": 795, "end": 2899 }
class ____: def __init__(self, val, left=None, right=None): self.val = val self.left = left self.right = right pre_index = 0 def construct_tree_util(pre: list, post: list, low: int, high: int, size: int): """ Recursive function that constructs tree from preorder and postorder array. preIndex is a global variable that keeps track of the index in preorder array. preorder and postorder array are represented are pre[] and post[] respectively. low and high are the indices for the postorder array. """ global pre_index if pre_index == -1: pre_index = 0 # Base case if pre_index >= size or low > high: return None root = TreeNode(pre[pre_index]) pre_index += 1 # If only one element in the subarray return root if low == high or pre_index >= size: return root # Find the next element of pre[] in post[] i = low while i <= high: if pre[pre_index] == post[i]: break i += 1 # Use index of element present in postorder to divide postorder array # to two parts: left subtree and right subtree if i <= high: root.left = construct_tree_util(pre, post, low, i, size) root.right = construct_tree_util(pre, post, i + 1, high, size) return root def construct_tree(pre: list, post: list, size: int): """ Main Function that will construct the full binary tree from given preorder and postorder array. """ root = construct_tree_util(pre, post, 0, size - 1, size) return print_inorder(root) def print_inorder(root: TreeNode, result=None): """ Prints the tree constructed in inorder format """ if root is None: return [] if result is None: result = [] print_inorder(root.left, result) result.append(root.val) print_inorder(root.right, result) return result if __name__ == "__main__": pre = [1, 2, 4, 5, 3, 6, 7] post = [4, 5, 2, 6, 7, 3, 1] size = len(pre) result = construct_tree(pre, post, size) print(result)
TreeNode
python
django__django
django/template/defaulttags.py
{ "start": 13517, "end": 13806 }
class ____(Node): def __init__(self, partial_name, inline, nodelist): self.partial_name = partial_name self.inline = inline self.nodelist = nodelist def render(self, context): return self.nodelist.render(context) if self.inline else ""
PartialDefNode
python
pyparsing__pyparsing
examples/statemachine/statemachine.py
{ "start": 390, "end": 8863 }
class ____(Exception): pass ident = pp.Word(pp.alphas + "_", pp.alphanums + "_$") # add parse-time condition to make sure we do not allow any Python keywords to be used as # statemachine identifiers def no_keywords_allowed(s, l, t): wd = t[0] return not keyword.iskeyword(wd) ident.addCondition( no_keywords_allowed, message="cannot use a Python keyword for state or transition identifier", ) stateTransition = ident("from_state") + "->" + ident("to_state") stateMachine = ( pp.Keyword("statemachine") + ident("name") + ":" + pp.OneOrMore(pp.Group(stateTransition))("transitions") ) namedStateTransition = ( ident("from_state") + "-(" + ident("transition") + ")->" + ident("to_state") ) namedStateMachine = ( pp.Keyword("statemachine") + ident("name") + ":" + pp.OneOrMore(pp.Group(namedStateTransition))("transitions") ) def expand_state_definition(source, loc, tokens): """ Parse action to convert statemachine to corresponding Python classes and methods """ indent = " " * (pp.col(loc, source) - 1) statedef = [] # build list of states states = set() fromTo = {} for tn in tokens.transitions: states.add(tn.from_state) states.add(tn.to_state) fromTo[tn.from_state] = tn.to_state # define base class for state classes baseStateClass = tokens.name statedef.extend( [ f"class {baseStateClass}(object):", " def __str__(self):", " return self.__class__.__name__", " @classmethod", " def states(cls):", " return list(cls.__subclasses__())", " def next_state(self):", " return self._next_state_class()", ] ) # define all state classes statedef.extend("class {}({}): pass".format(s, baseStateClass) for s in states) # define state->state transitions statedef.extend( "{}._next_state_class = {}".format(s, fromTo[s]) for s in states if s in fromTo ) statedef.extend( [ "class {baseStateClass}Mixin:".format(baseStateClass=baseStateClass), " def __init__(self):", " self._state = None", " def initialize_state(self, init_state):", " if issubclass(init_state, {baseStateClass}):".format( 
baseStateClass=baseStateClass ), " init_state = init_state()", " self._state = init_state", " @property", " def state(self):", " return self._state", " # get behavior/properties from current state", " def __getattr__(self, attrname):", " attr = getattr(self._state, attrname)", " return attr", " def __str__(self):", " return '{0}: {1}'.format(self.__class__.__name__, self._state)", ] ) return ("\n" + indent).join(statedef) + "\n" stateMachine.setParseAction(expand_state_definition) def expand_named_state_definition(source, loc, tokens): """ Parse action to convert statemachine with named transitions to corresponding Python classes and methods """ indent = " " * (pp.col(loc, source) - 1) statedef = [] # build list of states and transitions states = set() transitions = set() baseStateClass = tokens.name fromTo = {} for tn in tokens.transitions: states.add(tn.from_state) states.add(tn.to_state) transitions.add(tn.transition) if tn.from_state in fromTo: fromTo[tn.from_state][tn.transition] = tn.to_state else: fromTo[tn.from_state] = {tn.transition: tn.to_state} # add entries for terminal states for s in states: if s not in fromTo: fromTo[s] = {} # define state transition class statedef.extend( [ "class {baseStateClass}Transition:".format(baseStateClass=baseStateClass), " def __str__(self):", " return self.transitionName", ] ) statedef.extend( "{tn_name} = {baseStateClass}Transition()".format( tn_name=tn, baseStateClass=baseStateClass ) for tn in transitions ) statedef.extend( "{tn_name}.transitionName = '{tn_name}'".format(tn_name=tn) for tn in transitions ) # define base class for state classes statedef.extend( [ f"class {baseStateClass}(object):", " from statemachine import InvalidTransitionException as BaseTransitionException", " class InvalidTransitionException(BaseTransitionException): pass", " def __str__(self):", " return self.__class__.__name__", " @classmethod", " def states(cls):", " return list(cls.__subclasses__())", " @classmethod", " def next_state(cls, 
name):", " try:", " return cls.tnmap[name]()", " except KeyError:", " raise cls.InvalidTransitionException(f'{cls.__name__} does not support transition {name!r}'", " def __bad_tn(name):", " def _fn(cls):", " raise cls.InvalidTransitionException(f'{cls.__name__} does not support transition {name!r}'", " _fn.__name__ = name", " return _fn", ] ) # define default 'invalid transition' methods in base class, valid transitions will be implemented in subclasses statedef.extend( " {tn_name} = classmethod(__bad_tn({tn_name!r}))".format(tn_name=tn) for tn in transitions ) # define all state classes statedef.extend("class {}({}): pass".format(s, baseStateClass) for s in states) # define state transition methods for valid transitions from each state for s in states: trns = list(fromTo[s].items()) # statedef.append(f"{s}.tnmap = {{{', '.join('%s:%s' % tn for tn in trns)}}}") statedef.extend( f"{s}.{tn_} = classmethod(lambda cls: {to_}())" for tn_, to_ in trns ) statedef.extend( [ "{baseStateClass}.transitions = classmethod(lambda cls: [{transition_class_list}])".format( baseStateClass=baseStateClass, transition_class_list=", ".join( "cls.{}".format(tn) for tn in transitions ), ), "{baseStateClass}.transition_names = [tn.__name__ for tn in {baseStateClass}.transitions()]".format( baseStateClass=baseStateClass ), ] ) # define <state>Mixin class for application classes that delegate to the state statedef.extend( [ "class {baseStateClass}Mixin:".format(baseStateClass=baseStateClass), " def __init__(self):", " self._state = None", " def initialize_state(self, init_state):", " if issubclass(init_state, {baseStateClass}):".format( baseStateClass=baseStateClass ), " init_state = init_state()", " self._state = init_state", " @property", " def state(self):", " return self._state", " # get behavior/properties from current state", " def __getattr__(self, attrname):", " attr = getattr(self._state, attrname)", " return attr", " def __str__(self):", " return '{0}: 
{1}'.format(self.__class__.__name__, self._state)", ] ) # define transition methods to be delegated to the _state instance variable statedef.extend( " def {tn_name}(self): self._state = self._state.{tn_name}()".format( tn_name=tn ) for tn in transitions ) return ("\n" + indent).join(statedef) + "\n" namedStateMachine.setParseAction(expand_named_state_definition) # ====================================================================== # NEW STUFF - Matt Anderson, 2009-11-26 # ======================================================================
InvalidTransitionException
python
tensorflow__tensorflow
tensorflow/python/debug/cli/cli_config_test.py
{ "start": 999, "end": 5475 }
class ____(test_util.TensorFlowTestCase): def setUp(self): self._tmp_dir = tempfile.mkdtemp() self._tmp_config_path = os.path.join(self._tmp_dir, ".tfdbg_config") self.assertFalse(gfile.Exists(self._tmp_config_path)) super(CLIConfigTest, self).setUp() def tearDown(self): file_io.delete_recursively(self._tmp_dir) super(CLIConfigTest, self).tearDown() def testConstructCLIConfigWithoutFile(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) self.assertEqual(20, config.get("graph_recursion_depth")) self.assertEqual(True, config.get("mouse_mode")) with self.assertRaises(KeyError): config.get("property_that_should_not_exist") self.assertTrue(gfile.Exists(self._tmp_config_path)) def testCLIConfigForwardCompatibilityTest(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) with open(self._tmp_config_path, "rt") as f: config_json = json.load(f) # Remove a field to simulate forward compatibility test. del config_json["graph_recursion_depth"] with open(self._tmp_config_path, "wt") as f: json.dump(config_json, f) config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) self.assertEqual(20, config.get("graph_recursion_depth")) def testModifyConfigValue(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) config.set("graph_recursion_depth", 9) config.set("mouse_mode", False) self.assertEqual(9, config.get("graph_recursion_depth")) self.assertEqual(False, config.get("mouse_mode")) def testModifyConfigValueWithTypeCasting(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) config.set("graph_recursion_depth", "18") config.set("mouse_mode", "false") self.assertEqual(18, config.get("graph_recursion_depth")) self.assertEqual(False, config.get("mouse_mode")) def testModifyConfigValueWithTypeCastingFailure(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) with self.assertRaises(ValueError): config.set("mouse_mode", "maybe") def 
testLoadFromModifiedConfigFile(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) config.set("graph_recursion_depth", 9) config.set("mouse_mode", False) config2 = cli_config.CLIConfig(config_file_path=self._tmp_config_path) self.assertEqual(9, config2.get("graph_recursion_depth")) self.assertEqual(False, config2.get("mouse_mode")) def testSummarizeFromConfig(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) output = config.summarize() self.assertEqual( ["Command-line configuration:", "", " graph_recursion_depth: %d" % config.get("graph_recursion_depth"), " mouse_mode: %s" % config.get("mouse_mode")], output.lines) def testSummarizeFromConfigWithHighlight(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) output = config.summarize(highlight="mouse_mode") self.assertEqual( ["Command-line configuration:", "", " graph_recursion_depth: %d" % config.get("graph_recursion_depth"), " mouse_mode: %s" % config.get("mouse_mode")], output.lines) self.assertEqual((2, 12, ["underline", "bold"]), output.font_attr_segs[3][0]) self.assertEqual((14, 18, "bold"), output.font_attr_segs[3][1]) def testSetCallback(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) test_value = {"graph_recursion_depth": -1} def callback(config): test_value["graph_recursion_depth"] = config.get("graph_recursion_depth") config.set_callback("graph_recursion_depth", callback) config.set("graph_recursion_depth", config.get("graph_recursion_depth") - 1) self.assertEqual(test_value["graph_recursion_depth"], config.get("graph_recursion_depth")) def testSetCallbackInvalidPropertyName(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) with self.assertRaises(KeyError): config.set_callback("nonexistent_property_name", print) def testSetCallbackNotCallable(self): config = cli_config.CLIConfig(config_file_path=self._tmp_config_path) with self.assertRaises(TypeError): 
config.set_callback("graph_recursion_depth", 1) if __name__ == "__main__": googletest.main()
CLIConfigTest
python
ZoranPandovski__al-go-rithms
games/Python/Pong Game/paddle.py
{ "start": 44, "end": 516 }
class ____(Turtle): def __init__(self,x_pos,y_pos): super().__init__() self.penup() # self.speed(0) self.goto(x=x_pos,y=y_pos) self.shape("square") self.color("white") self.shapesize(stretch_wid=HEIGHT,stretch_len=WEIDTH) def move_up(self): new_y=self.ycor()+20 self.goto(x=self.xcor(),y=new_y) def move_down(self): new_y=self.ycor()-20 self.goto(x=self.xcor(),y=new_y)
Paddle
python
huggingface__transformers
src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
{ "start": 77044, "end": 78669 }
class ____(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] self.out_conv_dim = config.tdnn_dim[layer_id] self.kernel_size = config.tdnn_kernel[layer_id] self.dilation = config.tdnn_dilation[layer_id] self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) self.activation = nn.ReLU() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if is_peft_available(): from peft.tuners.lora import LoraLayer if is_peft_available(): if isinstance(self.kernel, LoraLayer): warnings.warn( "Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. " "You should exclude TDNNLayer from LoRA's target modules.", ) # for backward compatibility, we keep nn.Linear but call F.conv1d for speed up hidden_states = hidden_states.transpose(1, 2) weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2) hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.activation(hidden_states) return hidden_states @auto_docstring( custom_intro=""" Wav2Vec2Conformer Model with an XVector feature extraction head on top for tasks like Speaker Verification. """ )
TDNNLayer
python
coleifer__peewee
peewee.py
{ "start": 162757, "end": 163155 }
class ____(Field): def adapt(self, value): if isinstance(value, text_type): return value elif isinstance(value, bytes_type): return value.decode('utf-8') return text_type(value) def __add__(self, other): return StringExpression(self, OP.CONCAT, other) def __radd__(self, other): return StringExpression(other, OP.CONCAT, self)
_StringField
python
coleifer__peewee
examples/twitter/app.py
{ "start": 2733, "end": 3254 }
class ____(BaseModel): from_user = ForeignKeyField(User, backref='relationships') to_user = ForeignKeyField(User, backref='related_to') class Meta: indexes = ( # Specify a unique multi-column index on from/to-user. (('from_user', 'to_user'), True), ) # a dead simple one-to-many relationship: one user has 0..n messages, exposed by # the foreign key. because we didn't specify, a users messages will be accessible # as a special attribute, User.message_set
Relationship
python
Netflix__metaflow
metaflow/plugins/aws/secrets_manager/aws_secrets_manager_secrets_provider.py
{ "start": 1515, "end": 8302 }
class ____(SecretsProvider): TYPE = "aws-secrets-manager" def get_secret_as_dict(self, secret_id, options={}, role=None): """ Reads a secret from AWS Secrets Manager and returns it as a dictionary of environment variables. The secret payload from AWS is EITHER a string OR a binary blob. If the secret contains a string payload ("SecretString"): - if the `json` option is True (default): {SecretString} will be parsed as a JSON. If successfully parsed, AND the JSON contains a top-level object, each entry K/V in the object will also be converted to an entry in the result. V will always be casted to a string (if not already a string). - If `json` option is False: {SecretString} will be returned as a single entry in the result, where the key is either: - the `secret_id`, OR - the value set by `options={"env_var_name": custom_env_var_name}`. Otherwise, if the secret contains a binary blob payload ("SecretBinary"): - The result dict contains '{SecretName}': '{SecretBinary}', where {SecretBinary} is a base64-encoded string. All keys in the result are sanitized to be more valid environment variable names. This is done on a best-effort basis. Further validation is expected to be done by the invoking @secrets decorator itself. :param secret_id: ARN or friendly name of the secret. :param options: Dictionary of additional options. E.g., `options={"env_var_name": custom_env_var_name}`. :param role: AWS IAM Role ARN to assume before reading the secret. :return: Dictionary of environment variables. All keys and values are strings. 
""" import botocore from metaflow.plugins.aws.aws_client import get_aws_client effective_aws_region = None # arn:aws:secretsmanager:<Region>:<AccountId>:secret:SecretName-6RandomCharacters m = re.match("arn:aws:secretsmanager:([^:]+):", secret_id) if m: effective_aws_region = m.group(1) elif "region" in options: effective_aws_region = options["region"] else: effective_aws_region = AWS_SECRETS_MANAGER_DEFAULT_REGION # At the end of all that, `effective_aws_region` may still be None. # This might still be OK, if there is fallback AWS region info in environment like: # .aws/config or AWS_REGION env var or AWS_DEFAULT_REGION env var, etc. try: if AWS_SECRETS_MANAGER_DEFAULT_ROLE and not role: role = AWS_SECRETS_MANAGER_DEFAULT_ROLE secrets_manager_client = get_aws_client( "secretsmanager", client_params={"region_name": effective_aws_region}, role_arn=role, ) except botocore.exceptions.NoRegionError: # We try our best with a nice error message. # When run in Kubernetes or Argo Workflows, the traceback is still monstrous. # TODO: Find a way to show a concise error in logs raise MetaflowException( "Default region is not specified for AWS Secrets Manager. Please set METAFLOW_AWS_SECRETS_MANAGER_DEFAULT_REGION" ) result = {} def _sanitize_and_add_entry_to_result(k, v): # Two jobs - sanitize, and check for dupes sanitized_k = _sanitize_key_as_env_var(k) if sanitized_k in result: raise MetaflowAWSSecretsManagerDuplicateKey( "Duplicate key in secret: '%s' (sanitizes to '%s')" % (k, sanitized_k) ) result[sanitized_k] = v """ These are the exceptions that can be raised by the AWS SDK: SecretsManager.Client.exceptions.ResourceNotFoundException SecretsManager.Client.exceptions.InvalidParameterException SecretsManager.Client.exceptions.InvalidRequestException SecretsManager.Client.exceptions.DecryptionFailure SecretsManager.Client.exceptions.InternalServiceError Looks pretty informative already, so we won't catch here directly. 
1/27/2023(jackie) - We will evolve this over time as we learn more. """ response = secrets_manager_client.get_secret_value(SecretId=secret_id) if "Name" not in response: raise MetaflowAWSSecretsManagerBadResponse( "Secret 'Name' is missing in response" ) secret_name = response["Name"] if "SecretString" in response: secret_str = response["SecretString"] if options.get("json", True): try: obj = json.loads(secret_str) if type(obj) == dict: for k, v in obj.items(): # We try to make it work here - cast to string always _sanitize_and_add_entry_to_result(k, str(v)) else: raise MetaflowAWSSecretsManagerNotJSONObject( "Secret string is a JSON, but not an object (dict-like) - actual type %s." % type(obj) ) except JSONDecodeError: raise MetaflowAWSSecretsManagerJSONParseError( "Secret string could not be parsed as JSON" ) else: if options.get("env_var_name"): env_var_name = options["env_var_name"] else: env_var_name = secret_name _sanitize_and_add_entry_to_result(env_var_name, secret_str) elif "SecretBinary" in response: # boto3 docs say response gives base64 encoded, but it's wrong. # See https://github.com/boto/boto3/issues/2735 # In reality, we get raw bytes. We will encode it ourselves to become env var ready. # Note env vars values may not contain null bytes.... therefore we cannot leave it as # bytes. # # The trailing decode gives us a final UTF-8 string. if options.get("env_var_name"): env_var_name = options["env_var_name"] else: env_var_name = secret_name _sanitize_and_add_entry_to_result( env_var_name, base64.b64encode(response["SecretBinary"]).decode() ) else: raise MetaflowAWSSecretsManagerBadResponse( "Secret response is missing both 'SecretString' and 'SecretBinary'" ) return result
AwsSecretsManagerSecretsProvider
python
PrefectHQ__prefect
src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py
{ "start": 24643, "end": 24732 }
class ____(BaseWorkerResult): """ The result of an ECS job. """
ECSWorkerResult
python
kubernetes-client__python
kubernetes/client/models/v1beta1_resource_claim_list.py
{ "start": 383, "end": 7093 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'items': 'list[V1beta1ResourceClaim]', 'kind': 'str', 'metadata': 'V1ListMeta' } attribute_map = { 'api_version': 'apiVersion', 'items': 'items', 'kind': 'kind', 'metadata': 'metadata' } def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501 """V1beta1ResourceClaimList - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._items = None self._kind = None self._metadata = None self.discriminator = None if api_version is not None: self.api_version = api_version self.items = items if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata @property def api_version(self): """Gets the api_version of this V1beta1ResourceClaimList. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1beta1ResourceClaimList. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1beta1ResourceClaimList. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1beta1ResourceClaimList. # noqa: E501 :type: str """ self._api_version = api_version @property def items(self): """Gets the items of this V1beta1ResourceClaimList. # noqa: E501 Items is the list of resource claims. # noqa: E501 :return: The items of this V1beta1ResourceClaimList. # noqa: E501 :rtype: list[V1beta1ResourceClaim] """ return self._items @items.setter def items(self, items): """Sets the items of this V1beta1ResourceClaimList. Items is the list of resource claims. # noqa: E501 :param items: The items of this V1beta1ResourceClaimList. # noqa: E501 :type: list[V1beta1ResourceClaim] """ if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501 raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501 self._items = items @property def kind(self): """Gets the kind of this V1beta1ResourceClaimList. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1beta1ResourceClaimList. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1beta1ResourceClaimList. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1beta1ResourceClaimList. 
# noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1beta1ResourceClaimList. # noqa: E501 :return: The metadata of this V1beta1ResourceClaimList. # noqa: E501 :rtype: V1ListMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1beta1ResourceClaimList. :param metadata: The metadata of this V1beta1ResourceClaimList. # noqa: E501 :type: V1ListMeta """ self._metadata = metadata def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1beta1ResourceClaimList): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1beta1ResourceClaimList): return True return self.to_dict() != other.to_dict()
V1beta1ResourceClaimList
python
django__django
tests/model_inheritance/models.py
{ "start": 3639, "end": 3689 }
class ____(models.Model, Mixin): pass
MixinModel
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/cache_test.py
{ "start": 1836, "end": 8134 }
class ____(test_base.DatasetTestBase, parameterized.TestCase): def setUp(self): super(FileCacheTest, self).setUp() self.tmp_dir = tempfile.mkdtemp() self.cache_prefix = path.join(self.tmp_dir, "cache") def tearDown(self): if self.tmp_dir: shutil.rmtree(self.tmp_dir, ignore_errors=True) super(FileCacheTest, self).tearDown() @combinations.generate(test_base.default_test_combinations()) def testCacheDatasetPassthrough(self): components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]), np.array([9.0, 10.0, 11.0, 12.0])) def dataset_fn(count=5, filename=None): repeat_dataset = ( dataset_ops.Dataset.from_tensor_slices(components).repeat(count)) if filename: return repeat_dataset.cache(filename) else: return repeat_dataset self.assertEqual( tuple([c.shape[1:] for c in components]), dataset_ops.get_legacy_output_shapes(dataset_fn())) get_next = self.getNext(dataset_fn()) # First run without caching to collect the "ground truth". elements = [] for _ in range(20): elements.append(self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Assert that the cached dataset has the same elements as the # "ground truth". get_next = self.getNext(dataset_fn(filename=self.cache_prefix)) cached_elements = [] for _ in range(20): cached_elements.append(self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) self.assertAllEqual(elements, cached_elements) # Re-initialize with an empty upstream (to throw errors.OutOfRangeError # if we didn't use the cache). get_next = self.getNext(dataset_fn(count=0, filename=self.cache_prefix)) replayed_elements = [] for _ in range(20): replayed_elements.append(self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) self.assertEqual(cached_elements, replayed_elements) # Re-initialize with an empty upstream and a missing cache file (should # throw errors.OutOfRangeError immediately). 
get_next = self.getNext( dataset_fn(count=0, filename=self.cache_prefix + "nonsense")) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) @combinations.generate(test_base.default_test_combinations()) def testConcurrentWriters(self): components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]), np.array([9.0, 10.0, 11.0, 12.0])) cache_dataset1 = ( dataset_ops.Dataset.from_tensor_slices(components).cache( self.cache_prefix)) cache_dataset2 = ( dataset_ops.Dataset.from_tensor_slices(components).cache( self.cache_prefix)) get_next1 = self.getNext(cache_dataset1) get_next2 = self.getNext(cache_dataset2) self.evaluate(get_next1()) # this should succeed with self.assertRaises(errors.AlreadyExistsError): self.evaluate(get_next2()) self.evaluate(get_next1()) # this should continue to succeed @combinations.generate(test_base.default_test_combinations()) def testConcurrentReaders(self): components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]), np.array([9.0, 10.0, 11.0, 12.0])) cache_dataset1 = ( dataset_ops.Dataset.from_tensor_slices(components).cache( self.cache_prefix)) cache_dataset2 = ( dataset_ops.Dataset.from_tensor_slices(components).cache( self.cache_prefix)) get_next1 = self.getNext(cache_dataset1) get_next2 = self.getNext(cache_dataset2) elements = [] for _ in range(4): elements.append(self.evaluate(get_next1())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next1()) # Re-initialize get_next1 = self.getNext(cache_dataset1, requires_initialization=True) get_next2 = self.getNext(cache_dataset2, requires_initialization=True) # Reading concurrently should succeed. 
elements_itr1 = [] elements_itr2 = [] elements_itr2.append(self.evaluate(get_next2())) elements_itr1.append(self.evaluate(get_next1())) elements_itr2.append(self.evaluate(get_next2())) elements_itr1.append(self.evaluate(get_next1())) # Intentionally reversing the order elements_itr1.append(self.evaluate(get_next1())) elements_itr2.append(self.evaluate(get_next2())) elements_itr1.append(self.evaluate(get_next1())) elements_itr2.append(self.evaluate(get_next2())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next2()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next1()) self.assertAllEqual(elements, elements_itr1) self.assertAllEqual(elements, elements_itr2) @combinations.generate(test_base.default_test_combinations()) def testReadingPastEndOfSequence(self): dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix) dataset = dataset.map(lambda a: a).batch(4).repeat(2) expected_output = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] * 2 self.assertDatasetProduces(dataset, expected_output) @combinations.generate(test_base.default_test_combinations()) def testCacheZipped(self): def make_dataset(i): cache_path = self.cache_prefix + "_" + str(i) return dataset_ops.Dataset.range(100).shuffle(100).cache(cache_path) datasets = [make_dataset(i) for i in range(3)] dataset = dataset_ops.Dataset.zip(tuple(datasets)) first_order = self.getDatasetOutput(dataset) second_order = self.getDatasetOutput(dataset) self.assertEqual(first_order, second_order) @combinations.generate(test_base.default_test_combinations()) def testCleaningUpCacheFiles(self): def do_test(i): dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix) get_next = self.getNext(dataset) for _ in range(i): try: self.evaluate(get_next()) except errors.OutOfRangeError: break if not context.executing_eagerly(): self.skipTest( "Test requires eager mode for iterators to be deconstructed") for i in [0, 3, 10, 12, 15]: do_test(i)
FileCacheTest
python
python-poetry__poetry
src/poetry/utils/env/env_manager.py
{ "start": 2500, "end": 22203 }
class ____: """ Environments manager """ _env = None ENVS_FILE = "envs.toml" def __init__(self, poetry: Poetry, io: None | IO = None) -> None: self._poetry = poetry self._io = io or NullIO() @property def in_project_venv(self) -> Path: venv: Path = self._poetry.file.path.parent / ".venv" return venv @cached_property def envs_file(self) -> EnvsFile: return EnvsFile(self._poetry.config.virtualenvs_path / self.ENVS_FILE) @cached_property def base_env_name(self) -> str: return self.generate_env_name( self._poetry.package.name, str(self._poetry.file.path.parent), ) def activate(self, python: str) -> Env: venv_path = self._poetry.config.virtualenvs_path python_instance = Python.get_by_name(python) if python_instance is None: raise PythonVersionNotFoundError(python) create = False # If we are required to create the virtual environment in the project directory, # create or recreate it if needed if self.use_in_project_venv(): create = False venv = self.in_project_venv if venv.exists(): # We need to check if the patch version is correct _venv = VirtualEnv(venv) current_patch = ".".join(str(v) for v in _venv.version_info[:3]) if python_instance.patch_version.to_string() != current_patch: create = True self.create_venv(python=python_instance, force=create) return self.get(reload=True) envs = tomlkit.document() if self.envs_file.exists(): envs = self.envs_file.read() current_env = envs.get(self.base_env_name) if current_env is not None: current_minor = current_env["minor"] current_patch = current_env["patch"] if ( current_minor == python_instance.minor_version.to_string() and current_patch != python_instance.patch_version.to_string() ): # We need to recreate create = True venv = ( venv_path / f"{self.base_env_name}-py{python_instance.minor_version.to_string()}" ) # Create if needed if not venv.exists() or create: in_venv = os.environ.get("VIRTUAL_ENV") is not None if in_venv or not venv.exists(): create = True if venv.exists(): # We need to check if the patch version is correct 
_venv = VirtualEnv(venv) current_patch = ".".join(str(v) for v in _venv.version_info[:3]) if python_instance.patch_version.to_string() != current_patch: create = True self.create_venv(python=python_instance, force=create) # Activate envs[self.base_env_name] = { "minor": python_instance.minor_version.to_string(), "patch": python_instance.patch_version.to_string(), } self.envs_file.write(envs) return self.get(reload=True) def deactivate(self) -> None: venv_path = self._poetry.config.virtualenvs_path if self.envs_file.exists() and ( minor := self.envs_file.remove_section(self.base_env_name) ): venv = venv_path / f"{self.base_env_name}-py{minor}" self._io.write_error_line( f"Deactivating virtualenv: <comment>{venv}</comment>" ) def get(self, reload: bool = False) -> Env: if self._env is not None and not reload: return self._env python_minor: str | None = None env = None envs = None if self.envs_file.exists(): envs = self.envs_file.read() env = envs.get(self.base_env_name) if env: python_minor = env["minor"] # Check if we are inside a virtualenv or not # Conda sets CONDA_PREFIX in its envs, see # https://github.com/conda/conda/issues/2764 env_prefix = os.environ.get("VIRTUAL_ENV", os.environ.get("CONDA_PREFIX")) conda_env_name = os.environ.get("CONDA_DEFAULT_ENV") # It's probably not a good idea to pollute Conda's global "base" env, since # most users have it activated all the time. 
in_venv = env_prefix is not None and conda_env_name != "base" if not in_venv or env is not None: # Checking if a local virtualenv exists if self.in_project_venv_exists(): venv = self.in_project_venv return VirtualEnv(venv) create_venv = self._poetry.config.get("virtualenvs.create", True) if not create_venv: return self.get_system_env() venv_path = self._poetry.config.virtualenvs_path if python_minor is None: # we only need to discover python version in this case python = Python.get_preferred_python( config=self._poetry.config, io=self._io ) python_minor = python.minor_version.to_string() name = f"{self.base_env_name}-py{python_minor.strip()}" venv = venv_path / name if not venv.exists(): if env and envs: del envs[self.base_env_name] self.envs_file.write(envs) return self.get_system_env() return VirtualEnv(venv) if env_prefix is not None: prefix = Path(env_prefix) base_prefix = None else: prefix = Path(sys.prefix) base_prefix = self.get_base_prefix() return VirtualEnv(prefix, base_prefix) def list(self, name: str | None = None) -> list[VirtualEnv]: if name is None: name = self._poetry.package.name venv_name = self.generate_env_name(name, str(self._poetry.file.path.parent)) venv_path = self._poetry.config.virtualenvs_path env_list = [VirtualEnv(p) for p in sorted(venv_path.glob(f"{venv_name}-py*"))] if self.in_project_venv_exists(): venv = self.in_project_venv env_list.insert(0, VirtualEnv(venv)) return env_list @staticmethod def check_env_is_for_current_project(env: str, base_env_name: str) -> bool: """ Check if env name starts with projects name. This is done to prevent action on other project's envs. 
""" return env.startswith(base_env_name) def remove(self, python: str) -> Env: python_path = Path(python) if python_path.is_file(): # Validate env name if provided env is a full path to python try: encoding = "locale" if sys.version_info >= (3, 10) else None env_dir = subprocess.check_output( [python, "-c", GET_ENV_PATH_ONELINER], text=True, encoding=encoding ).strip("\n") env_name = Path(env_dir).name if not self.check_env_is_for_current_project( env_name, self.base_env_name ): raise IncorrectEnvError(env_name) except CalledProcessError as e: raise EnvCommandError(e) if self.check_env_is_for_current_project(python, self.base_env_name): venvs = self.list() for venv in venvs: if venv.path.name == python: # Exact virtualenv name if self.envs_file.exists(): venv_minor = ".".join(str(v) for v in venv.version_info[:2]) self.envs_file.remove_section(self.base_env_name, venv_minor) self.remove_venv(venv.path) return venv raise ValueError( f'<warning>Environment "{python}" does not exist.</warning>' ) else: venv_path = self._poetry.config.virtualenvs_path # Get all the poetry envs, even for other projects env_names = [p.name for p in sorted(venv_path.glob("*-*-py*"))] if python in env_names: raise IncorrectEnvError(python) try: python_version = Version.parse(python) python = f"python{python_version.major}" if python_version.precision > 1: python += f".{python_version.minor}" except ValueError: # Executable in PATH or full executable path pass try: encoding = "locale" if sys.version_info >= (3, 10) else None python_version_string = subprocess.check_output( [python, "-c", GET_PYTHON_VERSION_ONELINER], text=True, encoding=encoding, ) except CalledProcessError as e: raise EnvCommandError(e) python_version = Version.parse(python_version_string.strip()) minor = f"{python_version.major}.{python_version.minor}" name = f"{self.base_env_name}-py{minor}" venv_path = venv_path / name if not venv_path.exists(): raise ValueError(f'<warning>Environment "{name}" does not 
exist.</warning>') if self.envs_file.exists(): self.envs_file.remove_section(self.base_env_name, minor) self.remove_venv(venv_path) return VirtualEnv(venv_path, venv_path) def use_in_project_venv(self) -> bool: in_project: bool | None = self._poetry.config.get("virtualenvs.in-project") if in_project is not None: return in_project return self.in_project_venv.is_dir() def in_project_venv_exists(self) -> bool: in_project: bool | None = self._poetry.config.get("virtualenvs.in-project") if in_project is False: return False return self.in_project_venv.is_dir() def create_venv( self, name: str | None = None, python: Python | None = None, force: bool = False, ) -> Env: if self._env is not None and not force: return self._env cwd = self._poetry.file.path.parent env = self.get(reload=True) if not env.is_sane(): force = True if env.is_venv() and not force: # Already inside a virtualenv. current_python = Version.parse( ".".join(str(c) for c in env.version_info[:3]) ) if not self._poetry.package.python_constraint.allows(current_python): raise InvalidCurrentPythonVersionError( self._poetry.package.python_versions, str(current_python) ) return env create_venv = self._poetry.config.get("virtualenvs.create") in_project_venv = self.use_in_project_venv() use_poetry_python = self._poetry.config.get("virtualenvs.use-poetry-python") venv_prompt = self._poetry.config.get("virtualenvs.prompt") specific_python_requested = python is not None if not python: python = Python.get_preferred_python( config=self._poetry.config, io=self._io ) venv_path = ( self.in_project_venv if in_project_venv else self._poetry.config.virtualenvs_path ) if not name: name = self._poetry.package.name supported_python = self._poetry.package.python_constraint if not supported_python.allows(python.patch_version): # The currently activated or chosen Python version # is not compatible with the Python constraint specified # for the project. 
# If an executable has been specified, we stop there # and notify the user of the incompatibility. # Otherwise, we try to find a compatible Python version. if specific_python_requested and use_poetry_python: raise NoCompatiblePythonVersionFoundError( self._poetry.package.python_versions, python.patch_version.to_string(), ) self._io.write_error_line( f"<warning>The currently activated Python version {python.patch_version.to_string()} is not" f" supported by the project ({self._poetry.package.python_versions}).\n" "Trying to find and use a compatible version.</warning> " ) python = Python.get_compatible_python(poetry=self._poetry, io=self._io) if in_project_venv: venv = venv_path else: name = self.generate_env_name(name, str(cwd)) name = f"{name}-py{python.minor_version.to_string()}" venv = venv_path / name if venv_prompt is not None: venv_prompt = venv_prompt.format( project_name=self._poetry.package.name or "virtualenv", python_version=python.minor_version.to_string(), ) if not venv.exists(): if create_venv is False: self._io.write_error_line( "<fg=black;bg=yellow>" "Skipping virtualenv creation, " "as specified in config file." "</>" ) return self.get_system_env() self._io.write_error_line( f"Creating virtualenv <c1>{name}</> in" f" {venv_path if not WINDOWS else get_real_windows_path(venv_path)!s}" ) else: create_venv = False if force: if not env.is_sane(): self._io.write_error_line( f"<warning>The virtual environment found in {env.path} seems to" " be broken.</warning>" ) self._io.write_error_line( f"Recreating virtualenv <c1>{name}</> in {venv!s}" ) self.remove_venv(venv) create_venv = True elif self._io.is_very_verbose(): self._io.write_error_line(f"Virtualenv <c1>{name}</> already exists.") if create_venv: self.build_venv( venv, executable=python.executable, flags=self._poetry.config.get("virtualenvs.options"), prompt=venv_prompt, ) # venv detection: # stdlib venv may symlink sys.executable, so we can't use realpath. 
# but others can symlink *to* the venv Python, # so we can't just use sys.executable. # So we just check every item in the symlink tree (generally <= 3) p = os.path.normcase(sys.executable) paths = [p] while os.path.islink(p): p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p))) paths.append(p) p_venv = os.path.normcase(str(venv)) if any(p.startswith(p_venv) for p in paths): # Running properly in the virtualenv, don't need to do anything return self.get_system_env() return VirtualEnv(venv) @classmethod def build_venv( cls, path: Path, executable: Path | None = None, flags: dict[str, str | bool] | None = None, with_pip: bool | None = None, prompt: str | None = None, ) -> virtualenv.run.session.Session: flags = flags or {} if with_pip is not None: flags["no-pip"] = not with_pip flags.setdefault("no-pip", True) flags.setdefault("no-setuptools", True) flags.setdefault("no-wheel", True) if WINDOWS: path = get_real_windows_path(path) executable = get_real_windows_path(executable) if executable else None executable_str = None if executable is None else executable.resolve().as_posix() args = [ "--no-download", "--no-periodic-update", "--python", executable_str or sys.executable, ] if prompt is not None: args.extend(["--prompt", prompt]) for flag, value in flags.items(): if value is True: args.append(f"--{flag}") elif value is not False: args.append(f"--{flag}={value}") args.append(str(path)) cli_result = virtualenv.cli_run(args, setup_logging=False) # Exclude the venv folder from from macOS Time Machine backups # TODO: Add backup-ignore markers for other platforms too if sys.platform == "darwin": import xattr xattr.setxattr( str(path), "com.apple.metadata:com_apple_backup_excludeItem", plistlib.dumps("com.apple.backupd", fmt=plistlib.FMT_BINARY), ) return cli_result @classmethod def remove_venv(cls, path: Path) -> None: assert path.is_dir() try: remove_directory(path) return except OSError as e: # Continue only if e.errno == 16 if e.errno != 16: # ERRNO 
16: Device or resource busy raise e # Delete all files and folders but the toplevel one. This is because sometimes # the venv folder is mounted by the OS, such as in a docker volume. In such # cases, an attempt to delete the folder itself will result in an `OSError`. # See https://github.com/python-poetry/poetry/pull/2064 for file_path in path.iterdir(): if file_path.is_file() or file_path.is_symlink(): file_path.unlink() elif file_path.is_dir(): remove_directory(file_path, force=True) @classmethod def get_system_env(cls, naive: bool = False) -> Env: """ Retrieve the current Python environment. This can be the base Python environment or an activated virtual environment. This method also workaround the issue that the virtual environment used by Poetry internally (when installed via the custom installer) is incorrectly detected as the system environment. Note that this workaround happens only when `naive` is False since there are times where we actually want to retrieve Poetry's custom virtual environment (e.g. plugin installation or self update). """ prefix, base_prefix = Path(sys.prefix), Path(cls.get_base_prefix()) env: Env = SystemEnv(prefix) if not naive: env = GenericEnv(base_prefix, child_env=env) return env @classmethod def get_base_prefix(cls) -> Path: real_prefix = getattr(sys, "real_prefix", None) if real_prefix is not None: return Path(real_prefix) base_prefix = getattr(sys, "base_prefix", None) if base_prefix is not None: return Path(base_prefix) return Path(sys.prefix) @classmethod def generate_env_name(cls, name: str, cwd: str) -> str: name = name.lower() sanitized_name = re.sub(r'[ $`!*@"\\\r\n\t]', "_", name)[:42] normalized_cwd = os.path.normcase(os.path.realpath(cwd)) h_bytes = hashlib.sha256(encode(normalized_cwd)).digest() h_str = base64.urlsafe_b64encode(h_bytes).decode()[:8] return f"{sanitized_name}-{h_str}"
EnvManager
python
Netflix__metaflow
metaflow/datastore/task_datastore.py
{ "start": 1525, "end": 38332 }
class ____(object): """ TaskDataStore is obtained through FlowDataStore.get_datastore_for_task and is used to store three things: - Task artifacts (using save_artifacts and load_artifacts) which will ultimately be stored using ContentAddressedStore's save_blobs and load_blobs. This is basically the content indexed portion of the storage (identical objects are stored only once). - Metadata information (using save_metadata and load_metadata) which stores JSON encoded metadata about a task in a non-content indexed way in a hierarchical manner (ie: the files are stored in a path indicated by the pathspec (run_id/step_name/task_id)). This portion of the store can be viewed as name indexed (storing two metadata items with the same name will overwrite the previous item so the condition of equality is the name as opposed to the content). - Logs which are a special sort of task metadata but are handled differently (they are not JSON-encodable dictionaries). """ METADATA_ATTEMPT_SUFFIX = "attempt.json" METADATA_DONE_SUFFIX = "DONE.lock" METADATA_DATA_SUFFIX = "data.json" @staticmethod def metadata_name_for_attempt(name, attempt): if attempt is None: return name return "%d.%s" % (attempt, name) @staticmethod def parse_attempt_metadata(name): return name.split(".", 1) def __init__( self, flow_datastore, run_id, step_name, task_id, attempt=None, data_metadata=None, mode="r", allow_not_done=False, persist=True, ): self._storage_impl = flow_datastore._storage_impl self.TYPE = self._storage_impl.TYPE self._ca_store = flow_datastore.ca_store self._environment = flow_datastore.environment self._run_id = run_id self._step_name = step_name self._task_id = task_id self._path = self._storage_impl.path_join( flow_datastore.flow_name, run_id, step_name, task_id ) self._mode = mode self._attempt = attempt self._metadata = flow_datastore.metadata self._parent = flow_datastore self._persist = persist # The GZIP encodings are for backward compatibility self._encodings = {"pickle-v2", 
"gzip+pickle-v2"} ver = sys.version_info[0] * 10 + sys.version_info[1] if ver >= 36: self._encodings.add("pickle-v4") self._encodings.add("gzip+pickle-v4") self._is_done_set = False # If the mode is 'write', we initialize things to empty if self._mode == "w": self._objects = {} self._info = {} elif self._mode == "r": if data_metadata is not None: # We already loaded the data metadata so just use that self._objects = data_metadata.get("objects", {}) self._info = data_metadata.get("info", {}) else: # What is the latest attempt ID for this task store. # NOTE: We *only* access to the data if the attempt that # produced it is done. In particular, we do not allow access to # a past attempt if a new attempt has started to avoid # inconsistencies (depending on when the user accesses the # datastore, the data may change). We make an exception to that # rule when allow_not_done is True which allows access to things # like logs even for tasks that did not write a done marker max_attempt = None for i in range(metaflow_config.MAX_ATTEMPTS): check_meta = self._metadata_name_for_attempt( self.METADATA_ATTEMPT_SUFFIX, i ) if self.has_metadata(check_meta, add_attempt=False): max_attempt = i elif max_attempt is not None: break if self._attempt is None: self._attempt = max_attempt elif max_attempt is None or self._attempt > max_attempt: # In this case the attempt does not exist, so we can't load # anything self._objects = {} self._info = {} return # Check if the latest attempt was completed successfully except # if we have allow_not_done data_obj = None if self.has_metadata(self.METADATA_DONE_SUFFIX): data_obj = self.load_metadata([self.METADATA_DATA_SUFFIX]) data_obj = data_obj[self.METADATA_DATA_SUFFIX] elif self._attempt is None or not allow_not_done: raise DataException( "No completed attempts of the task was found for task '%s'" % self._path ) if data_obj is not None: self._objects = data_obj.get("objects", {}) self._info = data_obj.get("info", {}) elif self._mode == "d": 
self._objects = {} self._info = {} if self._attempt is None: for i in range(metaflow_config.MAX_ATTEMPTS): check_meta = self._metadata_name_for_attempt( self.METADATA_ATTEMPT_SUFFIX, i ) if self.has_metadata(check_meta, add_attempt=False): self._attempt = i # Do not allow destructive operations on the datastore if attempt is still in flight # and we explicitly did not allow operating on running tasks. if not allow_not_done and not self.has_metadata(self.METADATA_DONE_SUFFIX): raise DataException( "No completed attempts of the task was found for task '%s'" % self._path ) else: raise DataException("Unknown datastore mode: '%s'" % self._mode) @property def pathspec(self): return "/".join([self.run_id, self.step_name, self.task_id]) @property def run_id(self): return self._run_id @property def step_name(self): return self._step_name @property def task_id(self): return self._task_id @property def attempt(self): return self._attempt @property def ds_metadata(self): return {"objects": self._objects.copy(), "info": self._info.copy()} @property def pathspec_index(self): idxstr = ",".join(map(str, (f.index for f in self["_foreach_stack"]))) if "_iteration_stack" in self: itrstr = ",".join(map(str, (f for f in self["_iteration_stack"]))) return "%s/%s[%s][%s]" % (self._run_id, self._step_name, idxstr, itrstr) return "%s/%s[%s]" % (self._run_id, self._step_name, idxstr) @property def parent_datastore(self): return self._parent @require_mode(None) def get_log_location(self, logprefix, stream): log_name = self._get_log_location(logprefix, stream) path = self._storage_impl.path_join( self._path, self._metadata_name_for_attempt(log_name) ) return self._storage_impl.full_uri(path) @require_mode("r") def keys_for_artifacts(self, names): return [self._objects.get(name) for name in names] @only_if_not_done @require_mode("w") def init_task(self): """ Call this to initialize the datastore with a new attempt. This method requires mode 'w'. 
""" self.save_metadata({self.METADATA_ATTEMPT_SUFFIX: {"time": time.time()}}) @only_if_not_done @require_mode("w") def transfer_artifacts( self, other_datastore: "TaskDataStore", names: Optional[List[str]] = None ): """ Copies the blobs from other_datastore to this datastore if the datastore roots are different. This is used specifically for spin so we can bring in artifacts from the original datastore. Parameters ---------- other_datastore : TaskDataStore Other datastore from which to copy artifacts from names : List[str], optional, default None If provided, only transfer the artifacts with these names. If None, transfer all artifacts from the other datastore. """ if ( other_datastore.TYPE == self.TYPE and other_datastore._storage_impl.datastore_root == self._storage_impl.datastore_root ): # Nothing to transfer -- artifacts are already saved properly return # Determine which artifacts need to be transferred if names is None: # Transfer all artifacts from other datastore artifacts_to_transfer = list(other_datastore._objects.keys()) else: # Transfer only specified artifacts artifacts_to_transfer = [ name for name in names if name in other_datastore._objects ] if not artifacts_to_transfer: return # Get SHA keys for artifacts to transfer shas_to_transfer = [ other_datastore._objects[name] for name in artifacts_to_transfer ] # Check which blobs are missing locally missing_shas = [] for sha in shas_to_transfer: local_path = self._ca_store._storage_impl.path_join( self._ca_store._prefix, sha[:2], sha ) if not self._ca_store._storage_impl.is_file([local_path])[0]: missing_shas.append(sha) if not missing_shas: return # All blobs already exist locally # Load blobs from other datastore in transfer mode transfer_blobs = other_datastore._ca_store.load_blobs( missing_shas, is_transfer=True ) # Save blobs to local datastore in transfer mode self._ca_store.save_blobs(transfer_blobs, is_transfer=True) @only_if_not_done @require_mode("w") def save_artifacts(self, artifacts_iter, 
len_hint=0): """ Saves Metaflow Artifacts (Python objects) to the datastore and stores any relevant metadata needed to retrieve them. Typically, objects are pickled but the datastore may perform any operation that it deems necessary. You should only access artifacts using load_artifacts This method requires mode 'w'. Parameters ---------- artifacts : Iterator[(string, object)] Iterator over the human-readable name of the object to save and the object itself len_hint: integer Estimated number of items in artifacts_iter """ artifact_names = [] def pickle_iter(): for name, obj in artifacts_iter: encode_type = "gzip+pickle-v4" if encode_type in self._encodings: try: blob = pickle.dumps(obj, protocol=4) except TypeError as e: raise UnpicklableArtifactException(name) from e else: try: blob = pickle.dumps(obj, protocol=2) encode_type = "gzip+pickle-v2" except (SystemError, OverflowError) as e: raise DataException( "Artifact *%s* is very large (over 2GB). " "You need to use Python 3.6 or newer if you want to " "serialize large objects." % name ) from e except TypeError as e: raise UnpicklableArtifactException(name) from e self._info[name] = { "size": len(blob), "type": str(type(obj)), "encoding": encode_type, } artifact_names.append(name) yield blob # Use the content-addressed store to store all artifacts save_result = self._ca_store.save_blobs(pickle_iter(), len_hint=len_hint) for name, result in zip(artifact_names, save_result): self._objects[name] = result.key @require_mode(None) def load_artifacts(self, names): """ Mirror function to save_artifacts This function will retrieve the objects referenced by 'name'. Each object will be fetched and returned if found. Note that this function will return objects that may not be the same as the ones saved using saved_objects (taking into account possible environment changes, for example different conda environments) but it will return objects that can be used as the objects passed in to save_objects. 
This method can be used in both 'r' and 'w' mode. For the latter use case, this can happen when `passdown_partial` is called and an artifact passed down that way is then loaded. Parameters ---------- names : List[string] List of artifacts to retrieve Returns ------- Iterator[(string, object)] : An iterator over objects retrieved. """ if not self._info: raise DataException( "Datastore for task '%s' does not have the required metadata to " "load artifacts" % self._path ) to_load = defaultdict(list) for name in names: info = self._info.get(name) # We use gzip+pickle-v2 as this is the oldest/most compatible. # This datastore will always include the proper encoding version so # this is just to be able to read very old artifacts if info: encode_type = info.get("encoding", "gzip+pickle-v2") else: encode_type = "gzip+pickle-v2" if encode_type not in self._encodings: raise DataException( "Python 3.6 or later is required to load artifact '%s'" % name ) else: to_load[self._objects[name]].append(name) # At this point, we load what we don't have from the CAS # We assume that if we have one "old" style artifact, all of them are # like that which is an easy assumption to make since artifacts are all # stored by the same implementation of the datastore for a given task. for key, blob in self._ca_store.load_blobs(to_load.keys()): names = to_load[key] for name in names: # We unpickle everytime to have fully distinct objects (the user # would not expect two artifacts with different names to actually # be aliases of one another) yield name, pickle.loads(blob) @require_mode("r") def get_artifact_sizes(self, names): """ Retrieves file sizes of artifacts defined in 'names' from their respective stored file metadata. Usage restricted to only 'r' mode due to depending on the metadata being written Parameters ---------- names : List[string] List of artifacts to retrieve Returns ------- Iterator[(string, int)] : An iterator over sizes retrieved. 
""" for name in names: info = self._info.get(name) if info["type"] == _included_file_type: sz = self[name].size else: sz = info.get("size", 0) yield name, sz @require_mode("r") def get_legacy_log_size(self, stream): name = self._metadata_name_for_attempt("%s.log" % stream) path = self._storage_impl.path_join(self._path, name) return self._storage_impl.size_file(path) @require_mode("r") def get_log_size(self, logsources, stream): def _path(s): # construct path for fetching of a single log source _p = self._metadata_name_for_attempt(self._get_log_location(s, stream)) return self._storage_impl.path_join(self._path, _p) paths = list(map(_path, logsources)) sizes = [self._storage_impl.size_file(p) for p in paths] return sum(size for size in sizes if size is not None) @only_if_not_done @require_mode("w") def save_metadata(self, contents, allow_overwrite=True, add_attempt=True): """ Save task metadata. This is very similar to save_artifacts; this function takes a dictionary with the key being the name of the metadata to save and the value being the metadata. The metadata, however, will not be stored in the CAS but rather directly in the TaskDataStore. This method requires mode 'w' Parameters ---------- contents : Dict[string -> JSON-ifiable objects] Dictionary of metadata to store allow_overwrite : boolean, optional If True, allows the overwriting of the metadata, defaults to True add_attempt : boolean, optional If True, adds the attempt identifier to the metadata. defaults to True """ return self._save_file( {k: json.dumps(v).encode("utf-8") for k, v in contents.items()}, allow_overwrite, add_attempt, ) @require_mode("w") def _dangerous_save_metadata_post_done( self, contents, allow_overwrite=True, add_attempt=True ): """ Method identical to save_metadata BUT BYPASSES THE CHECK ON DONE @warning This method should not be used unless you know what you are doing. 
This will write metadata to a datastore that has been marked as done which is an assumption that other parts of metaflow rely on (ie: when a datastore is marked as done, it is considered to be read-only). Currently only used in the case when the task is executed remotely but there is no (remote) metadata service configured. We therefore use the datastore to share metadata between the task and the Metaflow local scheduler. Due to some other constraints and the current plugin API, we could not use the regular method to save metadata. This method requires mode 'w' Parameters ---------- contents : Dict[string -> JSON-ifiable objects] Dictionary of metadata to store allow_overwrite : boolean, optional If True, allows the overwriting of the metadata, defaults to True add_attempt : boolean, optional If True, adds the attempt identifier to the metadata. defaults to True """ return self._save_file( {k: json.dumps(v).encode("utf-8") for k, v in contents.items()}, allow_overwrite, add_attempt, ) @require_mode("r") def load_metadata(self, names, add_attempt=True): """ Loads metadata saved with `save_metadata` Parameters ---------- names : List[string] The name of the metadata elements to load add_attempt : bool, optional Adds the attempt identifier to the metadata name if True, by default True Returns ------- Dict: string -> JSON decoded object Results indexed by the name of the metadata loaded """ transformer = lambda x: x if sys.version_info < (3, 6): transformer = lambda x: x.decode("utf-8") return { k: json.loads(transformer(v)) if v is not None else None for k, v in self._load_file(names, add_attempt).items() } @require_mode(None) def has_metadata(self, name, add_attempt=True): """ Checks if this TaskDataStore has the metadata requested TODO: Should we make this take multiple names like the other calls? This method operates like load_metadata in both 'w' and 'r' modes. 
Parameters ---------- names : string Metadata name to fetch add_attempt : bool, optional Adds the attempt identifier to the metadata name if True, by default True Returns ------- boolean True if the metadata exists or False otherwise """ if add_attempt: path = self._storage_impl.path_join( self._path, self._metadata_name_for_attempt(name) ) else: path = self._storage_impl.path_join(self._path, name) return self._storage_impl.is_file([path])[0] @require_mode(None) def get(self, name, default=None): """ Convenience method around load_artifacts for a given name and with a provided default. This method requires mode 'r'. Parameters ---------- name : str Name of the object to get default : object, optional Returns this value if object not found, by default None """ if self._objects: try: return self[name] if name in self._objects else default except DataException: return default return default @require_mode("r") def is_none(self, name): """ Convenience method to test if an artifact is None This method requires mode 'r'. Parameters ---------- name : string Name of the artifact """ if not self._info: return True info = self._info.get(name) if info: obj_type = info.get("type") # Conservatively check if the actual object is None, # in case the artifact is stored using a different python version. # Note that if an object is None and stored in Py2 and accessed in # Py3, this test will fail and we will fall back to the slow path. 
This # is intended (being conservative) if obj_type == str(type(None)): return True # Slow path since this has to get the object from the datastore return self.get(name) is None @only_if_not_done @require_mode("w") def done(self): """ Mark this task-datastore as 'done' for the current attempt Will throw an exception if mode != 'w' """ self.save_metadata( { self.METADATA_DATA_SUFFIX: { "datastore": self.TYPE, "version": "1.0", "attempt": self._attempt, "python_version": sys.version, "objects": self._objects, "info": self._info, }, self.METADATA_DONE_SUFFIX: "", } ) if self._metadata: self._metadata.register_metadata( self._run_id, self._step_name, self._task_id, [ MetaDatum( field="attempt-done", value=str(self._attempt), type="attempt-done", tags=["attempt_id:{0}".format(self._attempt)], ) ], ) artifacts = [ DataArtifact( name=var, ds_type=self.TYPE, ds_root=self._storage_impl.datastore_root, url=None, sha=sha, type=self._info[var]["encoding"], ) for var, sha in self._objects.items() ] self._metadata.register_data_artifacts( self.run_id, self.step_name, self.task_id, self._attempt, artifacts ) self._is_done_set = True @only_if_not_done @require_mode("w") def clone(self, origin): """ Clone the information located in the TaskDataStore origin into this datastore Parameters ---------- origin : TaskDataStore TaskDataStore to clone """ self._objects = origin._objects self._info = origin._info @only_if_not_done @require_mode("w") def passdown_partial(self, origin, variables): # Pass-down from datastore origin all information related to vars to # this datastore. In other words, this adds to the current datastore all # the variables in vars (obviously, it does not download them or # anything but records information about them). 
This is used to # propagate parameters between datastores without actually loading the # parameters as well as for merge_artifacts for var in variables: sha = origin._objects.get(var) if sha: self._objects[var] = sha self._info[var] = origin._info[var] @only_if_not_done @require_mode("w") def persist(self, flow): """ Persist any new artifacts that were produced when running flow NOTE: This is a DESTRUCTIVE operation that deletes artifacts from the given flow to conserve memory. Don't rely on artifact attributes of the flow object after calling this function. Parameters ---------- flow : FlowSpec Flow to persist """ if not self._persist: return if flow._datastore: self._objects.update(flow._datastore._objects) self._info.update(flow._datastore._info) # Scan flow object FIRST valid_artifacts = [] current_artifact_names = set() for var in dir(flow): if var.startswith("__") or var in flow._EPHEMERAL: continue # Skip over properties of the class (Parameters or class variables) if hasattr(flow.__class__, var) and isinstance( getattr(flow.__class__, var), property ): continue val = getattr(flow, var) if not ( isinstance(val, MethodType) or isinstance(val, FunctionType) or isinstance(val, Parameter) ): valid_artifacts.append((var, val)) current_artifact_names.add(var) # Transfer ONLY artifacts that aren't being overridden if hasattr(flow._datastore, "orig_datastore"): parent_artifacts = set(flow._datastore._objects.keys()) unchanged_artifacts = parent_artifacts - current_artifact_names if unchanged_artifacts: self.transfer_artifacts( flow._datastore.orig_datastore, names=list(unchanged_artifacts) ) def artifacts_iter(): # we consume the valid_artifacts list destructively to # make sure we don't keep references to artifacts. We # want to avoid keeping original artifacts and encoded # artifacts in memory simultaneously while valid_artifacts: var, val = valid_artifacts.pop() if not var.startswith("_") and var != "name": # NOTE: Destructive mutation of the flow object. 
We keep # around artifacts called 'name' and anything starting with # '_' as they are used by the Metaflow runtime. delattr(flow, var) yield var, val # Save current artifacts self.save_artifacts(artifacts_iter(), len_hint=len(valid_artifacts)) @only_if_not_done @require_mode("w") def save_logs(self, logsource, stream_data): """ Save log files for multiple streams, represented as a dictionary of streams. Each stream is identified by a type (a string) and is either a stringish or a BytesIO object or a Path object. Parameters ---------- logsource : string Identifies the source of the stream (runtime, task, etc) stream_data : Dict[string -> bytes or Path] Each entry should have a string as the key indicating the type of the stream ('stderr', 'stdout') and as value should be bytes or a Path from which to stream the log. """ to_store_dict = {} for stream, data in stream_data.items(): n = self._get_log_location(logsource, stream) if isinstance(data, Path): to_store_dict[n] = FileIO(str(data), mode="r") else: to_store_dict[n] = data self._save_file(to_store_dict) @require_mode("d") def scrub_logs(self, logsources, stream, attempt_override=None): path_logsources = { self._metadata_name_for_attempt( self._get_log_location(s, stream), attempt_override=attempt_override, ): s for s in logsources } # Legacy log paths legacy_log = self._metadata_name_for_attempt( "%s.log" % stream, attempt_override ) path_logsources[legacy_log] = stream existing_paths = [ path for path in path_logsources.keys() if self.has_metadata(path, add_attempt=False) ] # Replace log contents with [REDACTED source stream] to_store_dict = { path: bytes("[REDACTED %s %s]" % (path_logsources[path], stream), "utf-8") for path in existing_paths } self._save_file(to_store_dict, add_attempt=False, allow_overwrite=True) @require_mode("r") def load_log_legacy(self, stream, attempt_override=None): """ Load old-style, pre-mflog, log file represented as a bytes object. 
""" name = self._metadata_name_for_attempt("%s.log" % stream, attempt_override) r = self._load_file([name], add_attempt=False)[name] return r if r is not None else b"" @require_mode("r") def load_logs(self, logsources, stream, attempt_override=None): paths = dict( map( lambda s: ( self._metadata_name_for_attempt( self._get_log_location(s, stream), attempt_override=attempt_override, ), s, ), logsources, ) ) r = self._load_file(paths.keys(), add_attempt=False) return [(paths[k], v if v is not None else b"") for k, v in r.items()] @require_mode(None) def items(self): if self._objects: return self._objects.items() return {} @require_mode(None) def to_dict(self, show_private=False, max_value_size=None, include=None): d = {} for k, _ in self.items(): if include and k not in include: continue if k[0] == "_" and not show_private: continue info = self._info[k] if max_value_size is not None: if info["type"] == _included_file_type: sz = self[k].size else: sz = info.get("size", 0) if sz == 0 or sz > max_value_size: d[k] = ArtifactTooLarge() else: d[k] = self[k] if info["type"] == _included_file_type: d[k] = d[k].decode(k) else: d[k] = self[k] if info["type"] == _included_file_type: d[k] = d[k].decode(k) return d @require_mode("r") def format(self, **kwargs): def lines(): for k, v in self.to_dict(**kwargs).items(): if self._info[k]["type"] == _included_file_type: sz = self[k].size else: sz = self._info[k]["size"] yield k, "*{key}* [size: {size} type: {type}] = {value}".format( key=k, value=v, size=sz, type=self._info[k]["type"] ) return "\n".join(line for k, line in sorted(lines())) @require_mode(None) def __contains__(self, name): if self._objects: return name in self._objects return False @require_mode(None) def __getitem__(self, name): _, obj = next(self.load_artifacts([name])) return obj @require_mode("r") def __iter__(self): if self._objects: return iter(self._objects) return iter([]) @require_mode("r") def __str__(self): return self.format(show_private=True, 
max_value_size=1000) def _metadata_name_for_attempt(self, name, attempt_override=None): return self.metadata_name_for_attempt( name, self._attempt if attempt_override is None else attempt_override ) @staticmethod def _get_log_location(logprefix, stream): return "%s_%s.log" % (logprefix, stream) def _save_file(self, contents, allow_overwrite=True, add_attempt=True): """ Saves files in the directory for this TaskDataStore. This can be metadata, a log file or any other data that doesn't need to (or shouldn't) be stored in the Content Addressed Store. Parameters ---------- contents : Dict[string -> stringish or RawIOBase or BufferedIOBase] Dictionary of file to store allow_overwrite : boolean, optional If True, allows the overwriting of the metadata, defaults to True add_attempt : boolean, optional If True, adds the attempt identifier to the metadata, defaults to True """ def blob_iter(): for name, value in contents.items(): if add_attempt: path = self._storage_impl.path_join( self._path, self._metadata_name_for_attempt(name) ) else: path = self._storage_impl.path_join(self._path, name) if isinstance(value, (RawIOBase, BufferedIOBase)) and value.readable(): yield path, value elif is_stringish(value): yield path, to_fileobj(value) else: raise DataException( "Metadata '%s' for task '%s' has an invalid type: %s" % (name, self._path, type(value)) ) self._storage_impl.save_bytes(blob_iter(), overwrite=allow_overwrite) def _load_file(self, names, add_attempt=True): """ Loads files from the TaskDataStore directory. 
These can be metadata, logs or any other files Parameters ---------- names : List[string] The names of the files to load add_attempt : bool, optional Adds the attempt identifier to the metadata name if True, by default True Returns ------- Dict: string -> bytes Results indexed by the name of the metadata loaded """ to_load = [] for name in names: if add_attempt: path = self._storage_impl.path_join( self._path, self._metadata_name_for_attempt(name) ) else: path = self._storage_impl.path_join(self._path, name) to_load.append(path) results = {} with self._storage_impl.load_bytes(to_load) as load_results: for key, path, meta in load_results: if add_attempt: _, name = self.parse_attempt_metadata( self._storage_impl.basename(key) ) else: name = self._storage_impl.basename(key) if path is None: results[name] = None else: with open(path, "rb") as f: results[name] = f.read() return results
TaskDataStore
python
mwaskom__seaborn
tests/test_categorical.py
{ "start": 26706, "end": 26809 }
class ____(SharedScatterTests): func = staticmethod(partial(swarmplot, warn_thresh=1))
TestSwarmPlot
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/transfers/test_dynamodb_to_s3.py
{ "start": 1919, "end": 13802 }
class ____: def setup_method(self): self.output_queue = [] def mock_upload_file(self, Filename, Bucket, Key): with open(Filename) as f: lines = f.readlines() for line in lines: self.output_queue.append(json.loads(line)) @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.S3Hook") @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.DynamoDBHook") def test_dynamodb_to_s3_success(self, mock_aws_dynamodb_hook, mock_s3_hook): responses = [ { "Items": [{"a": 1}, {"b": 2}], "LastEvaluatedKey": "123", }, { "Items": [{"c": 3}], }, ] table = MagicMock() table.return_value.scan.side_effect = responses mock_aws_dynamodb_hook.return_value.conn.Table = table s3_client = MagicMock() s3_client.return_value.upload_file = self.mock_upload_file mock_s3_hook.return_value.get_conn = s3_client dynamodb_to_s3_operator = DynamoDBToS3Operator( task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", s3_bucket_name="airflow-bucket", file_size=4000, ) dynamodb_to_s3_operator.execute(context={}) assert self.output_queue == [{"a": 1}, {"b": 2}, {"c": 3}] @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.S3Hook") @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.DynamoDBHook") def test_dynamodb_to_s3_success_with_decimal(self, mock_aws_dynamodb_hook, mock_s3_hook): a = Decimal(10.028) b = Decimal("10.048") responses = [ { "Items": [{"a": a}, {"b": b}], } ] table = MagicMock() table.return_value.scan.side_effect = responses mock_aws_dynamodb_hook.return_value.conn.Table = table s3_client = MagicMock() s3_client.return_value.upload_file = self.mock_upload_file mock_s3_hook.return_value.get_conn = s3_client dynamodb_to_s3_operator = DynamoDBToS3Operator( task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", s3_bucket_name="airflow-bucket", file_size=4000, ) dynamodb_to_s3_operator.execute(context={}) assert self.output_queue == [{"a": float(a)}, {"b": float(b)}] @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.S3Hook") 
@patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.DynamoDBHook") def test_dynamodb_to_s3_default_connection(self, mock_aws_dynamodb_hook, mock_s3_hook): responses = [ { "Items": [{"a": 1}, {"b": 2}], "LastEvaluatedKey": "123", }, { "Items": [{"c": 3}], }, ] table = MagicMock() table.return_value.scan.side_effect = responses mock_aws_dynamodb_hook.return_value.get_conn.return_value.Table = table s3_client = MagicMock() s3_client.return_value.upload_file = self.mock_upload_file mock_s3_hook.return_value.get_conn = s3_client dynamodb_to_s3_operator = DynamoDBToS3Operator( task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", s3_bucket_name="airflow-bucket", file_size=4000, ) dynamodb_to_s3_operator.execute(context={}) aws_conn_id = "aws_default" mock_s3_hook.assert_called_with(aws_conn_id=aws_conn_id) mock_aws_dynamodb_hook.assert_called_with(aws_conn_id=aws_conn_id) @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.S3Hook") @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.DynamoDBHook") def test_dynamodb_to_s3_with_different_aws_conn_id(self, mock_aws_dynamodb_hook, mock_s3_hook): responses = [ { "Items": [{"a": 1}, {"b": 2}], "LastEvaluatedKey": "123", }, { "Items": [{"c": 3}], }, ] table = MagicMock() table.return_value.scan.side_effect = responses mock_aws_dynamodb_hook.return_value.conn.Table = table s3_client = MagicMock() s3_client.return_value.upload_file = self.mock_upload_file mock_s3_hook.return_value.get_conn = s3_client aws_conn_id = "test-conn-id" dynamodb_to_s3_operator = DynamoDBToS3Operator( task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", s3_bucket_name="airflow-bucket", file_size=4000, source_aws_conn_id=aws_conn_id, ) dynamodb_to_s3_operator.execute(context={}) assert self.output_queue == [{"a": 1}, {"b": 2}, {"c": 3}] mock_s3_hook.assert_called_with(aws_conn_id=aws_conn_id) mock_aws_dynamodb_hook.assert_called_with(aws_conn_id=aws_conn_id) 
@patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.S3Hook") @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.DynamoDBHook") def test_dynamodb_to_s3_with_two_different_connections(self, mock_aws_dynamodb_hook, mock_s3_hook): responses = [ { "Items": [{"a": 1}, {"b": 2}], "LastEvaluatedKey": "123", }, { "Items": [{"c": 3}], }, ] table = MagicMock() table.return_value.scan.side_effect = responses mock_aws_dynamodb_hook.return_value.conn.Table = table s3_client = MagicMock() s3_client.return_value.upload_file = self.mock_upload_file mock_s3_hook.return_value.get_conn = s3_client s3_aws_conn_id = "test-conn-id" dynamodb_conn_id = "test-dynamodb-conn-id" dynamodb_to_s3_operator = DynamoDBToS3Operator( task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", source_aws_conn_id=dynamodb_conn_id, s3_bucket_name="airflow-bucket", file_size=4000, dest_aws_conn_id=s3_aws_conn_id, ) dynamodb_to_s3_operator.execute(context={}) assert self.output_queue == [{"a": 1}, {"b": 2}, {"c": 3}] mock_s3_hook.assert_called_with(aws_conn_id=s3_aws_conn_id) mock_aws_dynamodb_hook.assert_called_with(aws_conn_id=dynamodb_conn_id) @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.S3Hook") @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.DynamoDBHook") def test_dynamodb_to_s3_with_just_dest_aws_conn_id(self, mock_aws_dynamodb_hook, mock_s3_hook): responses = [ { "Items": [{"a": 1}, {"b": 2}], "LastEvaluatedKey": "123", }, { "Items": [{"c": 3}], }, ] table = MagicMock() table.return_value.scan.side_effect = responses mock_aws_dynamodb_hook.return_value.conn.Table = table s3_client = MagicMock() s3_client.return_value.upload_file = self.mock_upload_file mock_s3_hook.return_value.get_conn = s3_client s3_aws_conn_id = "test-conn-id" dynamodb_to_s3_operator = DynamoDBToS3Operator( task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", s3_bucket_name="airflow-bucket", file_size=4000, dest_aws_conn_id=s3_aws_conn_id, ) 
dynamodb_to_s3_operator.execute(context={}) assert self.output_queue == [{"a": 1}, {"b": 2}, {"c": 3}] mock_aws_dynamodb_hook.assert_called_with(aws_conn_id="aws_default") mock_s3_hook.assert_called_with(aws_conn_id=s3_aws_conn_id) @pytest.mark.db_test def test_render_template(self, session, clean_dags_dagruns_and_dagbundles, testing_dag_bundle): dag = DAG("test_render_template_dag_id", schedule=None, start_date=datetime(2020, 1, 1)) operator = DynamoDBToS3Operator( task_id="dynamodb_to_s3_test_render", dag=dag, dynamodb_table_name="{{ ds }}", s3_key_prefix="{{ ds }}", s3_bucket_name="{{ ds }}", file_size=4000, source_aws_conn_id="{{ ds }}", dest_aws_conn_id="{{ ds }}", ) if AIRFLOW_V_3_0_PLUS: from airflow.models.dag_version import DagVersion sync_dag_to_db(dag) dag_version = DagVersion.get_latest_version(dag.dag_id) ti = TaskInstance(operator, run_id="something", dag_version_id=dag_version.id) ti.dag_run = DagRun( dag_id=dag.dag_id, run_id="something", logical_date=timezone.datetime(2020, 1, 1), run_type=DagRunType.MANUAL, state=DagRunState.RUNNING, ) else: ti = TaskInstance(operator, run_id="something") ti.dag_run = DagRun( dag_id=dag.dag_id, run_id="something", execution_date=timezone.datetime(2020, 1, 1), run_type=DagRunType.MANUAL, state=DagRunState.RUNNING, ) session.add(ti) session.commit() ti.render_templates() assert getattr(operator, "source_aws_conn_id") == "2020-01-01" assert getattr(operator, "dest_aws_conn_id") == "2020-01-01" assert getattr(operator, "s3_bucket_name") == "2020-01-01" assert getattr(operator, "dynamodb_table_name") == "2020-01-01" assert getattr(operator, "s3_key_prefix") == "2020-01-01" @patch("airflow.providers.amazon.aws.transfers.dynamodb_to_s3.DynamoDBToS3Operator._export_entire_data") def test_dynamodb_execute_calling_export_entire_data(self, _export_entire_data): """Test that DynamoDBToS3Operator when called without export_time will call _export_entire_data""" dynamodb_to_s3_operator = DynamoDBToS3Operator( 
task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", s3_bucket_name="airflow-bucket", file_size=4000, ) dynamodb_to_s3_operator.execute(context={}) _export_entire_data.assert_called() @patch( "airflow.providers.amazon.aws.transfers.dynamodb_to_s3.DynamoDBToS3Operator." "_export_table_to_point_in_time" ) def test_dynamodb_execute_calling_export_table_to_point_in_time(self, _export_table_to_point_in_time): """Test that DynamoDBToS3Operator when called without export_time will call _export_table_to_point_in_time. Which implements point in time recovery logic""" dynamodb_to_s3_operator = DynamoDBToS3Operator( task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", s3_bucket_name="airflow-bucket", file_size=4000, point_in_time_export=True, export_time=datetime(year=1983, month=1, day=1), ) dynamodb_to_s3_operator.execute(context={}) _export_table_to_point_in_time.assert_called() def test_dynamodb_with_future_date(self): """Test that DynamoDBToS3Operator should raise a exception when future date is passed in export_time parameter""" with pytest.raises(ValueError, match="The export_time parameter cannot be a future time."): DynamoDBToS3Operator( task_id="dynamodb_to_s3", dynamodb_table_name="airflow_rocks", s3_bucket_name="airflow-bucket", file_size=4000, point_in_time_export=True, export_time=datetime(year=3000, month=1, day=1), ).execute(context={})
TestDynamodbToS3
python
django-extensions__django-extensions
tests/management/commands/test_syncdata.py
{ "start": 2125, "end": 3667 }
class ____(TestCase): """Tests for syncdata command.""" @patch("sys.stdout", new_callable=StringIO) def test_should_print_No_fixtures_found_if_fixture_labels_not_provided( self, m_stdout ): call_command("syncdata", verbosity=2) self.assertEqual("No fixtures found.\n", m_stdout.getvalue()) @patch("sys.stdout", new_callable=StringIO) def test_should_print_No_fixtures_found_if_fixtures_not_found(self, m_stdout): call_command("syncdata", "foo", verbosity=2) self.assertIn("No fixtures found.\n", m_stdout.getvalue()) def test_should_keep_old_objects_and_load_data_from_json_fixture(self): User.objects.all().delete() User.objects.create(username="foo") call_command( "syncdata", "--skip-remove", os.path.join(TEST_FIXTURE_DIR, "users.json"), verbosity=2, ) self.assertTrue(User.objects.filter(username="jdoe").exists()) self.assertTrue(User.objects.filter(username="foo").exists()) @patch("sys.stdout", new_callable=StringIO) def test_should_delete_old_objects_and_load_data_from_json_fixture(self, m_stdout): User.objects.all().delete() User.objects.create(username="foo") call_command("syncdata", "users.json", verbosity=2) self.assertTrue(User.objects.filter(username="jdoe").exists()) self.assertEqual(User.objects.count(), 1) self.assertIn("Installed 1 object from 1 fixture", m_stdout.getvalue())
SyncDataTests
python
PrefectHQ__prefect
tests/server/orchestration/test_task_concurrency_v2_integration.py
{ "start": 827, "end": 13648 }
class ____: """Test SecureTaskConcurrencySlots with V2 Global Concurrency Limits.""" async def create_v1_concurrency_limit( self, session: AsyncSession, tag: str, limit: int ) -> None: """Helper to create a V1 concurrency limit.""" cl_create = actions.ConcurrencyLimitCreate( tag=tag, concurrency_limit=limit, ).model_dump(mode="json") cl_model = core.ConcurrencyLimit(**cl_create) await concurrency_limits.create_concurrency_limit( session=session, concurrency_limit=cl_model ) async def create_v2_concurrency_limit( self, session: AsyncSession, tag: str, limit: int ) -> ConcurrencyLimitV2: """Helper to create a V2 concurrency limit.""" gcl = await concurrency_limits_v2.create_concurrency_limit( session=session, concurrency_limit=actions.ConcurrencyLimitV2Create( name=f"tag:{tag}", limit=limit, active=True, ), ) return gcl async def test_v2_limits_take_priority_over_v1( self, session: AsyncSession, initialize_orchestration: Callable[..., Any], ) -> None: """Test that V2 limits are processed before V1 limits for the same tag.""" # Create both V1 and V2 limits for the same tag await self.create_v1_concurrency_limit(session, "shared-tag", 2) v2_limit = await self.create_v2_concurrency_limit(session, "shared-tag", 1) concurrency_policy = [SecureTaskConcurrencySlots] running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # First task should use V2 limit (limit 1) not V1 limit (limit 2) ctx1 = await initialize_orchestration( session, "task", *running_transition, run_tags=["shared-tag"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx1 = await stack.enter_async_context(rule(ctx1, *running_transition)) await ctx1.validate_proposed_state() assert ctx1.response_status == SetStateStatus.ACCEPT # Check that V2 limit has 1 active slot await session.refresh(v2_limit) assert v2_limit.active_slots == 1 # Second task should be delayed because V2 limit is reached (limit 1) ctx2 = await initialize_orchestration( session, "task", 
*running_transition, run_tags=["shared-tag"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx2 = await stack.enter_async_context(rule(ctx2, *running_transition)) await ctx2.validate_proposed_state() assert ctx2.response_status == SetStateStatus.WAIT async def test_v2_zero_limit_aborts_transition( self, session: AsyncSession, initialize_orchestration: Callable[..., Any], ) -> None: """Test that V2 limits with zero limit abort transitions.""" await self.create_v2_concurrency_limit(session, "zero-tag", 0) concurrency_policy = [SecureTaskConcurrencySlots] running_transition = (states.StateType.PENDING, states.StateType.RUNNING) ctx = await initialize_orchestration( session, "task", *running_transition, run_tags=["zero-tag"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx = await stack.enter_async_context(rule(ctx, *running_transition)) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ABORT assert "is 0 and will deadlock" in ctx.response_details.reason async def test_v2_lease_creation_and_metadata( self, session: AsyncSession, initialize_orchestration: Callable[..., Any], ) -> None: """Test that V2 limits create proper leases with metadata.""" v2_limit = await self.create_v2_concurrency_limit(session, "lease-tag", 2) running_transition = (states.StateType.PENDING, states.StateType.RUNNING) ctx = await initialize_orchestration( session, "task", *running_transition, run_tags=["lease-tag"] ) # Use the rule with try/finally to ensure cleanup happens rule = SecureTaskConcurrencySlots(ctx, *running_transition) try: async with rule as rule_ctx: await rule_ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT # Verify V2 limit active slots were incremented await session.refresh(v2_limit) assert v2_limit.active_slots == 1 # Verify lease was created - check the rule's internal tracking assert len(rule._acquired_v2_lease_ids) == 1 lease_id = 
rule._acquired_v2_lease_ids[0] # Verify lease exists and has proper metadata lease_storage = get_concurrency_lease_storage() lease = await lease_storage.read_lease(lease_id=lease_id) assert lease is not None assert lease.metadata is not None assert lease.metadata.slots == 1 assert lease.metadata.holder.type == "task_run" assert lease.metadata.holder.id == ctx.run.id finally: # Cleanup happens in rule's cleanup method pass async def test_mixed_v1_v2_tags_on_same_task( self, session: AsyncSession, initialize_orchestration: Callable[..., Any], ) -> None: """Test task with both V1 and V2 tags processes V2 first, then V1.""" # Create V2 limit for one tag, V1 for another v2_limit = await self.create_v2_concurrency_limit(session, "v2-tag", 1) await self.create_v1_concurrency_limit(session, "v1-tag", 1) concurrency_policy = [SecureTaskConcurrencySlots] running_transition = (states.StateType.PENDING, states.StateType.RUNNING) ctx = await initialize_orchestration( session, "task", *running_transition, run_tags=["v2-tag", "v1-tag"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx = await stack.enter_async_context(rule(ctx, *running_transition)) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT # Verify V2 limit was used await session.refresh(v2_limit) assert v2_limit.active_slots == 1 # Verify V1 limit was also used (should have the task run ID in active_slots) v1_limit = await concurrency_limits.read_concurrency_limit_by_tag( session, "v1-tag" ) assert str(ctx.run.id) in v1_limit.active_slots async def test_v2_lease_cleanup_on_abort( self, session: AsyncSession, initialize_orchestration: Callable[..., Any], ) -> None: """Test that V2 leases are properly cleaned up when transition is aborted.""" # Create a zero limit which will trigger abort immediately zero_limit = await self.create_v2_concurrency_limit(session, "zero-tag", 0) concurrency_policy = [SecureTaskConcurrencySlots] running_transition = 
(states.StateType.PENDING, states.StateType.RUNNING) ctx = await initialize_orchestration( session, "task", *running_transition, run_tags=["zero-tag"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx = await stack.enter_async_context(rule(ctx, *running_transition)) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ABORT # Verify zero limit is still zero - no slots should have been acquired await session.refresh(zero_limit) assert zero_limit.active_slots == 0 async def test_v1_limits_processed_when_no_v2_overlap( self, session: AsyncSession, initialize_orchestration: Callable[..., Any], ) -> None: """Test that V1 limits are still processed for tags without V2 limits.""" # Create V2 limit for one tag, V1 for different tags await self.create_v2_concurrency_limit(session, "v2-only", 2) await self.create_v1_concurrency_limit(session, "v1-only", 1) concurrency_policy = [SecureTaskConcurrencySlots] running_transition = (states.StateType.PENDING, states.StateType.RUNNING) # Test with only V1 tag ctx1 = await initialize_orchestration( session, "task", *running_transition, run_tags=["v1-only"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx1 = await stack.enter_async_context(rule(ctx1, *running_transition)) await ctx1.validate_proposed_state() assert ctx1.response_status == SetStateStatus.ACCEPT # Verify V1 limit was used v1_limit = await concurrency_limits.read_concurrency_limit_by_tag( session, "v1-only" ) assert str(ctx1.run.id) in v1_limit.active_slots # Test second task hits V1 limit ctx2 = await initialize_orchestration( session, "task", *running_transition, run_tags=["v1-only"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx2 = await stack.enter_async_context(rule(ctx2, *running_transition)) await ctx2.validate_proposed_state() assert ctx2.response_status == SetStateStatus.WAIT async def test_v2_inactive_limits_ignored( 
self, session: AsyncSession, initialize_orchestration: Callable[..., Any], ) -> None: """Test that inactive V2 limits are ignored.""" # Create inactive V2 limit and active V1 limit for same tag v2_limit = await concurrency_limits_v2.create_concurrency_limit( session=session, concurrency_limit=actions.ConcurrencyLimitV2Create( name="tag:inactive-tag", limit=1, active=False, # Inactive ), ) await self.create_v1_concurrency_limit(session, "inactive-tag", 2) concurrency_policy = [SecureTaskConcurrencySlots] running_transition = (states.StateType.PENDING, states.StateType.RUNNING) ctx = await initialize_orchestration( session, "task", *running_transition, run_tags=["inactive-tag"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx = await stack.enter_async_context(rule(ctx, *running_transition)) await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT # Verify V2 limit was not used (should be 0 active slots) await session.refresh(v2_limit) assert v2_limit.active_slots == 0 # Verify V1 limit was used instead v1_limit = await concurrency_limits.read_concurrency_limit_by_tag( session, "inactive-tag" ) assert str(ctx.run.id) in v1_limit.active_slots async def test_v2_tags_excluded_from_v1_processing( self, session: AsyncSession, initialize_orchestration: Callable[..., Any], ) -> None: """Test that tags with V2 limits are excluded from V1 processing.""" # Create both V2 and V1 limits for the same tag v2_limit = await self.create_v2_concurrency_limit(session, "shared-tag", 5) await self.create_v1_concurrency_limit(session, "shared-tag", 2) concurrency_policy = [SecureTaskConcurrencySlots] running_transition = (states.StateType.PENDING, states.StateType.RUNNING) ctx = await initialize_orchestration( session, "task", *running_transition, run_tags=["shared-tag"] ) async with contextlib.AsyncExitStack() as stack: for rule in concurrency_policy: ctx = await stack.enter_async_context(rule(ctx, *running_transition)) 
await ctx.validate_proposed_state() assert ctx.response_status == SetStateStatus.ACCEPT # V2 limit should be used await session.refresh(v2_limit) assert v2_limit.active_slots == 1 # V1 limit should NOT be used (active_slots should be empty) v1_limit = await concurrency_limits.read_concurrency_limit_by_tag( session, "shared-tag" ) assert str(ctx.run.id) not in v1_limit.active_slots assert len(v1_limit.active_slots) == 0
TestSecureTaskConcurrencySlotsV2Integration
python
getsentry__sentry
src/sentry/monitors/system_incidents.py
{ "start": 13371, "end": 14090 }
class ____: ts: datetime """ The associated timestamp of the decision. Typically this will be the clock tick when the decision was made. However for a incident start and end transitions this will be the back-dated timestamp of when the state began. INCIDENT_STARTED -> Tick when the incident truly starts INCIDENT_RECOVERED -> Tick when the incident truly recovered """ decision: TickAnomalyDecision """ The recorded decision made for the clock tick """ transition: AnomalyTransition | None = None """ Reflects the transition status when making a tick decision results in a state transition. None if the decision has not changed. """
DecisionResult
python
milvus-io__pymilvus
pymilvus/client/utils.py
{ "start": 10084, "end": 17314 }
class ____: _checked = False # whether scipy.sparse.*_matrix classes exists _matrix_available = False # whether scipy.sparse.*_array classes exists _array_available = False @classmethod def _init(cls): if cls._checked: return scipy_spec = importlib.util.find_spec("scipy") if scipy_spec is not None: # when scipy is not installed, find_spec("scipy.sparse") directly # throws exception instead of returning None. sparse_spec = importlib.util.find_spec("scipy.sparse") if sparse_spec is not None: scipy_sparse = importlib.util.module_from_spec(sparse_spec) sparse_spec.loader.exec_module(scipy_sparse) # all scipy.sparse.*_matrix classes are introduced in the same scipy # version, so we only need to check one of them. cls._matrix_available = hasattr(scipy_sparse, "csr_matrix") # all scipy.sparse.*_array classes are introduced in the same scipy # version, so we only need to check one of them. cls._array_available = hasattr(scipy_sparse, "csr_array") cls._checked = True @classmethod def is_spmatrix(cls, data: Any): cls._init() if not cls._matrix_available: return False # ruff: noqa: PLC0415 from scipy.sparse import isspmatrix return isspmatrix(data) @classmethod def is_sparray(cls, data: Any): cls._init() if not cls._array_available: return False # ruff: noqa: PLC0415 from scipy.sparse import issparse, isspmatrix return issparse(data) and not isspmatrix(data) @classmethod def is_scipy_sparse(cls, data: Any): return cls.is_spmatrix(data) or cls.is_sparray(data) # in search results, if output fields includes a sparse float vector field, we # will return a SparseRowOutputType for each entity. Using Dict for readability. # TODO(SPARSE): to allow the user to specify output format. 
SparseRowOutputType = Dict[int, float] # this import will be called only during static type checking if TYPE_CHECKING: from scipy.sparse import ( bsr_array, coo_array, csc_array, csr_array, dia_array, dok_array, lil_array, spmatrix, ) # we accept the following types as input for sparse matrix in user facing APIs # such as insert, search, etc.: # - scipy sparse array/matrix family: csr, csc, coo, bsr, dia, dok, lil # - iterable of iterables, each element(iterable) is a sparse vector with index # as key and value as float. # dict example: [{2: 0.33, 98: 0.72, ...}, {4: 0.45, 198: 0.52, ...}, ...] # list of tuple example: [[(2, 0.33), (98, 0.72), ...], [(4, 0.45), ...], ...] # both index/value can be str numbers: {'2': '3.1'} SparseMatrixInputType = Union[ Iterable[ Union[ SparseRowOutputType, Iterable[Tuple[int, float]], # only type hint, we accept int/float like types ] ], "csc_array", "coo_array", "bsr_array", "dia_array", "dok_array", "lil_array", "csr_array", "spmatrix", ] def is_sparse_vector_type(data_type: DataType) -> bool: return data_type == data_type.SPARSE_FLOAT_VECTOR dense_float_vector_type_set = { DataType.FLOAT_VECTOR, DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR, } dense_vector_type_set = { DataType.FLOAT_VECTOR, DataType.FLOAT16_VECTOR, DataType.BFLOAT16_VECTOR, DataType.INT8_VECTOR, } def is_dense_float_vector_type(data_type: DataType) -> bool: return data_type in dense_float_vector_type_set def is_float_vector_type(data_type: DataType): return is_sparse_vector_type(data_type) or is_dense_float_vector_type(data_type) def is_binary_vector_type(data_type: DataType): return data_type == DataType.BINARY_VECTOR def is_int_vector_type(data_type: DataType): return data_type == DataType.INT8_VECTOR def is_vector_type(data_type: DataType): return ( is_float_vector_type(data_type) or is_binary_vector_type(data_type) or is_int_vector_type(data_type) ) # parses plain bytes to a sparse float vector(SparseRowOutputType) def sparse_parse_single_row(data: 
bytes) -> SparseRowOutputType: if len(data) % 8 != 0: raise ParamError(message=f"The length of data must be a multiple of 8, got {len(data)}") return { struct.unpack("I", data[i : i + 4])[0]: struct.unpack("f", data[i + 4 : i + 8])[0] for i in range(0, len(data), 8) } def convert_struct_fields_to_user_format(struct_array_fields: List[Dict]) -> List[Dict]: """ Convert internal struct_array_fields representation to user-friendly format. :param struct_array_fields: List of struct field info from server :return: List of user-friendly field dictionaries """ converted_fields = [] for struct_field_info in struct_array_fields: # Convert to user perspective: a field of type ARRAY with element_type STRUCT user_struct_field = { "field_id": struct_field_info.get("field_id"), "name": struct_field_info["name"], "description": struct_field_info.get("description", ""), "type": DataType.ARRAY, "element_type": DataType.STRUCT, "params": {}, } # Extract max_capacity from first field (all fields should have the same value) max_capacity = None for f in struct_field_info.get("fields", []): params = f.get("params", {}) if isinstance(params, dict) and params.get("max_capacity"): max_capacity = params["max_capacity"] break if max_capacity: user_struct_field["params"]["max_capacity"] = max_capacity # Convert struct sub-fields to user-defined types struct_fields = [] for f in struct_field_info.get("fields", []): # Struct fields are always ARRAY or ARRAY_OF_VECTOR, so element_type must exist # Handle both cases: element_type as dict key or already converted DataType user_field_type = f.get("element_type") if user_field_type: struct_sub_field = { "field_id": f.get("field_id"), "name": f["name"], "type": user_field_type, "description": f.get("description", ""), } params = f.get("params", {}) if params and isinstance(params, dict): cleaned_params = {k: v for k, v in params.items() if k != "max_capacity"} if cleaned_params: struct_sub_field["params"] = cleaned_params 
struct_fields.append(struct_sub_field) user_struct_field["struct_fields"] = struct_fields converted_fields.append(user_struct_field) return converted_fields def validate_iso_timestamp(s: str) -> bool: try: isoparse(s) except (ValueError, TypeError): return False else: return True
SciPyHelper
python
airbytehq__airbyte
airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/documentation/documentation.py
{ "start": 17468, "end": 20595 }
class ____(CheckDocumentationContent): required = True expected_section_index = 0 @property def name(self) -> str: return f"'{self.header}' section of the documentation follows our guidelines" @property def description(self) -> str: templates = TemplateContent("CONNECTOR_NAME_FROM_METADATA").section(self.header) if templates is None: template = "" # Provide default empty template if section is missing elif len(templates) > 1: template = templates[1] else: template = templates[0] return generate_description("section_content_description.md.j2", {"header": self.header, "template": template}) @property @abc.abstractmethod def header(self) -> str: """The name of header for validating content""" def check_section(self, connector: Connector) -> List[str]: documentation = DocumentationContent(connector=connector) if self.header not in documentation.headers: if self.required: return [f"Documentation does not have {self.header} section."] return [] errors: List[str] = [] expected = TemplateContent(connector.name_from_metadata).section(self.header)[self.expected_section_index] # type: ignore actual_contents = documentation.section(self.header) if actual_contents is None: return [f"Documentation {self.header} section is empty"] actual_contents = [c[: len(expected)] if len(c) > len(expected) else c for c in actual_contents] close_matches = get_close_matches(expected, actual_contents) if not close_matches: return [f"Please review your {self.header} section, unable to find the expected content:\n{expected}"] # Commenting out this check to allow for more lenient matching of the expected content. # Leaving it here for reference in case we want to revert this change. 
# actual = close_matches[0] # if actual != expected: # errors = list(ndiff(actual.splitlines(keepends=True), expected.splitlines(keepends=True))) return errors def _run(self, connector: Connector) -> CheckResult: if not connector.documentation_file_path or not connector.documentation_file_path.exists(): return self.fail( connector=connector, message="Could not check documentation structure as the documentation file is missing.", ) if not connector.documentation_file_path.read_text(): return self.fail( connector=connector, message="Documentation file is empty", ) errors = self.check_section(connector) if errors: return self.fail( connector=connector, message=f"Connector {self.header} section content does not follow standard template:\n{''.join(errors)}", ) return self.pass_( connector=connector, message="Documentation guidelines are followed", )
CheckSection
python
wandb__wandb
wandb/sdk/artifacts/artifact_ttl.py
{ "start": 81, "end": 122 }
class ____(Enum): INHERIT = 0
ArtifactTTL
python
apache__airflow
helm-tests/tests/helm_tests/airflow_aux/test_remote_logging.py
{ "start": 1397, "end": 8843 }
class ____: """Tests elasticsearch configuration behaviors.""" def test_should_not_generate_secret_document_if_elasticsearch_disabled(self): docs = render_chart( values={"elasticsearch": {"enabled": False}}, show_only=[ES_SECRET_TEMPLATE], ) assert len(docs) == 0 def test_should_raise_error_when_connection_not_provided(self): with pytest.raises(CalledProcessError) as ex_ctx: render_chart( values={ "elasticsearch": { "enabled": True, } }, show_only=[ES_SECRET_TEMPLATE], ) assert ( "You must set one of the values elasticsearch.secretName or elasticsearch.connection " "when using a Elasticsearch" in ex_ctx.value.stderr.decode() ) def test_should_raise_error_when_conflicting_options(self): with pytest.raises(CalledProcessError) as ex_ctx: render_chart( values={ "elasticsearch": { "enabled": True, "secretName": "my-test", "connection": { "user": "username!@#$%%^&*()", "pass": "password!@#$%%^&*()", "host": "elastichostname", }, }, }, show_only=[ES_SECRET_TEMPLATE], ) assert ( "You must not set both values elasticsearch.secretName and elasticsearch.connection" in ex_ctx.value.stderr.decode() ) def test_scheduler_should_add_log_port_when_local_executor_and_elasticsearch_disabled(self): docs = render_chart( values={"executor": "LocalExecutor"}, show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE], ) assert jmespath.search("spec.template.spec.containers[0].ports", docs[0]) == [ {"name": "worker-logs", "containerPort": 8793} ] def test_scheduler_should_omit_log_port_when_elasticsearch_enabled(self): docs = render_chart( values={ "executor": "LocalExecutor", "elasticsearch": { "enabled": True, "secretName": "test-elastic-secret", }, }, show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE], ) assert "ports" not in jmespath.search("spec.template.spec.containers[0]", docs[0]) def test_env_should_omit_elasticsearch_host_var_if_es_disabled(self): docs = render_chart( values={}, show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE], ) scheduler_env_keys = 
jmespath.search("spec.template.spec.containers[0].env[*].name", docs[0]) assert "AIRFLOW__ELASTICSEARCH__HOST" not in scheduler_env_keys def test_env_should_add_elasticsearch_host_var_if_es_enabled(self): docs = render_chart( values={ "elasticsearch": { "enabled": True, "secretName": "test-elastic-secret", }, }, show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE], ) scheduler_env = jmespath.search("spec.template.spec.containers[0].env", docs[0]) assert { "name": "AIRFLOW__ELASTICSEARCH__HOST", "valueFrom": {"secretKeyRef": {"name": "test-elastic-secret", "key": "connection"}}, } in scheduler_env def test_env_should_omit_elasticsearch_host_var_if_es_disabled_legacy(self): """AIRFLOW__ELASTICSEARCH__ELASTICSEARCH_HOST was the environment key prior to Airflow 1.10.4 (see https://github.com/apache/airflow/pull/5048), this test can be removed when the Helm chart no longer supports Airflow 1.10.3""" docs = render_chart( values={}, show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE], ) scheduler_env_keys = jmespath.search("spec.template.spec.containers[0].env[*].name", docs[0]) assert "AIRFLOW__ELASTICSEARCH__ELASTICSEARCH_HOST" not in scheduler_env_keys def test_env_should_add_elasticsearch_host_var_if_es_enabled_legacy(self): """AIRFLOW__ELASTICSEARCH__ELASTICSEARCH_HOST was the environment key prior to Airflow 1.10.4 (see https://github.com/apache/airflow/pull/5048), this test can be removed when the Helm chart no longer supports Airflow 1.10.3""" docs = render_chart( values={ "elasticsearch": { "enabled": True, "secretName": "test-elastic-secret", }, }, show_only=[SCHEDULER_DEPLOYMENT_TEMPLATE], ) scheduler_env = jmespath.search("spec.template.spec.containers[0].env", docs[0]) assert { "name": "AIRFLOW__ELASTICSEARCH__ELASTICSEARCH_HOST", "valueFrom": {"secretKeyRef": {"name": "test-elastic-secret", "key": "connection"}}, } in scheduler_env def test_airflow_cfg_should_set_remote_logging_false_if_es_disabled(self): docs = render_chart( values={}, show_only=[CONFIGMAP_TEMPLATE], ) 
airflow_cfg_text = jmespath.search('data."airflow.cfg"', docs[0]) logging_lines = LOGGING_CFG_REGEX.findall(airflow_cfg_text)[0].strip().splitlines() assert "remote_logging = False" in logging_lines def test_airflow_cfg_should_set_remote_logging_true_if_es_enabled(self): docs = render_chart( values={ "elasticsearch": { "enabled": True, "secretName": "test-elastic-secret", }, }, show_only=[CONFIGMAP_TEMPLATE], ) airflow_cfg_text = jmespath.search('data."airflow.cfg"', docs[0]) logging_lines = LOGGING_CFG_REGEX.findall(airflow_cfg_text)[0].strip().splitlines() assert "remote_logging = True" in logging_lines def test_airflow_cfg_should_set_remote_logging_false_if_es_disabled_legacy(self): """core.remote_logging was the config location prior to Airflow 2.0.0, this test can be removed when the Helm chart no longer supports Airflow 1.x""" docs = render_chart( values={}, show_only=[CONFIGMAP_TEMPLATE], ) airflow_cfg_text = jmespath.search('data."airflow.cfg"', docs[0]) core_lines = CORE_CFG_REGEX.findall(airflow_cfg_text)[0].strip().splitlines() assert "remote_logging = False" in core_lines def test_airflow_cfg_should_set_remote_logging_true_if_es_enabled_legacy(self): """core.remote_logging was the config location prior to Airflow 2.0.0, this test can be removed when the Helm chart no longer supports Airflow 1.x""" docs = render_chart( values={ "elasticsearch": { "enabled": True, "secretName": "test-elastic-secret", }, }, show_only=[CONFIGMAP_TEMPLATE], ) airflow_cfg_text = jmespath.search('data."airflow.cfg"', docs[0]) core_lines = CORE_CFG_REGEX.findall(airflow_cfg_text)[0].strip().splitlines() assert "remote_logging = True" in core_lines
TestElasticsearchConfig
python
mwaskom__seaborn
tests/_marks/test_bar.py
{ "start": 233, "end": 3492 }
class ____: def plot_bars(self, variables, mark_kws, layer_kws): p = Plot(**variables).add(Bar(**mark_kws), **layer_kws).plot() ax = p._figure.axes[0] return [bar for barlist in ax.containers for bar in barlist] def check_bar(self, bar, x, y, width, height): assert bar.get_x() == pytest.approx(x) assert bar.get_y() == pytest.approx(y) assert bar.get_width() == pytest.approx(width) assert bar.get_height() == pytest.approx(height) def test_categorical_positions_vertical(self): x = ["a", "b"] y = [1, 2] w = .8 bars = self.plot_bars({"x": x, "y": y}, {}, {}) for i, bar in enumerate(bars): self.check_bar(bar, i - w / 2, 0, w, y[i]) def test_categorical_positions_horizontal(self): x = [1, 2] y = ["a", "b"] w = .8 bars = self.plot_bars({"x": x, "y": y}, {}, {}) for i, bar in enumerate(bars): self.check_bar(bar, 0, i - w / 2, x[i], w) def test_numeric_positions_vertical(self): x = [1, 2] y = [3, 4] w = .8 bars = self.plot_bars({"x": x, "y": y}, {}, {}) for i, bar in enumerate(bars): self.check_bar(bar, x[i] - w / 2, 0, w, y[i]) def test_numeric_positions_horizontal(self): x = [1, 2] y = [3, 4] w = .8 bars = self.plot_bars({"x": x, "y": y}, {}, {"orient": "h"}) for i, bar in enumerate(bars): self.check_bar(bar, 0, y[i] - w / 2, x[i], w) def test_set_properties(self): x = ["a", "b", "c"] y = [1, 3, 2] mark = Bar( color=".8", alpha=.5, edgecolor=".3", edgealpha=.9, edgestyle=(2, 1), edgewidth=1.5, ) p = Plot(x, y).add(mark).plot() ax = p._figure.axes[0] for bar in ax.patches: assert bar.get_facecolor() == to_rgba(mark.color, mark.alpha) assert bar.get_edgecolor() == to_rgba(mark.edgecolor, mark.edgealpha) # See comments in plotting method for why we need these adjustments assert bar.get_linewidth() == mark.edgewidth * 2 expected_dashes = (mark.edgestyle[0] / 2, mark.edgestyle[1] / 2) assert bar.get_linestyle() == (0, expected_dashes) def test_mapped_properties(self): x = ["a", "b"] y = [1, 2] mark = Bar(alpha=.2) p = Plot(x, y, color=x, edgewidth=y).add(mark).plot() ax = 
p._figure.axes[0] colors = p._theme["axes.prop_cycle"].by_key()["color"] for i, bar in enumerate(ax.patches): assert bar.get_facecolor() == to_rgba(colors[i], mark.alpha) assert bar.get_edgecolor() == to_rgba(colors[i], 1) assert ax.patches[0].get_linewidth() < ax.patches[1].get_linewidth() def test_zero_height_skipped(self): p = Plot(["a", "b", "c"], [1, 0, 2]).add(Bar()).plot() ax = p._figure.axes[0] assert len(ax.patches) == 2 def test_artist_kws_clip(self): p = Plot(["a", "b"], [1, 2]).add(Bar({"clip_on": False})).plot() patch = p._figure.axes[0].patches[0] assert patch.clipbox is None
TestBar
python
pytorch__pytorch
torch/distributed/distributed_c10d.py
{ "start": 16165, "end": 18566 }
class ____: """ A class to build point-to-point operations for ``batch_isend_irecv``. This class builds the type of P2P operation, communication buffer, peer rank, Process Group, and tag. Instances of this class will be passed to ``batch_isend_irecv`` for point-to-point communications. Args: op (Callable): A function to send data to or receive data from a peer process. The type of ``op`` is either ``torch.distributed.isend`` or ``torch.distributed.irecv``. tensor (Tensor): Tensor to send or receive. peer (int, optional): Destination or source rank. group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. tag (int, optional): Tag to match send with recv. group_peer (int, optional): Destination or source rank. """ def __init__( self, op: Callable, tensor: torch.Tensor, peer: int | None = None, group: ProcessGroup | None = None, tag: int = 0, group_peer: int | None = None, ): """Init.""" self.op = op self.tensor = tensor self.group = _group_or_default_group(group) self.peer = _canonicalize_group_rank( self.group, peer, group_peer, return_global=True ) self.tag = tag self.group_peer = _canonicalize_group_rank(self.group, peer, group_peer) def __new__( cls, op: Callable, tensor: torch.Tensor, peer: int | None = None, group: ProcessGroup | None = None, tag: int = 0, group_peer: int | None = None, ): """Create and return a new instance of the class.""" _check_op(op) _check_single_tensor(tensor, "tensor") return object.__new__(cls) def __repr__(self): my_group_rank = get_rank(self.group) op_name = self.op.__name__ group_name = self.group.group_name if self.group else "default_pg" if "send" in op_name: s = my_group_rank d = self.group_peer elif "recv" in op_name: s = self.group_peer d = my_group_rank else: return super().__repr__() return f"P2POp({op_name} pg={group_name}, group_src={s}, group_dst={d}, {self.tensor.shape}, {self.tensor.dtype})"
P2POp
python
davidhalter__jedi
jedi/inference/base_value.py
{ "start": 12212, "end": 12494 }
class ____(Value): def __init__(self, inference_state, parent_context, tree_node): super().__init__(inference_state, parent_context) self.tree_node = tree_node def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
TreeValue
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/properties.py
{ "start": 16154, "end": 16623 }
class ____(ColumnProperty[_T], _DeclarativeMapped[_T]): """Declarative front-end for the :class:`.ColumnProperty` class. Public constructor is the :func:`_orm.column_property` function. .. versionchanged:: 2.0 Added :class:`_orm.MappedSQLExpression` as a Declarative compatible subclass for :class:`_orm.ColumnProperty`. .. seealso:: :class:`.MappedColumn` """ inherit_cache = True """:meta private:"""
MappedSQLExpression
python
dagster-io__dagster
python_modules/libraries/dagster-deltalake/dagster_deltalake/io_manager.py
{ "start": 1302, "end": 1377 }
class ____(str, Enum): pyarrow = "pyarrow" rust = "rust"
WriterEngine
python
getsentry__sentry
tests/sentry/users/api/endpoints/test_user_authenticator_enroll.py
{ "start": 12929, "end": 22510 }
class ____(APITestCase): endpoint = "sentry-api-0-user-authenticator-enroll" def setUp(self) -> None: self.organization = self.create_organization(owner=self.create_user("foo@example.com")) self.user = self.create_user("bar@example.com", is_superuser=False) self.login_as(user=self.user) self.require_2fa_for_organization() self.assertFalse(self.user.has_2fa()) @assume_test_silo_mode(SiloMode.REGION) def require_2fa_for_organization(self): self.organization.update(flags=F("flags").bitor(Organization.flags.require_2fa)) self.assertTrue(self.organization.flags.require_2fa.is_set) def _assert_pending_invite_details_in_session(self, om): assert self.client.session["invite_token"] == om.token assert self.client.session["invite_member_id"] == om.id assert self.client.session["invite_organization_id"] == om.organization_id def create_existing_om(self): with assume_test_silo_mode(SiloMode.REGION), outbox_runner(): OrganizationMember.objects.create( user_id=self.user.id, role="member", organization=self.organization ) def get_om_and_init_invite(self): with assume_test_silo_mode(SiloMode.REGION), outbox_runner(): om = OrganizationMember.objects.create( email="newuser@example.com", role="member", token="abc", organization=self.organization, ) resp = self.client.get( reverse( "sentry-api-0-organization-accept-organization-invite", args=[self.organization.slug, om.id, om.token], ) ) assert resp.status_code == 200 self._assert_pending_invite_details_in_session(om) return om def assert_invite_accepted(self, response, member_id: int) -> None: with assume_test_silo_mode(SiloMode.REGION): om = OrganizationMember.objects.get(id=member_id) assert om.user_id == self.user.id assert om.email is None with assume_test_silo_mode(SiloMode.REGION): serialized_member = serialize_member(om).get_audit_log_metadata() AuditLogEntry.objects.get( organization_id=self.organization.id, target_object=om.id, target_user=self.user, event=audit_log.get_event_id("MEMBER_ACCEPT"), data=serialized_member, ) 
assert not self.client.session.get("invite_token") assert not self.client.session.get("invite_member_id") @override_options({"system.url-prefix": "https://testserver"}) def setup_u2f(self, om): # We have to add the invite details back in to the session # prior to .save_session() since this re-creates the session property # when under test. See here for more details: # https://docs.djangoproject.com/en/2.2/topics/testing/tools/#django.test.Client.session self.session["webauthn_register_state"] = "state" self.session["invite_token"] = self.client.session["invite_token"] self.session["invite_member_id"] = self.client.session["invite_member_id"] self.session["invite_organization_id"] = self.client.session["invite_organization_id"] self.save_session() return self.get_success_response( "me", "u2f", method="post", **{"deviceName": "device name", "challenge": "challenge", "response": "response"}, ) def test_cannot_accept_invite_pending_invite__2fa_required(self) -> None: om = self.get_om_and_init_invite() with assume_test_silo_mode(SiloMode.REGION): om = OrganizationMember.objects.get(id=om.id) assert om.user_id is None assert om.email == "newuser@example.com" @mock.patch("sentry.auth.authenticators.U2fInterface.try_enroll", return_value=True) def test_accept_pending_invite__u2f_enroll(self, try_enroll: mock.MagicMock) -> None: om = self.get_om_and_init_invite() resp = self.setup_u2f(om) self.assert_invite_accepted(resp, om.id) @mock.patch("sentry.auth.authenticators.SmsInterface.validate_otp", return_value=True) @mock.patch("sentry.auth.authenticators.SmsInterface.send_text", return_value=True) @override_options({"sms.twilio-account": "twilio-account"}) def test_accept_pending_invite__sms_enroll( self, send_text: mock.MagicMock, validate_otp: mock.MagicMock ) -> None: # XXX: Pretend an unbound function exists. 
validate_otp.__func__ = None om = self.get_om_and_init_invite() # setup sms self.get_success_response( "me", "sms", method="post", **{"secret": "secret12", "phone": "1231234"} ) resp = self.get_success_response( "me", "sms", method="post", **{ "secret": "secret12", "phone": "1231234", "otp": "123123", "memberId": om.id, "token": om.token, }, ) assert validate_otp.call_count == 1 assert validate_otp.call_args == mock.call("123123") interface = Authenticator.objects.get_interface(user=self.user, interface_id="sms") assert isinstance(interface, SmsInterface) assert interface.phone_number == "1231234" self.assert_invite_accepted(resp, om.id) @mock.patch("sentry.auth.authenticators.TotpInterface.validate_otp", return_value=True) def test_accept_pending_invite__totp_enroll(self, validate_otp: mock.MagicMock) -> None: # XXX: Pretend an unbound function exists. validate_otp.__func__ = None om = self.get_om_and_init_invite() # setup totp self.get_success_response("me", "totp") resp = self.get_success_response( "me", "totp", method="post", **{"secret": "secret12", "otp": "1234", "memberId": om.id, "token": om.token}, ) interface = Authenticator.objects.get_interface(user=self.user, interface_id="totp") assert interface self.assert_invite_accepted(resp, om.id) @mock.patch("sentry.users.api.endpoints.user_authenticator_enroll.logger") @mock.patch("sentry.auth.authenticators.U2fInterface.try_enroll", return_value=True) def test_user_already_org_member(self, try_enroll: mock.MagicMock, log: mock.MagicMock) -> None: om = self.get_om_and_init_invite() self.create_existing_om() self.setup_u2f(om) with assume_test_silo_mode(SiloMode.REGION): assert not OrganizationMember.objects.filter(id=om.id).exists() log.info.assert_called_once_with( "Pending org invite not accepted - User already org member", extra={"organization_id": self.organization.id, "user_id": self.user.id}, ) @mock.patch("sentry.users.api.endpoints.user_authenticator_enroll.logger") 
@mock.patch("sentry.auth.authenticators.U2fInterface.try_enroll", return_value=True) def test_org_member_does_not_exist( self, try_enroll: mock.MagicMock, log: mock.MagicMock ) -> None: om = self.get_om_and_init_invite() # Mutate the OrganizationMember, putting it out of sync with the # pending member cookie. with ( assume_test_silo_mode(SiloMode.REGION), unguarded_write(using=router.db_for_write(OrganizationMember)), ): om.update(id=om.id + 1) self.setup_u2f(om) with assume_test_silo_mode(SiloMode.REGION): om = OrganizationMember.objects.get(id=om.id) assert om.user_id is None assert om.email == "newuser@example.com" assert log.exception.call_count == 1 assert log.exception.call_args[0][0] == "Invalid pending invite cookie" @mock.patch("sentry.users.api.endpoints.user_authenticator_enroll.logger") @mock.patch("sentry.auth.authenticators.U2fInterface.try_enroll", return_value=True) def test_invalid_token(self, try_enroll: mock.MagicMock, log: mock.MagicMock) -> None: om = self.get_om_and_init_invite() # Mutate the OrganizationMember, putting it out of sync with the # pending member cookie. 
with ( assume_test_silo_mode(SiloMode.REGION), unguarded_write(using=router.db_for_write(OrganizationMember)), ): om.update(token="123") self.setup_u2f(om) with assume_test_silo_mode(SiloMode.REGION): om = OrganizationMember.objects.get(id=om.id) assert om.user_id is None assert om.email == "newuser@example.com" @mock.patch("sentry.users.api.endpoints.user_authenticator_enroll.logger") @mock.patch("sentry.auth.authenticators.U2fInterface.try_enroll", return_value=True) @override_options({"system.url-prefix": "https://testserver"}) def test_enroll_without_pending_invite__no_error( self, try_enroll: mock.MagicMock, log: mock.MagicMock ) -> None: self.session["webauthn_register_state"] = "state" self.save_session() self.get_success_response( "me", "u2f", method="post", **{ "deviceName": "device name", "challenge": "challenge", "response": "response", }, ) assert log.error.called is False
AcceptOrganizationInviteTest
python
allegroai__clearml
clearml/backend_api/services/v2_20/tasks.py
{ "start": 191973, "end": 194687 }
class ____(Response): """ Response of tasks.dequeue endpoint. :param updated: Number of tasks updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict :param dequeued: Number of tasks dequeued (0 or 1) :type dequeued: int """ _service = "tasks" _action = "dequeue" _version = "2.20" _schema = { "definitions": {}, "properties": { "dequeued": { "description": "Number of tasks dequeued (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__( self, updated: Optional[int] = None, fields: Optional[dict] = None, dequeued: Optional[int] = None, **kwargs: Any ) -> None: super(DequeueResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields self.dequeued = dequeued @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self) -> Optional[dict]: return self._property_fields @fields.setter def fields(self, value: Optional[dict]) -> None: if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value @schema_property("dequeued") def dequeued(self) -> Optional[int]: return self._property_dequeued @dequeued.setter def dequeued(self, value: Optional[int]) -> None: if value is None: self._property_dequeued = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, 
"dequeued", six.integer_types) self._property_dequeued = value
DequeueResponse
python
django__django
tests/order_with_respect_to/models.py
{ "start": 812, "end": 933 }
class ____(models.Model): entity = models.OneToOneField("Entity", primary_key=True, on_delete=models.CASCADE)
Dimension
python
huggingface__transformers
src/transformers/models/blip/processing_blip.py
{ "start": 872, "end": 1338 }
class ____(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "stride": 0, "return_overflowing_tokens": False, "return_special_tokens_mask": False, "return_offsets_mapping": False, "return_token_type_ids": False, "return_length": False, "verbose": True, }, }
BlipProcessorKwargs
python
getsentry__sentry
tests/sentry/api/serializers/test_grouptagvalue.py
{ "start": 170, "end": 716 }
class ____(TestCase): def test_with_user(self) -> None: user = self.create_user() grouptagvalue = GroupTagValue( group_id=0, key="sentry:user", value="username:ted", times_seen=1, first_seen=datetime(2018, 1, 1), last_seen=datetime(2018, 1, 1), ) result = serialize(grouptagvalue, user) assert result["key"] == "user" assert result["value"] == "username:ted" assert result["name"] == "ted"
GroupTagValueSerializerTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-gitlab/components.py
{ "start": 2014, "end": 2920 }
class ____(SubstreamPartitionRouter): def stream_slices(self) -> Iterable[StreamSlice]: parent_stream = self.parent_stream_configs[0].stream projects_list = self.config.get("projects_list", []) group_project_ids = [] for partition in parent_stream.generate_partitions(): for record in partition.read(): group_project_ids.extend([i["path_with_namespace"] for i in record["projects"]]) if group_project_ids: for project_id in group_project_ids: if not projects_list or projects_list and project_id in projects_list: yield StreamSlice(partition={"id": project_id.replace("/", "%2F")}, cursor_slice={}) else: for project_id in projects_list: yield StreamSlice(partition={"id": project_id.replace("/", "%2F")}, cursor_slice={})
ProjectStreamsPartitionRouter
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_self/SLF001.py
{ "start": 353, "end": 1763 }
class ____(metaclass=BazMeta): def __init__(self): self.public_thing = "foo" self._private_thing = "bar" self.__really_private_thing = "baz" self.bar = Bar() def __str__(self): return "foo" def get_bar(): if self.bar._private: # SLF001 return None if self.bar()._private: # SLF001 return None if Bar._private_thing: # SLF001 return None if Foo._private_thing: return None Foo = Bar() if Foo._private_thing: # SLF001 return None return self.bar def public_func(self): super().public_func() def _private_func(self): super()._private_func() def __really_private_func(self, arg): super().__really_private_func(arg) def __eq__(self, other): return self._private_thing == other._private_thing foo = Foo() print(foo._private_thing) # SLF001 print(foo.__really_private_thing) # SLF001 print(foo._private_func()) # SLF001 print(foo.__really_private_func(1)) # SLF001 print(foo.bar._private) # SLF001 print(foo()._private_thing) # SLF001 print(foo()._private_thing__) # SLF001 print(foo.public_thing) print(foo.public_func()) print(foo.__dict__) print(foo.__str__()) print(foo().__class__) print(foo._asdict()) import os os._exit() from enum import Enum Enum._missing_(1) # OK
Foo
python
tiangolo__fastapi
docs_src/body_updates/tutorial001.py
{ "start": 156, "end": 906 }
class ____(BaseModel): name: Union[str, None] = None description: Union[str, None] = None price: Union[float, None] = None tax: float = 10.5 tags: List[str] = [] items = { "foo": {"name": "Foo", "price": 50.2}, "bar": {"name": "Bar", "description": "The bartenders", "price": 62, "tax": 20.2}, "baz": {"name": "Baz", "description": None, "price": 50.2, "tax": 10.5, "tags": []}, } @app.get("/items/{item_id}", response_model=Item) async def read_item(item_id: str): return items[item_id] @app.put("/items/{item_id}", response_model=Item) async def update_item(item_id: str, item: Item): update_item_encoded = jsonable_encoder(item) items[item_id] = update_item_encoded return update_item_encoded
Item
python
python-attrs__attrs
tests/test_make.py
{ "start": 43174, "end": 46420 }
class ____: """ Tests for attribute conversion. """ def test_converter(self): """ Return value of converter is used as the attribute's value. """ C = make_class( "C", {"x": attr.ib(converter=lambda v: v + 1), "y": attr.ib()} ) c = C(1, 2) assert c.x == 2 assert c.y == 2 def test_converter_wrapped_takes_self(self): """ When wrapped and passed `takes_self`, the converter receives the instance that's being initializes -- and the return value is used as the field's value. """ def converter_with_self(v, self_): return v * self_.y @attr.define class C: x: int = attr.field( converter=attr.Converter(converter_with_self, takes_self=True) ) y = 42 assert 84 == C(2).x def test_converter_wrapped_takes_field(self): """ When wrapped and passed `takes_field`, the converter receives the field definition -- and the return value is used as the field's value. """ def converter_with_field(v, field): assert isinstance(field, attr.Attribute) return v * field.metadata["x"] @attr.define class C: x: int = attr.field( converter=attr.Converter( converter_with_field, takes_field=True ), metadata={"x": 42}, ) assert 84 == C(2).x @given(integers(), booleans()) def test_convert_property(self, val, init): """ Property tests for attributes using converter. """ C = make_class( "C", { "y": attr.ib(), "x": attr.ib( init=init, default=val, converter=lambda v: v + 1 ), }, ) c = C(2) assert c.x == val + 1 assert c.y == 2 @given(integers(), booleans()) def test_converter_factory_property(self, val, init): """ Property tests for attributes with converter, and a factory default. """ C = make_class( "C", { "y": attr.ib(), "x": attr.ib( init=init, default=Factory(lambda: val), converter=lambda v: v + 1, ), }, ) c = C(2) assert c.x == val + 1 assert c.y == 2 def test_convert_before_validate(self): """ Validation happens after conversion. 
""" def validator(inst, attr, val): raise RuntimeError("foo") C = make_class( "C", { "x": attr.ib(validator=validator, converter=lambda v: 1 / 0), "y": attr.ib(), }, ) with pytest.raises(ZeroDivisionError): C(1, 2) def test_frozen(self): """ Converters circumvent immutability. """ C = make_class( "C", {"x": attr.ib(converter=lambda v: int(v))}, frozen=True ) C("1")
TestConverter
python
mkdocs__mkdocs
mkdocs/tests/structure/page_tests.py
{ "start": 31991, "end": 32595 }
class ____(unittest.TestCase): def setUp(self): self.default = os.environ.get('SOURCE_DATE_EPOCH', None) os.environ['SOURCE_DATE_EPOCH'] = '0' def test_source_date_epoch(self): cfg = load_config() fl = File('testing.md', cfg.docs_dir, cfg.site_dir, cfg.use_directory_urls) pg = Page('Foo', fl, cfg) self.assertEqual(pg.update_date, '1970-01-01') def tearDown(self): if self.default is not None: os.environ['SOURCE_DATE_EPOCH'] = self.default else: del os.environ['SOURCE_DATE_EPOCH']
SourceDateEpochTests
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
{ "start": 8422, "end": 8608 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneMessageEvent, GrapheneStepEvent) name = "ExecutionStepSuccessEvent"
GrapheneExecutionStepSuccessEvent
python
mlflow__mlflow
mlflow/utils/logging_utils.py
{ "start": 421, "end": 1759 }
class ____: """ A Python stream for use with event logging APIs throughout MLflow (`eprint()`, `logger.info()`, etc.). This stream wraps `sys.stderr`, forwarding `write()` and `flush()` calls to the stream referred to by `sys.stderr` at the time of the call. It also provides capabilities for disabling the stream to silence event logs. """ def __init__(self): self._enabled = True def write(self, text): if self._enabled: sys.stderr.write(text) def flush(self): if self._enabled: sys.stderr.flush() @property def enabled(self): return self._enabled @enabled.setter def enabled(self, value): self._enabled = value MLFLOW_LOGGING_STREAM = MlflowLoggingStream() def disable_logging(): """ Disables the `MlflowLoggingStream` used by event logging APIs throughout MLflow (`eprint()`, `logger.info()`, etc), silencing all subsequent event logs. """ MLFLOW_LOGGING_STREAM.enabled = False def enable_logging(): """ Enables the `MlflowLoggingStream` used by event logging APIs throughout MLflow (`eprint()`, `logger.info()`, etc), emitting all subsequent event logs. This reverses the effects of `disable_logging()`. """ MLFLOW_LOGGING_STREAM.enabled = True
MlflowLoggingStream
python
jazzband__django-polymorphic
src/polymorphic/managers.py
{ "start": 189, "end": 1489 }
class ____(models.Manager): """ Manager for PolymorphicModel Usually not explicitly needed, except if a custom manager or a custom queryset class is to be used. """ queryset_class = PolymorphicQuerySet @classmethod def from_queryset(cls, queryset_class, class_name=None): manager = super().from_queryset(queryset_class, class_name=class_name) # also set our version, Django uses _queryset_class manager.queryset_class = queryset_class return manager def get_queryset(self): qs = self.queryset_class(self.model, using=self._db, hints=self._hints) if self.model._meta.proxy: qs = qs.instance_of(self.model) return qs def __str__(self): return ( f"{self.__class__.__name__} (PolymorphicManager) using {self.queryset_class.__name__}" ) # Proxied methods def non_polymorphic(self): return self.all().non_polymorphic() def instance_of(self, *args): return self.all().instance_of(*args) def not_instance_of(self, *args): return self.all().not_instance_of(*args) def get_real_instances(self, base_result_objects=None): return self.all().get_real_instances(base_result_objects=base_result_objects)
PolymorphicManager
python
tensorflow__tensorflow
tensorflow/python/tpu/tpu_embedding_v3_checkpoint_adapter_test.py
{ "start": 2197, "end": 22828 }
class ____(test.TestCase): def test_adapt_unsharded_to_sharded_simple(self): adapter = ( tpu_embedding_v3_checkpoint_adapter.TpuEmbeddingV3CheckpointAdapter( None ) ) layout = create_layout( tables_name="some_feature", stacked_table_name="some_feature", num_sparse_cores=8, num_partitions=2, unsharded_shape=(20, 4), unsharded_padded_shape=(24, 8), row_offset=0, shard_rotation=8, ) t = math_ops.range(start=0.0, limit=20.0, delta=1)[ :, None ] * array_ops.ones((20, 4)) adapter.initialize_reshard_callbacks({"some_feature": layout}) callback = adapter.get_reshard_callback("some_feature") # Check partition index 1 (second parition) self.assertAllEqual( callback.reshard([t], "128 8 8,12:0,8"), tf_constant([ [2, 2, 2, 2, 0, 0, 0, 0], [10, 10, 10, 10, 0, 0, 0, 0], [18, 18, 18, 18, 0, 0, 0, 0], [3, 3, 3, 3, 0, 0, 0, 0], [11, 11, 11, 11, 0, 0, 0, 0], [19, 19, 19, 19, 0, 0, 0, 0], [4, 4, 4, 4, 0, 0, 0, 0], [12, 12, 12, 12, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [5, 5, 5, 5, 0, 0, 0, 0], [13, 13, 13, 13, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ]), ) def test_adapt_unsharded_to_sharded_stacked(self): adapter = ( tpu_embedding_v3_checkpoint_adapter.TpuEmbeddingV3CheckpointAdapter( None ) ) layouts = { "two": create_layout( tables_name="two", stacked_table_name="one_two", num_sparse_cores=8, num_partitions=4, unsharded_shape=(32, 4), unsharded_padded_shape=(32, 8), row_offset=3, shard_rotation=1, total_rows_per_sparse_core_shard=7, ), "one": create_layout( tables_name="one", stacked_table_name="one_two", num_sparse_cores=8, num_partitions=4, unsharded_shape=(20, 4), unsharded_padded_shape=(24, 8), row_offset=0, shard_rotation=0, total_rows_per_sparse_core_shard=7, ), } one_t = math_ops.range(start=0.0, limit=20.0, delta=1)[ :, None ] * array_ops.ones((20, 4)) two_t = math_ops.range(start=50.0, limit=82.0, delta=1)[ :, None ] * array_ops.ones((32, 4)) adapter.initialize_reshard_callbacks(layouts) callback = adapter.get_reshard_callback("one") self.assertEqual(callback.object_name(), 
"one_two") updated_keys, updated_slices = callback.update_restore_inputs( "path/to/embedding/one/in/checkpoint", "56 8 14,28:0,8" ) self.assertAllEqual( updated_keys, [ "path/to/embedding/one/in/checkpoint", "path/to/embedding/two/in/checkpoint", ], ) self.assertAllEqual( updated_slices, ["20 4 0,20:0,4", "32 4 0,32:0,4"], ) actual = callback.reshard([one_t, two_t], "56 8 14,14:0,8") self.assertAllEqual( actual, tf_constant([ # table one shard 2 [2, 2, 2, 2, 0, 0, 0, 0], [10, 10, 10, 10, 0, 0, 0, 0], [18, 18, 18, 18, 0, 0, 0, 0], # table two shard 2 [51, 51, 51, 51, 0, 0, 0, 0], [59, 59, 59, 59, 0, 0, 0, 0], [67, 67, 67, 67, 0, 0, 0, 0], [75, 75, 75, 75, 0, 0, 0, 0], # table one shard 3 [3, 3, 3, 3, 0, 0, 0, 0], [11, 11, 11, 11, 0, 0, 0, 0], [19, 19, 19, 19, 0, 0, 0, 0], # table two shard 3 [52, 52, 52, 52, 0, 0, 0, 0], [60, 60, 60, 60, 0, 0, 0, 0], [68, 68, 68, 68, 0, 0, 0, 0], [76, 76, 76, 76, 0, 0, 0, 0], ]), ) # Check that full resharding works. actual_full = callback.reshard([one_t, two_t], "56 8 0,56:0,8") self.assertAllEqual( actual_full, tf_constant( [ # table one shard 0 [0, 0, 0, 0, 0, 0, 0, 0], [8, 8, 8, 8, 0, 0, 0, 0], [16, 16, 16, 16, 0, 0, 0, 0], # table two shard 0 [57, 57, 57, 57, 0, 0, 0, 0], [65, 65, 65, 65, 0, 0, 0, 0], [73, 73, 73, 73, 0, 0, 0, 0], [81, 81, 81, 81, 0, 0, 0, 0], # table one shard 1 [1, 1, 1, 1, 0, 0, 0, 0], [9, 9, 9, 9, 0, 0, 0, 0], [17, 17, 17, 17, 0, 0, 0, 0], # table two shard 1 [50, 50, 50, 50, 0, 0, 0, 0], [58, 58, 58, 58, 0, 0, 0, 0], [66, 66, 66, 66, 0, 0, 0, 0], [74, 74, 74, 74, 0, 0, 0, 0], # table one shard 2 [2, 2, 2, 2, 0, 0, 0, 0], [10, 10, 10, 10, 0, 0, 0, 0], [18, 18, 18, 18, 0, 0, 0, 0], # table two shard 2 [51, 51, 51, 51, 0, 0, 0, 0], [59, 59, 59, 59, 0, 0, 0, 0], [67, 67, 67, 67, 0, 0, 0, 0], [75, 75, 75, 75, 0, 0, 0, 0], # table one shard 3 [3, 3, 3, 3, 0, 0, 0, 0], [11, 11, 11, 11, 0, 0, 0, 0], [19, 19, 19, 19, 0, 0, 0, 0], # table two shard 3 [52, 52, 52, 52, 0, 0, 0, 0], [60, 60, 60, 60, 0, 0, 0, 0], [68, 
68, 68, 68, 0, 0, 0, 0], [76, 76, 76, 76, 0, 0, 0, 0], # table one shard 4 [4, 4, 4, 4, 0, 0, 0, 0], [12, 12, 12, 12, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], # table two shard 4 [53, 53, 53, 53, 0, 0, 0, 0], [61, 61, 61, 61, 0, 0, 0, 0], [69, 69, 69, 69, 0, 0, 0, 0], [77, 77, 77, 77, 0, 0, 0, 0], # table one shard 5 [5, 5, 5, 5, 0, 0, 0, 0], [13, 13, 13, 13, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], # table two shard 5 [54, 54, 54, 54, 0, 0, 0, 0], [62, 62, 62, 62, 0, 0, 0, 0], [70, 70, 70, 70, 0, 0, 0, 0], [78, 78, 78, 78, 0, 0, 0, 0], # table one shard 6 [6, 6, 6, 6, 0, 0, 0, 0], [14, 14, 14, 14, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], # table two shard 6 [55, 55, 55, 55, 0, 0, 0, 0], [63, 63, 63, 63, 0, 0, 0, 0], [71, 71, 71, 71, 0, 0, 0, 0], [79, 79, 79, 79, 0, 0, 0, 0], # table one shard 7 [7, 7, 7, 7, 0, 0, 0, 0], [15, 15, 15, 15, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], # table two shard 7 [56, 56, 56, 56, 0, 0, 0, 0], [64, 64, 64, 64, 0, 0, 0, 0], [72, 72, 72, 72, 0, 0, 0, 0], [80, 80, 80, 80, 0, 0, 0, 0], ], dtype=dtypes.float32, ), ) self.assertAllEqual(callback._checkpoint_local_names, ["one", "two"]) self.assertAllEqual( [l.table_name for l in callback._to_shard_layout], ["one", "two"], ) def test_adapt_sharded_to_unsharded_simple(self): pass def test_adapt_sharded_to_unsharded_stacked(self): pass def test_is_layouts_same_works(self): layout = create_layout( tables_name="some_feature", stacked_table_name="some_feature", num_sparse_cores=8, num_partitions=8, unsharded_shape=(100, 4), unsharded_padded_shape=(128, 8), row_offset=0, shard_rotation=0, ) layouts = sparse_core_layout_pb2.SparseCoreTableLayouts() layouts.tables.append(layout) adapter = ( tpu_embedding_v3_checkpoint_adapter.TpuEmbeddingV3CheckpointAdapter( layouts ) ) self.assertTrue(adapter.is_layouts_same({layout.table_name: layout})) layout.num_sparse_cores = 3 self.assertFalse(adapter.is_layouts_same({layout.table_name: layout})) def test_adapt_to_different_sharded_stacked(self): source_layouts = { 
"one": create_layout( tables_name="one", stacked_table_name="one_two_three", num_sparse_cores=4, num_partitions=2, unsharded_shape=(6, 5), unsharded_padded_shape=(8, 8), row_offset=0, shard_rotation=0, total_rows_per_sparse_core_shard=6, ), "two": create_layout( tables_name="two", stacked_table_name="one_two_three", num_sparse_cores=4, num_partitions=2, unsharded_shape=(7, 4), unsharded_padded_shape=(8, 8), row_offset=2, shard_rotation=1, total_rows_per_sparse_core_shard=6, ), "three": create_layout( tables_name="three", stacked_table_name="one_two_three", num_sparse_cores=4, num_partitions=2, unsharded_shape=(15, 3), unsharded_padded_shape=(16, 8), row_offset=4, shard_rotation=2, total_rows_per_sparse_core_shard=6, ), } src_layouts_pb = sparse_core_layout_pb2.SparseCoreTableLayouts() src_layouts_pb.tables.extend(source_layouts.values()) sc_to_sc_adapter = ( tpu_embedding_v3_checkpoint_adapter.TpuEmbeddingV3CheckpointAdapter( layouts=src_layouts_pb ) ) target_layouts = { "one": create_layout( tables_name="one", stacked_table_name="one_two_three", num_sparse_cores=8, num_partitions=4, unsharded_shape=(6, 5), unsharded_padded_shape=(8, 8), row_offset=0, shard_rotation=0, total_rows_per_sparse_core_shard=4, ), "two": create_layout( tables_name="two", stacked_table_name="one_two_three", num_sparse_cores=8, num_partitions=4, unsharded_shape=(7, 4), unsharded_padded_shape=(8, 8), row_offset=1, shard_rotation=1, total_rows_per_sparse_core_shard=4, ), "three": create_layout( tables_name="three", stacked_table_name="one_two_three", num_sparse_cores=8, num_partitions=4, unsharded_shape=(15, 3), unsharded_padded_shape=(16, 8), row_offset=2, shard_rotation=2, total_rows_per_sparse_core_shard=4, ), } # this take a mapping[str, sparse_core_layout_pb2.SparseCoreTableLayout] sc_to_sc_adapter.initialize_reshard_callbacks(target_layouts) callback = sc_to_sc_adapter.get_reshard_callback("one_two_three") self.assertEqual(callback.object_name(), "one_two_three") updated_keys, 
updated_slices = callback.update_restore_inputs( "path/to/embedding/one_two_three/in/checkpoint", "24 8 6,12:0,8" ) self.assertAllEqual( updated_keys, [ "path/to/embedding/one_two_three/in/checkpoint", ], ) self.assertAllEqual( updated_slices, ["24 8 0,24:0,8"], ) one_two_three = tf_constant([ # table one shard 0 [0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 4, 4, 4, 0, 0, 0], [13, 13, 13, 13, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [102, 102, 102, 0, 0, 0, 0, 0], [106, 106, 106, 0, 0, 0, 0, 0], [110, 110, 110, 0, 0, 0, 0, 0], [114, 114, 114, 0, 0, 0, 0, 0], # table one shard 1 [1, 1, 1, 1, 1, 0, 0, 0], [5, 5, 5, 5, 5, 0, 0, 0], [10, 10, 10, 10, 0, 0, 0, 0], [14, 14, 14, 14, 0, 0, 0, 0], [103, 103, 103, 0, 0, 0, 0, 0], [107, 107, 107, 0, 0, 0, 0, 0], [111, 111, 111, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], # table one shard 2 [2, 2, 2, 2, 2, 0, 0, 0], [6, 6, 6, 6, 6, 0, 0, 0], [11, 11, 11, 11, 0, 0, 0, 0], [15, 15, 15, 15, 0, 0, 0, 0], [100, 100, 100, 0, 0, 0, 0, 0], [104, 104, 104, 0, 0, 0, 0, 0], [108, 108, 108, 0, 0, 0, 0, 0], [112, 112, 112, 0, 0, 0, 0, 0], # table one shard 3 [3, 3, 3, 3, 3, 0, 0, 0], [7, 7, 7, 7, 7, 0, 0, 0], [12, 12, 12, 12, 0, 0, 0, 0], [16, 16, 16, 16, 0, 0, 0, 0], [101, 101, 101, 0, 0, 0, 0, 0], [105, 105, 105, 0, 0, 0, 0, 0], [109, 109, 109, 0, 0, 0, 0, 0], [113, 113, 113, 0, 0, 0, 0, 0], ]) self.assertAllEqual( tf_constant([ # shard 2 [2, 2, 2, 2, 2, 0, 0, 0], [11, 11, 11, 11, 0, 0, 0, 0], [100, 100, 100, 0, 0, 0, 0, 0], [108, 108, 108, 0, 0, 0, 0, 0], # shard 3 [3, 3, 3, 3, 3, 0, 0, 0], [12, 12, 12, 12, 0, 0, 0, 0], [101, 101, 101, 0, 0, 0, 0, 0], [109, 109, 109, 0, 0, 0, 0, 0], # shard 4 [4, 4, 4, 4, 4, 0, 0, 0], [13, 13, 13, 13, 0, 0, 0, 0], [102, 102, 102, 0, 0, 0, 0, 0], [110, 110, 110, 0, 0, 0, 0, 0], # shard 5 [5, 5, 5, 5, 5, 0, 0, 0], [14, 14, 14, 14, 0, 0, 0, 0], [103, 103, 103, 0, 0, 0, 0, 0], [111, 111, 111, 0, 0, 0, 0, 0], ]), callback.reshard([one_two_three], "32 8 8,16:0,8"), ) def test_adapt_to_different_sc_table_stacking(self): 
source_layouts = { "one": create_layout( tables_name="one", stacked_table_name="one_two", num_sparse_cores=4, num_partitions=2, unsharded_shape=(6, 5), unsharded_padded_shape=(8, 8), row_offset=0, shard_rotation=0, total_rows_per_sparse_core_shard=4, ), "two": create_layout( tables_name="two", stacked_table_name="one_two", num_sparse_cores=4, num_partitions=2, unsharded_shape=(7, 4), unsharded_padded_shape=(8, 8), row_offset=2, shard_rotation=1, total_rows_per_sparse_core_shard=4, ), "three": create_layout( tables_name="three", stacked_table_name="three", num_sparse_cores=4, num_partitions=2, unsharded_shape=(15, 3), unsharded_padded_shape=(16, 8), row_offset=0, shard_rotation=0, total_rows_per_sparse_core_shard=4, ), } src_layouts_pb = sparse_core_layout_pb2.SparseCoreTableLayouts() src_layouts_pb.tables.extend(source_layouts.values()) sc_to_sc_adapter = ( tpu_embedding_v3_checkpoint_adapter.TpuEmbeddingV3CheckpointAdapter( layouts=src_layouts_pb ) ) target_layouts = { "one": create_layout( tables_name="one", stacked_table_name="one", num_sparse_cores=8, num_partitions=4, unsharded_shape=(6, 5), unsharded_padded_shape=(8, 8), row_offset=0, shard_rotation=0, total_rows_per_sparse_core_shard=1, ), "two": create_layout( tables_name="two", stacked_table_name="two_three", num_sparse_cores=8, num_partitions=4, unsharded_shape=(7, 4), unsharded_padded_shape=(8, 8), row_offset=0, shard_rotation=0, total_rows_per_sparse_core_shard=3, ), "three": create_layout( tables_name="three", stacked_table_name="two_three", num_sparse_cores=8, num_partitions=4, unsharded_shape=(15, 3), unsharded_padded_shape=(16, 8), row_offset=1, shard_rotation=1, total_rows_per_sparse_core_shard=3, ), } # this take a mapping[str, sparse_core_layout_pb2.SparseCoreTableLayout] sc_to_sc_adapter.initialize_reshard_callbacks(target_layouts) src_one_two = tf_constant([ # shard 0 [0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 4, 4, 4, 0, 0, 0], [13, 13, 13, 13, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], # shard 1 [1, 1, 1, 
1, 1, 0, 0, 0], [5, 5, 5, 5, 5, 0, 0, 0], [10, 10, 10, 10, 0, 0, 0, 0], [14, 14, 14, 14, 0, 0, 0, 0], # shard 2 [2, 2, 2, 2, 2, 0, 0, 0], [6, 6, 6, 6, 6, 0, 0, 0], [11, 11, 11, 11, 0, 0, 0, 0], [15, 15, 15, 15, 0, 0, 0, 0], # shard 3 [3, 3, 3, 3, 3, 0, 0, 0], [7, 7, 7, 7, 7, 0, 0, 0], [12, 12, 12, 12, 0, 0, 0, 0], [16, 16, 16, 16, 0, 0, 0, 0], ]) src_three = tf_constant([ # shard 0 [100, 100, 100, 0, 0, 0, 0, 0], [104, 104, 104, 0, 0, 0, 0, 0], [108, 108, 108, 0, 0, 0, 0, 0], [112, 112, 112, 0, 0, 0, 0, 0], # shard 1 [101, 101, 101, 0, 0, 0, 0, 0], [105, 105, 105, 0, 0, 0, 0, 0], [109, 109, 109, 0, 0, 0, 0, 0], [113, 113, 113, 0, 0, 0, 0, 0], # shard 2 [102, 102, 102, 0, 0, 0, 0, 0], [106, 106, 106, 0, 0, 0, 0, 0], [110, 110, 110, 0, 0, 0, 0, 0], [114, 114, 114, 0, 0, 0, 0, 0], # shard 3 [103, 103, 103, 0, 0, 0, 0, 0], [107, 107, 107, 0, 0, 0, 0, 0], [111, 111, 111, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ]) with self.subTest("one"): callback = sc_to_sc_adapter.get_reshard_callback("one") self.assertEqual(callback.object_name(), "one") updated_keys, updated_slices = callback.update_restore_inputs( "path/to/embedding/one/in/checkpoint", "8 8 2,3:0,8" ) self.assertAllEqual( updated_keys, [ "path/to/embedding/one_two/in/checkpoint", ], ) self.assertAllEqual( updated_slices, ["16 8 0,16:0,8"], ) self.assertAllEqual( tf_constant([ [2, 2, 2, 2, 2, 0, 0, 0], [3, 3, 3, 3, 3, 0, 0, 0], [4, 4, 4, 4, 4, 0, 0, 0], [5, 5, 5, 5, 5, 0, 0, 0], ]), callback.reshard([src_one_two], "8 8 2,4:0,8"), ) self.assertAllEqual( tf_constant([ [4, 4, 4, 4, 4, 0, 0, 0], [5, 5, 5, 5, 5, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ]), callback.reshard([src_one_two], "8 8 4,4:0,8"), ) with self.subTest("two_three"): callback = sc_to_sc_adapter.get_reshard_callback("two_three") self.assertEqual(callback.object_name(), "two_three") updated_keys, updated_slices = callback.update_restore_inputs( "path/to/embedding/two_three/in/checkpoint", "24 8 8,6:0,8" ) self.assertAllEqual( 
updated_keys, [ "path/to/embedding/one_two/in/checkpoint", "path/to/embedding/three/in/checkpoint", ], ) self.assertAllEqual( updated_slices, ["16 8 0,16:0,8", "16 8 0,16:0,8"], ) self.assertAllEqual( tf_constant([ # shard 2 [12, 12, 12, 12, 0, 0, 0, 0], [101, 101, 101, 0, 0, 0, 0, 0], [109, 109, 109, 0, 0, 0, 0, 0], # shard 3 [13, 13, 13, 13, 0, 0, 0, 0], [102, 102, 102, 0, 0, 0, 0, 0], [110, 110, 110, 0, 0, 0, 0, 0], ]), callback.reshard([src_one_two, src_three], "24 8 6,6:0,8"), ) self.assertAllEqual( tf_constant([ # shard 6 [16, 16, 16, 16, 0, 0, 0, 0], [105, 105, 105, 0, 0, 0, 0, 0], [113, 113, 113, 0, 0, 0, 0, 0], # shard 7 [0, 0, 0, 0, 0, 0, 0, 0], [106, 106, 106, 0, 0, 0, 0, 0], [114, 114, 114, 0, 0, 0, 0, 0], ]), callback.reshard([src_one_two, src_three], "24 8 18,6:0,8"), ) if __name__ == "__main__": v2_compat.enable_v2_behavior() test.main()
TpuEmbeddingV3CheckpointAdapterTest
python
pydata__xarray
xarray/coding/cftime_offsets.py
{ "start": 15635, "end": 16462 }
class ____(QuarterOffset): # When converting a string to an offset, pandas converts # 'QS' to a QuarterBegin offset starting in the month of # January. When creating a QuarterBegin offset directly # from the constructor, however, the default month is March. # We follow that behavior here. _default_month = 3 _freq = "QS" _day_option = "start" def rollforward(self, date): """Roll date forward to nearest start of quarter""" if self.onOffset(date): return date else: return date + QuarterBegin(month=self.month) def rollback(self, date): """Roll date backward to nearest start of quarter""" if self.onOffset(date): return date else: return date - QuarterBegin(month=self.month)
QuarterBegin
python
pypa__pip
tests/unit/test_exceptions.py
{ "start": 8320, "end": 14084 }
class ____: def test_complete(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke. :(", context="Something went wrong\nvery wrong.", note_stmt="You did something wrong, which is what caused this error.", hint_stmt="Do it better next time, by trying harder.", ) assert rendered(err) == textwrap.dedent( """\ error: test-diagnostic × Oh no! │ It broke. :( ╰─> Something went wrong very wrong. note: You did something wrong, which is what caused this error. hint: Do it better next time, by trying harder. """ ) def test_complete_color(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke.", context="Something went wrong\nvery wrong.", note_stmt="You did something wrong.", hint_stmt="Do it better next time, by trying harder.", ) def esc(code: str = "0") -> str: return f"\x1b[{code}m" assert rendered(err, color=True) == textwrap.dedent( f"""\ {esc("1;31")}error{esc("0")}: {esc("1")}test-diagnostic{esc("0")} {esc("31")}×{esc("0")} Oh no! {esc("31")}│{esc("0")} It broke. {esc("31")}╰─>{esc("0")} Something went wrong {esc("31")} {esc("0")} very wrong. {esc("1;35")}note{esc("0")}: You did something wrong. {esc("1;36")}hint{esc("0")}: Do it better next time, by trying harder. """ ) def test_no_context(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke. :(", context=None, note_stmt="You did something wrong, which is what caused this error.", hint_stmt="Do it better next time, by trying harder.", ) assert rendered(err) == textwrap.dedent( """\ error: test-diagnostic × Oh no! It broke. :( note: You did something wrong, which is what caused this error. hint: Do it better next time, by trying harder. """ ) def test_no_note(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke. 
:(", context="Something went wrong\nvery wrong.", note_stmt=None, hint_stmt="Do it better next time, by trying harder.", ) assert rendered(err) == textwrap.dedent( """\ error: test-diagnostic × Oh no! │ It broke. :( ╰─> Something went wrong very wrong. hint: Do it better next time, by trying harder. """ ) def test_no_hint(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke. :(", context="Something went wrong\nvery wrong.", note_stmt="You did something wrong, which is what caused this error.", hint_stmt=None, ) assert rendered(err) == textwrap.dedent( """\ error: test-diagnostic × Oh no! │ It broke. :( ╰─> Something went wrong very wrong. note: You did something wrong, which is what caused this error. """ ) def test_no_context_no_hint(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke. :(", context=None, note_stmt="You did something wrong, which is what caused this error.", hint_stmt=None, ) assert rendered(err) == textwrap.dedent( """\ error: test-diagnostic × Oh no! It broke. :( note: You did something wrong, which is what caused this error. """ ) def test_no_context_no_note(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke. :(", context=None, note_stmt=None, hint_stmt="Do it better next time, by trying harder.", ) assert rendered(err) == textwrap.dedent( """\ error: test-diagnostic × Oh no! It broke. :( hint: Do it better next time, by trying harder. """ ) def test_no_hint_no_note(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke. :(", context="Something went wrong\nvery wrong.", note_stmt=None, hint_stmt=None, ) assert rendered(err) == textwrap.dedent( """\ error: test-diagnostic × Oh no! │ It broke. :( ╰─> Something went wrong very wrong. """ ) def test_no_hint_no_note_no_context(self) -> None: err = DiagnosticPipError( reference="test-diagnostic", message="Oh no!\nIt broke. 
:(", context=None, hint_stmt=None, note_stmt=None, ) assert rendered(err) == textwrap.dedent( """\ error: test-diagnostic × Oh no! It broke. :( """ )
TestDiagnosticPipErrorPresentation_Unicode
python
Textualize__textual
docs/examples/styles/text_style_all.py
{ "start": 420, "end": 1015 }
class ____(App): CSS_PATH = "text_style_all.tcss" def compose(self): yield Grid( Label("none\n" + TEXT, id="lbl1"), Label("bold\n" + TEXT, id="lbl2"), Label("italic\n" + TEXT, id="lbl3"), Label("reverse\n" + TEXT, id="lbl4"), Label("strike\n" + TEXT, id="lbl5"), Label("underline\n" + TEXT, id="lbl6"), Label("bold italic\n" + TEXT, id="lbl7"), Label("reverse strike\n" + TEXT, id="lbl8"), ) if __name__ == "__main__": app = AllTextStyleApp() app.run()
AllTextStyleApp
python
tqdm__tqdm
tqdm/utils.py
{ "start": 3151, "end": 3473 }
class ____(object): """ >>> a = FormatReplace('something') >>> f"{a:5d}" 'something' """ # NOQA: P102 def __init__(self, replace=''): self.replace = replace self.format_called = 0 def __format__(self, _): self.format_called += 1 return self.replace
FormatReplace
python
allegroai__clearml
clearml/backend_interface/task/repo/scriptinfo.py
{ "start": 12902, "end": 27517 }
class ____(object): _thread = None _exit_event = None _sync_event = None _sample_frequency = 30.0 _first_sample_frequency = 3.0 _jupyter_history_logger = None _store_notebook_artifact = deferred_config("development.store_jupyter_notebook_artifact", True) @classmethod def _get_logger(cls) -> logging.Logger: return get_logger("Repository Detection") @classmethod def observer( cls, jupyter_notebook_filename: str, notebook_name: str = None, log_history: bool = False, ) -> None: if cls._exit_event is None: cls._exit_event = SafeEvent() if cls._sync_event is None: cls._sync_event = SafeEvent() if cls._thread is not None: # order of signaling is important! cls._exit_event.set() cls._sync_event.set() cls._thread.join() if log_history and cls._jupyter_history_logger is None: cls._jupyter_history_logger = _JupyterHistoryLogger() cls._jupyter_history_logger.hook() cls._sync_event.clear() cls._exit_event.clear() cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, notebook_name)) cls._thread.daemon = True cls._thread.start() @classmethod def signal_sync(cls, *_: Any, **__: Any) -> None: if cls._sync_event is None: return cls._sync_event.set() @classmethod def close(cls) -> None: if not cls._thread: return cls._exit_event.set() cls._sync_event.set() cls._thread.join() cls._thread = None @classmethod def _daemon(cls, jupyter_notebook_filename: str, notebook_name: Optional[str] = None) -> None: from clearml import Task # load jupyter notebook package # noinspection PyBroadException try: # noinspection PyBroadException try: # noinspection PyPackageRequirements from nbconvert.exporters import PythonExporter # noqa _script_exporter = PythonExporter() except Exception: _script_exporter = None if _script_exporter is None: # noinspection PyPackageRequirements from nbconvert.exporters.script import ScriptExporter # noqa _script_exporter = ScriptExporter() except Exception as ex: cls._get_logger().warning("Could not read Jupyter Notebook: {}".format(ex)) if 
isinstance(ex, ImportError): module_name = getattr(ex, "name", None) if module_name: cls._get_logger().warning( 'Please install {name} using "pip install {name}"'.format(name=module_name) ) _script_exporter = None # load pigar # noinspection PyBroadException try: from ....utilities.pigar.reqs import ( get_installed_pkgs_detail, file_import_modules, ) from ....utilities.pigar.modules import ReqsModules from ....utilities.pigar.log import logger logger.setLevel(logging.WARNING) except Exception: file_import_modules = None # load IPython # noinspection PyBroadException try: # noinspection PyPackageRequirements from IPython import get_ipython except Exception: # should not happen get_ipython = None # setup local notebook files if jupyter_notebook_filename: notebook = Path(jupyter_notebook_filename) local_jupyter_filename = jupyter_notebook_filename else: notebook = None folder = mkdtemp(suffix=".notebook") if notebook_name.endswith(".py"): notebook_name = notebook_name.replace(".py", ".ipynb") if not notebook_name.endswith(".ipynb"): notebook_name += ".ipynb" local_jupyter_filename = Path(folder) / notebook_name last_update_ts = None last_colab_hash = None counter = 0 prev_script_hash = None # noinspection PyBroadException try: from ....version import __version__ our_module = cls.__module__.split(".")[0], __version__ except Exception: our_module = None # noinspection PyBroadException try: import re replace_ipython_pattern = re.compile(r"\n([ \t]*)get_ipython\([ \t]*\)") replace_ipython_display_pattern = re.compile(r"\n([ \t]*)display\(") except Exception: replace_ipython_pattern = None replace_ipython_display_pattern = None # main observer loop, check if we need to exit while not cls._exit_event.wait(timeout=0.0): # wait for timeout or sync event cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency) cls._sync_event.clear() counter += 1 # noinspection PyBroadException try: # if there is no task connected, do nothing task = 
Task.current_task() if not task: continue script_code = None fmodules = None current_cell = None # if we have a local file: if notebook: if not notebook.exists(): continue # check if notebook changed if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0: continue last_update_ts = notebook.stat().st_mtime elif notebook_name: # this is a colab, let's try to get the notebook # noinspection PyProtectedMember colab_name, colab_notebook = ScriptInfo._get_colab_notebook() if colab_notebook: current_colab_hash = hash(colab_notebook) if current_colab_hash == last_colab_hash: continue last_colab_hash = current_colab_hash with open(local_jupyter_filename.as_posix(), "wt") as f: f.write(colab_notebook) else: # something went wrong we will try again later continue else: # serialize notebook to a temp file if cls._jupyter_history_logger: ( script_code, current_cell, ) = cls._jupyter_history_logger.history_to_str() else: # noinspection PyBroadException try: # noinspection PyBroadException try: os.unlink(local_jupyter_filename) except Exception: pass get_ipython().run_line_magic("history", "-t -f {}".format(local_jupyter_filename)) with open(local_jupyter_filename, "r") as f: script_code = f.read() # load the modules from ....utilities.pigar.modules import ImportedModules fmodules = ImportedModules() for nm in set([str(m).split(".")[0] for m in sys.modules]): fmodules.add(nm, "notebook", 0) except Exception: continue if _script_exporter is None: current_script_hash = "error_notebook_not_found.py" requirements_txt = "" conda_requirements = "" else: # get notebook python script if script_code is None and local_jupyter_filename: script_code, _ = _script_exporter.from_filename(local_jupyter_filename) if cls._store_notebook_artifact: # also upload the jupyter notebook as artifact task.upload_artifact( name="notebook", artifact_object=Path(local_jupyter_filename), preview="See `notebook preview` artifact", metadata={"UPDATE": 
datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")}, wait_on_upload=True, ) # noinspection PyBroadException try: from nbconvert.exporters import HTMLExporter # noqa html, _ = HTMLExporter().from_filename(filename=local_jupyter_filename) local_html = Path(gettempdir()) / "notebook_{}.html".format(task.id) with open(local_html.as_posix(), "wt", encoding="utf-8") as f: f.write(html) task.upload_artifact( name="notebook preview", artifact_object=local_html, preview="Click `FILE PATH` link", metadata={"UPDATE": datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")}, delete_after_upload=True, wait_on_upload=True, ) except Exception: pass current_script_hash = hash(script_code + (current_cell or "")) if prev_script_hash and prev_script_hash == current_script_hash: continue requirements_txt = "" conda_requirements = "" # parse jupyter python script and prepare pip requirements (pigar) # if backend supports requirements if file_import_modules and Session.check_min_api_version("2.2"): if fmodules is None: fmodules, _ = file_import_modules( notebook.parts[-1] if notebook else "notebook", script_code, ) if current_cell: cell_fmodules, _ = file_import_modules( notebook.parts[-1] if notebook else "notebook", current_cell, ) # noinspection PyBroadException try: fmodules |= cell_fmodules except Exception: pass # add current cell to the script if current_cell: script_code += "\n" + current_cell fmodules = ScriptRequirements.add_trains_used_packages(fmodules) # noinspection PyUnboundLocalVariable installed_pkgs = get_installed_pkgs_detail() # make sure we are in installed packages if our_module and (our_module[0] not in installed_pkgs): installed_pkgs[our_module[0]] = our_module # noinspection PyUnboundLocalVariable reqs = ReqsModules() for name in fmodules: if name in installed_pkgs: # handle namespace packages, which are returned as flat dicts of format # {mapping_pkg_name: (pkg_name, version), ...} if isinstance(installed_pkgs[name], dict): for subpackage_name, 
subpackage in installed_pkgs[name].items(): pkg_name, version = subpackage reqs.add( pkg_name, version, fmodules.get(subpackage_name, fmodules[name]), ) else: pkg_name, version = installed_pkgs[name] reqs.add(pkg_name, version, fmodules[name]) ( requirements_txt, conda_requirements, ) = ScriptRequirements.create_requirements_txt(reqs) # remove ipython direct access from the script code # we will not be able to run them anyhow # probably should be better dealt with, because multi line will break it if replace_ipython_pattern: script_code = replace_ipython_pattern.sub(r"\n# \g<1>get_ipython()", script_code) if replace_ipython_display_pattern: script_code = replace_ipython_display_pattern.sub(r"\n\g<1>print(", script_code) # update script prev_script_hash = current_script_hash data_script = task.data.script data_script.diff = script_code data_script.requirements = { "pip": requirements_txt, "conda": conda_requirements, } # noinspection PyProtectedMember task._update_script(script=data_script) # update requirements # noinspection PyProtectedMember task._update_requirements(requirements=requirements_txt) except Exception: pass
_JupyterObserver
python
pytorch__pytorch
torch/ao/quantization/fx/_model_report/model_report.py
{ "start": 730, "end": 29740 }
class ____: r""" The ModelReport class aims to provide users an easy way to diagnose issues that they run into with their models. The class works with all traceable GraphModules to help diagnose issues, though the requirements on the type of model more-so depends on the specific report the user is trying to generate. With respect to the reports, the ModelReport class is initialized with a set of Detector classes, each of which generate reports on quantization configuration issues a use might have. Currently supports generating reports on: - Suggestions for per-channel vs. per-tensor quantization (nn.Module) - Suggestions for dynamic vs static quantization for linear layers (Graph Modules) - Suggestions for input-weight equalization for linear and conv layers (Graph Modules) - Suggestions for outlier detection for all layers (Graph Modules) The ModelReport class has the primary functionality of inserting observers (primarily the ModelReportObserver) where needed for each detector to gather the information it needs, and then after calibration, the ModelReport class compiles the report generated by each Detector class into a single report to return to the user. It also has the capability to remove all the observers it inserted as well. * :attr:`_model` The model we wish to generate the report for. Must be a traceable GraphModule * :attr:`_desired_report_detectors` The set of Detectors representing desired reports from the ModelReport class Make sure that these are all unique types of detectors [do not have more than 1 of the same class] * :attr:`_desired_detector_names` The set of detector names of the _desired_report_detectors. 
This set is generated by calling the get_detector_name() of each detector * :attr:`_detector_name_to_observer_fqns` The mapping from each detector to fqns of observers of interest The purpose of this is to keep track of what observers were inserted for each detector, so that they can be removed at the end if desired * :attr:`_prepared_flag` A boolean flag that keeps track of whether we have prepared the model or not This is to ensure we only insert observers once with the ModelReport instance * :attr:`_removed_observers` A boolean to track if we have removed observers already The purpose is to ensure we don't attempt to remove observers twice with the same ModelReport instance. This also allows the functionality where we can generate the report multiple times as long as we haven't removed the observers yet. Note: This class was initially designed to work with the Fx Graph Mode workflow in mind. However, full functionality is available as long as there is a traceable GraphModule that is being used. One method to get a traceable GraphModule without going through the Fx workflow is to use the QuantizationTracer class. General Flow for Fx workflow: 1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects and model 2.) Prepare your model with prepare_fx 3.) Call model_report.prepare_detailed_calibration to add relevant observers 4.) Calibrate your model with data 5.) Call model_report.generate_report on your model to generate report and optionally remove added observers Optional 6.) Call model_report.generate_visualizer to get a ModelReportVisualizer instance 7.) To help in parsing report information and debugging, view report info as a: - Table - Histogram - Line plot 8.) 
Call model_report.generate_qconfigs to generate the qconfigs based on the report suggestions Example (with QuantizationTracer): >>> # xdoctest: +SKIP >>> # get the necessary qconfig >>> config = PrepareCustomConfig() >>> skipped_module_names, skipped_module_classes = ( ... get_skipped_module_name_and_classes(config, False) ... ) >>> # initialize our model and get GraphModule >>> model = SomeModel() >>> tracer = QuantizationTracer(skipped_module_names, skipped_module_classes) >>> graph_module = GraphModule(model, tracer.trace(model)) >>> # get our set of detectors and ModelReport instance >>> detector_set = set( ... [ ... DynamicStaticDetector(tolerance=0.5), ... InputWeightEqualizationDetector(ratio_threshold=0.7), ... ] ... ) >>> tracer_reporter = ModelReport(graph_module, tracer_detector_set) >>> # now we insert the observers and calibrate the model >>> tracer_model_with_observers = tracer_reporter.prepare_detailed_calibration() >>> for i in range(num_callibration_batches): >>> example_input = get_callibration_input() >>> tracer_model_with_observers(example_input) >>> # finally we generate the reports and optionally remove the observers we inserted >>> reports = tracer_reporter.generate_model_report( ... remove_inserted_observers=True ... 
) >>> # Optional: we can generate the qconfig mapping based on the suggestions >>> qconfigs = model_report.generate_qconfig_mapping() >>> # Optional: we can generate the equalization mapping based on the suggestions >>> qconfigs = model_report.generate_equalization_mapping() >>> # Optional: we get a ModelReportVisualizer instance to do any visualizations desired >>> model_report_visualizer = tracer_reporter.generate_visualizer() """ def __init__(self, model: GraphModule, desired_report_detectors: set[DetectorBase]): if len(desired_report_detectors) == 0: raise ValueError("Should include at least 1 desired report") # keep track of the model we wish to generate report for self._model: GraphModule = model # keep the reports private so they can't be modified self._desired_report_detectors = desired_report_detectors self._desired_detector_names = { detector.get_detector_name() for detector in desired_report_detectors } # keep a mapping of desired reports to observers of interest # this is to get the readings, and to remove them, can create a large set # this set can then be used to traverse the graph and remove added observers self._detector_name_to_observer_fqns: dict[str, set[str]] = {} # initialize each report to have empty set of observers of interest for desired_report in self._desired_detector_names: self._detector_name_to_observer_fqns[desired_report] = set() # flags to ensure that we can only prepare and remove observers once self._prepared_flag = False self._removed_observers = False # store the reports that we generated for visualization purposes # initially empty since no reports generated self._generated_reports: dict[str, dict] = {} def get_desired_reports_names(self) -> set[str]: """Returns a copy of the desired reports for viewing""" return self._desired_detector_names.copy() def get_observers_of_interest(self) -> dict[str, set[str]]: """Returns a copy of the observers of interest for viewing""" return self._detector_name_to_observer_fqns.copy() def 
prepare_detailed_calibration(self) -> GraphModule: r""" Takes in a graph model and inserts the following observers: - ModelReportObserver Each observer is inserted based on the desired_reports into the relevant locations Right now, each report in self._desired_detector_names has independent insertions However, if a module already has a Observer of the same type, the insertion will not occur This is because all of the same type of Observer collect same information, so redundant Returns the same GraphModule with the observers inserted """ # if already prepared once, cannot prepare again if self._prepared_flag: raise ValueError( "Already ran preparing detailed calibration. Run the report generation next after calibration." ) # loop through each detector, find where placements should be, and keep track insert_observers_fqns: dict[str, Any] = {} for detector in self._desired_report_detectors: # determine observer points for each detector obs_fqn_to_info = detector.determine_observer_insert_points(self._model) # map each insert point to the observer to use insert_observers_fqns.update(obs_fqn_to_info) # update the set of observers this report cares about self._detector_name_to_observer_fqns[detector.get_detector_name()] = set( obs_fqn_to_info.keys() ) # now insert all the observers at their desired locations for observer_fqn in insert_observers_fqns: target_node = insert_observers_fqns[observer_fqn][DETECTOR_TARGET_NODE_KEY] insert_obs = insert_observers_fqns[observer_fqn][DETECTOR_OBS_TO_INSERT_KEY] insert_post = insert_observers_fqns[observer_fqn][DETECTOR_IS_POST_OBS_KEY] observer_args = insert_observers_fqns[observer_fqn][DETECTOR_OBS_ARGS_KEY] self._insert_observer_around_module( observer_fqn, target_node, insert_obs, observer_args, insert_post ) self._prepared_flag = True return self._model def _insert_observer_around_module( self, obs_fqn: str, target_node: torch.fx.node.Node, obs_to_insert: ObserverBase, observer_args: tuple, insert_post: bool, ): r""" Helper 
function that inserts the observer into both the graph structure and the module of the model Args node_fqn (str): The fully qualified name of the observer we want to insert target_node (torch.fx.node.Node): The node in model we are inserting observers around obs_to_insert (ObserverBase): The observer we are inserting around target_node observer_args (Tuple): The arguments we want to pass into the observer insert_post (bool): whether this is meant to be a post observer for this node """ # if we are inserting post, then our target node is the next node if insert_post: target_node = target_node.next with self._model.graph.inserting_before(target_node): self._model.add_submodule(obs_fqn, obs_to_insert) self._model.graph.create_node( op="call_module", target=obs_fqn, args=observer_args ) # recompile model after inserts are made self._model.recompile() def _get_node_from_fqn(self, node_fqn: str) -> torch.fx.node.Node: r""" Takes in a node fqn and returns the node based on the fqn Args node_fqn (str): The fully qualified name of the node we want to find in model Returns the Node object of the given node_fqn otherwise returns None """ node_to_return = None for node in self._model.graph.nodes: # if the target matches the fqn, it's the node we are looking for if node.target == node_fqn: node_to_return = node break if node_to_return is None: raise ValueError("The node_fqn is was not found within the module.") # assert for MyPy if not isinstance(node_to_return, torch.fx.node.Node): raise AssertionError("node_to_return must be a torch.fx.node.Node") return node_to_return def generate_model_report( self, remove_inserted_observers: bool ) -> dict[str, tuple[str, dict]]: r""" Generates all the requested reports. 
Note: You should have calibrated the model with relevant data before calling this The reports generated are specified by the desired_reports specified in desired_reports Can optionally remove all the observers inserted by the ModelReport instance Args: remove_inserted_observers (bool): True to remove the observers inserted by this ModelReport instance Returns a mapping of each desired report name to a tuple with: The textual summary of that report information A dictionary containing relevant statistics or information for that report Note: Throws exception if we try to generate report on model we already removed observers from Throws exception if we try to generate report without preparing for calibration """ # if we haven't prepped model for calibration, then we shouldn't generate report yet if not self._prepared_flag: raise Exception( # noqa: TRY002 "Cannot generate report without preparing model for calibration" ) # if we already removed the observers, we cannot generate report if self._removed_observers: raise Exception( # noqa: TRY002 "Cannot generate report on model you already removed observers from" ) # keep track of all the reports of interest and their outputs reports_of_interest = {} for detector in self._desired_report_detectors: # generate the individual report for the detector report_output = detector.generate_detector_report(self._model) reports_of_interest[detector.get_detector_name()] = report_output # if user wishes to remove inserted observers, go ahead and remove if remove_inserted_observers: self._removed_observers = True # get the set of all Observers inserted by this instance of ModelReport all_observers_of_interest: set[str] = set() for desired_report in self._detector_name_to_observer_fqns: observers_of_interest = self._detector_name_to_observer_fqns[ desired_report ] all_observers_of_interest.update(observers_of_interest) # go through all_observers_of_interest and remove them from the graph and model for observer_fqn in 
all_observers_of_interest: # remove the observer from the model self._model.delete_submodule(observer_fqn) # remove the observer from the graph structure node_obj = self._get_node_from_fqn(observer_fqn) if node_obj: self._model.graph.erase_node(node_obj) else: raise ValueError("Node no longer exists in GraphModule structure") # remember to recompile the model self._model.recompile() # save the generated reports for visualization purposes saved_reports: dict[str, dict] = { report_name: report_tuple[1] for report_name, report_tuple in reports_of_interest.items() } self._generated_reports = saved_reports # return the reports of interest return reports_of_interest def _is_same_info_for_same_key(self, info_dict_a: dict, info_dict_b: dict) -> bool: r""" Takes in two dictionaries and ensures that any common keys between the two have the same values. Args: info_dict_a (Dict): First dictionary we wish to compare info_dict_b (Dict): Second dictionary we wish to compare Returns True if all shared keys have same values, false otherwise """ # get the set of keys for both dict_a_keys: set = set(info_dict_a.keys()) dict_b_keys: set = set(info_dict_b.keys()) # get the insersection keys and check if same value for both dicts intersecting_keys: set = dict_a_keys.intersection(dict_b_keys) for key in intersecting_keys: dict_a_val = info_dict_a[key] dict_b_val = info_dict_b[key] # if it's a tensor we have to handle separately if type(dict_a_val) is torch.Tensor: # if dict_b_val not tensor, automatically false if ( type(dict_b_val) is not torch.Tensor or sum(dict_a_val != dict_b_val) != 0 ): return False else: # for non-tensor vals if dict_a_val != dict_b_val: return False # if no non matching shared keys found, return true return True def _reformat_reports_for_visualizer(self) -> OrderedDict: r""" Takes the generated reports and reformats them into the format that is desired by the ModelReportVisualizer Returns an OrderedDict mapping module_fqns to their features """ # we want to 
reorder and reformat the information so it is ordered in terms of order # found in the model # first create new dict with all modules as keys and features under respective module module_fqns_to_features: dict[str, dict] = {} for report_name in self._generated_reports: # get mod -> feature dict and go through module_info = self._generated_reports[report_name] for module_fqn in module_info: # check if already in our accumulation dict if module_fqn in module_fqns_to_features: # we merge all the features together new_info: dict = module_info[module_fqn] present_info: dict = module_fqns_to_features[module_fqn] # merge them together into the new unioned dict # same features keys -> same info, so okay if override # do safety check to make sure shared keys have same info if self._is_same_info_for_same_key(new_info, present_info): module_fqns_to_features[module_fqn] = { **new_info, **present_info, } else: error_str = "You have the same key with different values across detectors. " error_str += "Someone incorrectly implemented a detector with conflicting keys to existing detectors." raise ValueError(error_str) else: # we just set it module_fqns_to_features[module_fqn] = module_info[module_fqn] # our ordered dict so that modules can be ordered in order of how they appear in model features_by_module: OrderedDict[str, dict] = OrderedDict() # we loop through modules in graph in order for fqn, _module in self._model.named_modules(): # find that fqn in fqns_to_features if fqn in module_fqns_to_features: # add it to our ordered dict features_by_module[fqn] = module_fqns_to_features[fqn] # return the ordered dict of info we created return features_by_module def generate_visualizer(self) -> ModelReportVisualizer: r""" Generates a ModelReportVisualizer instance using the reports generated by the generate_model_report() method. 
Returns the generated ModelReportVisualizer instance initialized Note: Throws exception if attempt to get visualizers without generating report """ # check if user has generated reports at least once if len(self._generated_reports) == 0: raise Exception( # noqa: TRY002 "Unable to generate visualizers without first generating reports" ) # get the ordered dict mapping modules to their full set of collected features / stats module_fqns_to_features: OrderedDict = self._reformat_reports_for_visualizer() # create and return ModelReportVisualizer instance visualizer: ModelReportVisualizer = ModelReportVisualizer( module_fqns_to_features ) return visualizer def _generate_qconfig_mapping_helper( self, detector_qconfig_info_combined: dict[str, DetectorQConfigInfo], generation_function: Callable, ) -> QConfigMapping: r""" This helper takes in the compiled detector qconfig info that has been compiled together and merges it into a QConfigMapping """ # keep track of the qconfigmapping qconfig_mapping = QConfigMapping() # loop through each module / fqn and attempt to create QConfigMapping for fqn, module in self._model.named_modules(): # if we have a qconfig info for this module if fqn in detector_qconfig_info_combined: qconfig_info_compiled = detector_qconfig_info_combined[fqn] # now generate the qconfig and add it to the mapping generated_qconfig = generation_function(qconfig_info_compiled, module) # add to our config qconfig_mapping.set_module_name(fqn, generated_qconfig) # return compiled mapping return qconfig_mapping def _update_detector_quantizaiton_qconfig_info( self, combined_info: DetectorQConfigInfo, new_info: DetectorQConfigInfo ): r""" Takes in the old and new information and updates the combined information. 
Args: combined_info (DetectorQConfigInfo): The DetectorQConfigInfo we are compiling all of the information in new_info (DetectorQConfigInfo): The DetectorQConfigInfo with the information we are trying to merge the new info into it """ combined_info.is_activation_dynamic = ( combined_info.is_activation_dynamic or new_info.is_activation_dynamic ) combined_info.is_weight_per_channel = ( combined_info.is_weight_per_channel or new_info.is_weight_per_channel ) def _update_detector_equalization_qconfig_info( self, combined_info: DetectorQConfigInfo, new_info: DetectorQConfigInfo ): r""" Takes in the old and new information and updates the combined information. Args: combined_info (DetectorQConfigInfo): The DetectorQConfigInfo we are compiling all of the information in new_info (DetectorQConfigInfo): The DetectorQConfigInfo with the information we are trying to merge the new info into it """ is_equalization_recommended = ( combined_info.is_equalization_recommended or new_info.is_equalization_recommended ) combined_info.is_equalization_recommended = is_equalization_recommended def _generate_module_fqn_to_detector_info_mapping( self, update_qconfig_info_function: Callable ) -> dict[str, DetectorQConfigInfo]: r""" Generates a QConfigMapping based on the suggestions of the ModelReport API. The generated mapping encompasses all the different types of feedback from the different detectors all into one place. These configs are based on the suggestions provided by the ModelReport API and can only be generated once the reports have been generated. 
Args: update_qconfig_info_function (Callable) takes in a function that takes in two DetectorQConfigInfo and updates the one that is being compiled Returns a Dict mapping module_fqns to DetectorQConfigInfo objects Note: Throws exception if we try to generate mapping on model we already removed observers from Throws exception if we try to generate mapping without preparing for calibration """ # if we haven't prepped model for calibration, then we shouldn't generate mapping yet if not self._prepared_flag: raise Exception( # noqa: TRY002 "Cannot generate report without preparing model for calibration" ) # if we already removed the observers, we cannot mapping if self._removed_observers: raise Exception( # noqa: TRY002 "Cannot generate report on model you already removed observers from" ) # keep track of qconfig info for each module across detectors detector_qconfig_info_combined: dict[str, DetectorQConfigInfo] = {} for detector in self._desired_report_detectors: # get the info from the detector detector_info: dict[str, DetectorQConfigInfo] = detector.get_qconfig_info( self._model ) # we go through the modules for module_fqn in detector_info: # see if we already have info on it if module_fqn in detector_qconfig_info_combined: # we combine the current options with what is there current_options = detector_qconfig_info_combined[module_fqn] detector_options = detector_info[module_fqn] update_qconfig_info_function(current_options, detector_options) else: # we just use this for now detector_qconfig_info_combined[module_fqn] = detector_info[ module_fqn ] return detector_qconfig_info_combined def generate_qconfig_mapping(self) -> QConfigMapping: r""" Generates a QConfigMapping based on the suggestions of the ModelReport API. The generated mapping encompasses all the different types of feedback from the different detectors all into one place. These configs are based on the suggestions provided by the ModelReport API and can only be generated once the reports have been generated. 
Returns a QConfigMapping for the quantization configuration Note: Throws exception if we try to generate mapping on model we already removed observers from Throws exception if we try to generate mapping without preparing for calibration """ # get the mapping info detector_qconfig_info_combined = ( self._generate_module_fqn_to_detector_info_mapping( self._update_detector_quantizaiton_qconfig_info ) ) # we will do a bit of processing and remove fqns that don't have input weight recommended # now we generate the QConfig for each of the options mapping: QConfigMapping = self._generate_qconfig_mapping_helper( detector_qconfig_info_combined, self._quantization_config_generator ) # return the generated mapping return mapping def _quantization_config_generator( self, detector_qconfig_info: DetectorQConfigInfo, module: torch.nn.Module ) -> QConfig: r""" Returns the quantization configuration generated by the DetectorQConfigInfo object """ return detector_qconfig_info.generate_quantization_qconfig(module) def _equalization_config_generator( self, detector_qconfig_info: DetectorQConfigInfo, module: torch.nn.Module ) -> EqualizationQConfig: r""" We ignore the module argument here, and only focus on thedetector_qconfig_info Returns the equalization configuration generated by the DetectorQConfigInfo object """ return detector_qconfig_info.generate_equalization_qconfig() def generate_equalization_mapping(self) -> QConfigMapping: r""" Generates a QConfigMapping based on the suggestions of the ModelReport API for equalization. The generated mapping encompasses all the different types of feedback from the input-weight equalization detector. These configs are based on the suggestions provided by the ModelReport API and can only be generated once the reports have been generated. 
Returns a QConfigMapping for the equalization configuration """ # get the mapping info detector_qconfig_info_combined = ( self._generate_module_fqn_to_detector_info_mapping( self._update_detector_equalization_qconfig_info ) ) # now we generate the QConfig for each of the options mapping: QConfigMapping = self._generate_qconfig_mapping_helper( detector_qconfig_info_combined, self._equalization_config_generator ) # return the generated mapping return mapping
ModelReport
python
pytorch__pytorch
torch/distributions/lowrank_multivariate_normal.py
{ "start": 1780, "end": 10163 }
class ____(Distribution): r""" Creates a multivariate normal distribution with covariance matrix having a low-rank form parameterized by :attr:`cov_factor` and :attr:`cov_diag`:: covariance_matrix = cov_factor @ cov_factor.T + cov_diag Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = LowRankMultivariateNormal( ... torch.zeros(2), torch.tensor([[1.0], [0.0]]), torch.ones(2) ... ) >>> m.sample() # normally distributed with mean=`[0,0]`, cov_factor=`[[1],[0]]`, cov_diag=`[1,1]` tensor([-0.2102, -0.5429]) Args: loc (Tensor): mean of the distribution with shape `batch_shape + event_shape` cov_factor (Tensor): factor part of low-rank form of covariance matrix with shape `batch_shape + event_shape + (rank,)` cov_diag (Tensor): diagonal part of low-rank form of covariance matrix with shape `batch_shape + event_shape` Note: The computation for determinant and inverse of covariance matrix is avoided when `cov_factor.shape[1] << cov_factor.shape[0]` thanks to `Woodbury matrix identity <https://en.wikipedia.org/wiki/Woodbury_matrix_identity>`_ and `matrix determinant lemma <https://en.wikipedia.org/wiki/Matrix_determinant_lemma>`_. 
Thanks to these formulas, we just need to compute the determinant and inverse of the small size "capacitance" matrix:: capacitance = I + cov_factor.T @ inv(cov_diag) @ cov_factor """ # pyrefly: ignore [bad-override] arg_constraints = { "loc": constraints.real_vector, "cov_factor": constraints.independent(constraints.real, 2), "cov_diag": constraints.independent(constraints.positive, 1), } support = constraints.real_vector has_rsample = True def __init__( self, loc: Tensor, cov_factor: Tensor, cov_diag: Tensor, validate_args: Optional[bool] = None, ) -> None: if loc.dim() < 1: raise ValueError("loc must be at least one-dimensional.") event_shape = loc.shape[-1:] if cov_factor.dim() < 2: raise ValueError( "cov_factor must be at least two-dimensional, " "with optional leading batch dimensions" ) if cov_factor.shape[-2:-1] != event_shape: raise ValueError( f"cov_factor must be a batch of matrices with shape {event_shape[0]} x m" ) if cov_diag.shape[-1:] != event_shape: raise ValueError( f"cov_diag must be a batch of vectors with shape {event_shape}" ) loc_ = loc.unsqueeze(-1) cov_diag_ = cov_diag.unsqueeze(-1) try: loc_, self.cov_factor, cov_diag_ = torch.broadcast_tensors( loc_, cov_factor, cov_diag_ ) except RuntimeError as e: raise ValueError( f"Incompatible batch shapes: loc {loc.shape}, cov_factor {cov_factor.shape}, cov_diag {cov_diag.shape}" ) from e self.loc = loc_[..., 0] self.cov_diag = cov_diag_[..., 0] batch_shape = self.loc.shape[:-1] self._unbroadcasted_cov_factor = cov_factor self._unbroadcasted_cov_diag = cov_diag self._capacitance_tril = _batch_capacitance_tril(cov_factor, cov_diag) super().__init__(batch_shape, event_shape, validate_args=validate_args) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(LowRankMultivariateNormal, _instance) batch_shape = torch.Size(batch_shape) loc_shape = batch_shape + self.event_shape new.loc = self.loc.expand(loc_shape) new.cov_diag = self.cov_diag.expand(loc_shape) new.cov_factor = 
self.cov_factor.expand(loc_shape + self.cov_factor.shape[-1:]) new._unbroadcasted_cov_factor = self._unbroadcasted_cov_factor new._unbroadcasted_cov_diag = self._unbroadcasted_cov_diag new._capacitance_tril = self._capacitance_tril super(LowRankMultivariateNormal, new).__init__( batch_shape, self.event_shape, validate_args=False ) new._validate_args = self._validate_args return new @property def mean(self) -> Tensor: return self.loc @property def mode(self) -> Tensor: return self.loc @lazy_property def variance(self) -> Tensor: # type: ignore[override] return ( self._unbroadcasted_cov_factor.pow(2).sum(-1) + self._unbroadcasted_cov_diag ).expand(self._batch_shape + self._event_shape) @lazy_property def scale_tril(self) -> Tensor: # The following identity is used to increase the numerically computation stability # for Cholesky decomposition (see http://www.gaussianprocess.org/gpml/, Section 3.4.3): # W @ W.T + D = D1/2 @ (I + D-1/2 @ W @ W.T @ D-1/2) @ D1/2 # The matrix "I + D-1/2 @ W @ W.T @ D-1/2" has eigenvalues bounded from below by 1, # hence it is well-conditioned and safe to take Cholesky decomposition. 
n = self._event_shape[0] cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1) Dinvsqrt_W = self._unbroadcasted_cov_factor / cov_diag_sqrt_unsqueeze K = torch.matmul(Dinvsqrt_W, Dinvsqrt_W.mT).contiguous() K.view(-1, n * n)[:, :: n + 1] += 1 # add identity matrix to K scale_tril = cov_diag_sqrt_unsqueeze * torch.linalg.cholesky(K) return scale_tril.expand( self._batch_shape + self._event_shape + self._event_shape ) @lazy_property def covariance_matrix(self) -> Tensor: covariance_matrix = torch.matmul( self._unbroadcasted_cov_factor, self._unbroadcasted_cov_factor.mT ) + torch.diag_embed(self._unbroadcasted_cov_diag) return covariance_matrix.expand( self._batch_shape + self._event_shape + self._event_shape ) @lazy_property def precision_matrix(self) -> Tensor: # We use "Woodbury matrix identity" to take advantage of low rank form:: # inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D) # where :math:`C` is the capacitance matrix. Wt_Dinv = ( self._unbroadcasted_cov_factor.mT / self._unbroadcasted_cov_diag.unsqueeze(-2) ) A = torch.linalg.solve_triangular(self._capacitance_tril, Wt_Dinv, upper=False) precision_matrix = ( torch.diag_embed(self._unbroadcasted_cov_diag.reciprocal()) - A.mT @ A ) return precision_matrix.expand( self._batch_shape + self._event_shape + self._event_shape ) def rsample(self, sample_shape: _size = torch.Size()) -> Tensor: shape = self._extended_shape(sample_shape) W_shape = shape[:-1] + self.cov_factor.shape[-1:] eps_W = _standard_normal(W_shape, dtype=self.loc.dtype, device=self.loc.device) eps_D = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device) return ( self.loc + _batch_mv(self._unbroadcasted_cov_factor, eps_W) + self._unbroadcasted_cov_diag.sqrt() * eps_D ) def log_prob(self, value): if self._validate_args: self._validate_sample(value) diff = value - self.loc M = _batch_lowrank_mahalanobis( self._unbroadcasted_cov_factor, self._unbroadcasted_cov_diag, diff, self._capacitance_tril, ) 
log_det = _batch_lowrank_logdet( self._unbroadcasted_cov_factor, self._unbroadcasted_cov_diag, self._capacitance_tril, ) return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + log_det + M) def entropy(self): log_det = _batch_lowrank_logdet( self._unbroadcasted_cov_factor, self._unbroadcasted_cov_diag, self._capacitance_tril, ) H = 0.5 * (self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + log_det) if len(self._batch_shape) == 0: return H else: return H.expand(self._batch_shape)
LowRankMultivariateNormal
python
pytorch__pytorch
test/inductor/test_flex_attention.py
{ "start": 235705, "end": 273177 }
class ____(InductorTestCase): def setUp(self): super().setUp() skipCPUIf( LONG_COMPILATION_ON_CPU, "skip UT for CPU due to long compilation time found in CI", ) self.dtype = torch.float32 self.atol = 3e-2 self.rtol = 3e-2 def _init_tensors(self, params: Params, device: str): make_tensor = functools.partial( torch.randn, (params.batch_size, params.num_heads, params.seq_length, params.head_dim), device=device, dtype=params.dtype, requires_grad=True, ) return (make_tensor(), make_tensor(), make_tensor()) @torch.no_grad() def _gold_check(self, eager, compiled, gold, tensor_name, fudge_factor=1.35): ref_error = rmse(eager, gold) comp_error = rmse(compiled, gold) # Note: This has been carefully tested that FlexAttention is within # 20% of the average error of SDPA! Do not bump this tolerance # unless you are absolutely sure you are not worsening the accuracy # of FlexAttention! if eager.dtype == torch.float32: fudge_factor = 10.0 * fudge_factor comp_error = comp_error.item() ref_error = ref_error.item() * fudge_factor if ( tensor_name == "out" and eager.dtype == torch.float32 and comp_error > ref_error ): self.skipTest("Compiled FlexAttention is less accurate than eager in fp32") self.assertLessEqual( comp_error, (ref_error * fudge_factor), f"\nTensor: {tensor_name}\nCompiled error ({comp_error:.8f}) exceeds " f"reference error ({ref_error:.8f}) * fudge_factor ({fudge_factor})", ) def _check_outputs_and_grads( self, out_eager, out_compiled, out_gold, tensors, names=None ): backwards_grad = torch.randn_like(out_eager, device="cpu").to(out_eager.device) grads_eager = torch.autograd.grad((out_eager,), tensors, backwards_grad) grads_compiled = torch.autograd.grad((out_compiled,), tensors, backwards_grad) grads_gold = torch.autograd.grad((out_gold,), tensors, backwards_grad) tensor_names = ( ["out", "grad_query", "grad_key", "grad_value", "grad_bias"] if names is None else names ) eager_tensors = (out_eager, *grads_eager) compiled_tensors = (out_compiled, *grads_compiled) 
gold_tensors = (out_gold, *grads_gold) for eager, compiled, gold, name in zip( eager_tensors, compiled_tensors, gold_tensors, tensor_names, strict=True ): self._gold_check(eager, compiled, gold, name) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) @common_utils.parametrize("mode", ["default", "max-autotune-no-cudagraphs"]) def test_relative_1d_bias(self, device, params, mode: str): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( 2 * params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[torch.abs(q_idx - kv_idx)] flex_compiled = torch.compile(flex_attention, mode=mode) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_absolute_2d_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.seq_length, params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[q_idx, kv_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", 
get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_head_specific_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.num_heads, params.seq_length, params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[h, q_idx, kv_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_batch_head_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.batch_size, params.num_heads, params.seq_length, params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[b, h, q_idx, kv_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_multiplicative_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) def 
bias_func(score, b, h, q_idx, kv_idx): return score * bias[q_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_local_window_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) window_size = 8 bias = torch.randn( 2 * window_size + 1, device=device, dtype=torch.float32, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): window_idx = torch.clamp(q_idx - kv_idx + window_size, 0, 2 * window_size) return score + bias[window_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_global_tokens_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.seq_length, device=device, dtype=torch.float32, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[kv_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), 
key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_weird_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.batch_size, params.num_heads, 4, params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) which_bias = torch.tensor(0, device=device) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[b, h, which_bias, q_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_indirect_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) offset = torch.randint( 0, params.seq_length, (params.seq_length,), device=device, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[offset[q_idx]] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", 
get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) @common_utils.parametrize("mode", ["default", "max-autotune-no-cudagraphs"]) def test_symmetric_bias(self, device, params, mode: str): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[q_idx] + bias[kv_idx] flex_compiled = torch.compile(flex_attention, mode=mode) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) # Error in backwards with self.assertRaisesRegex( torch._inductor.exc.InductorError, "Using multiple indexing operations on the same tensor that requires gradients", ): self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_flipped_indexed_bias(self, device, params): query, key, value = self._init_tensors(params, device=device) bias = torch.randn( params.seq_length, params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[kv_idx, q_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) 
@common_utils.parametrize("mode", ["default", "max-autotune-no-cudagraphs"]) def test_head_specific_gate(self, device, params, mode: str): query, key, value = self._init_tensors(params, device=device) gate_score = torch.randn( params.num_heads, device=device, dtype=torch.float32, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score * torch.sigmoid(gate_score[h]) flex_compiled = torch.compile(flex_attention, mode=mode) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, gate_score), ) @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_distinct_biases(self, device, params): query, key, value = self._init_tensors(params, device=device) # Create two separate bias tensors bias1 = torch.randn( params.seq_length, device=device, dtype=params.dtype, requires_grad=True, ) bias2 = torch.randn( params.seq_length, device=device, dtype=torch.float32, requires_grad=True, ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias1[q_idx] + bias2[kv_idx] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) # Include both bias tensors in the tuple for gradient checking self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (query, key, value, bias1, bias2), names=[ "out", "grad_query", "grad_key", "grad_value", "grad_bias1", "grad_bias2", ], ) @skip_on_cpu @common_utils.parametrize( "params", 
get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) @torch.compile def test_learnable_bias_global_compiled(self, device, params): batch_size = 1 num_heads = 1 seq_len = 128 head_dim = 16 d_model = num_heads * head_dim query = torch.randn(batch_size, num_heads, seq_len, head_dim, device=device) key = torch.randn(batch_size, num_heads, seq_len, head_dim, device=device) value = torch.randn(batch_size, num_heads, seq_len, head_dim, device=device) out_proj = nn.Linear(d_model, d_model, device=device) query.requires_grad = True key.requires_grad = True value.requires_grad = True bias = torch.randn( batch_size, num_heads, seq_len, seq_len, device=device, requires_grad=True, ) def bias_mod(score, b, h, q_idx, kv_idx): return score + bias[b, h, q_idx, kv_idx] out = flex_attention( query=query, key=key, value=value, score_mod=bias_mod, ) out = out.transpose(1, 2).contiguous().view(batch_size, seq_len, d_model) attn_output = out_proj(out) random_target = torch.randn(batch_size, seq_len, d_model, device=device) loss = torch.nn.functional.mse_loss(attn_output, random_target) loss.backward() assert bias.grad, "No gradient computed for bias" assert torch.any(bias.grad != 0), "Gradient for bias is 0" @skip_on_cpu def test_backprop_error_case(self, device): @torch.compile() def test(x, y): # Materialize a bias matrix B, L, device = x.shape[0], x.shape[1], x.device b = torch.arange(B, device=device, dtype=torch.long).view(B, 1, 1) q_idx = torch.arange(L, device=device, dtype=torch.long).view(1, L, 1) kv_idx = torch.arange(L, device=device, dtype=torch.long).view(1, 1, L) bias_mat = y[b, q_idx] + y[b, kv_idx] # (B, L, L) # Dummy score_mod retrieving bias values def score_mod(score, b, h, q_idx, kv_idx): return score + bias_mat[b, q_idx, kv_idx] x_ = x[:, :, None].repeat(1, 1, 16, 1) # torch._dynamo.graph_break() return flex_attention(x_, x_, x_, score_mod=score_mod) B, L, D = 2, 16, 64 x = torch.randn(B, L, D, device=device, requires_grad=True) y = torch.randn(B, L, 
device=device, requires_grad=True) _ = test(x, y).mean().backward() assert x.grad.norm() > 0 assert y.grad.norm() > 0 @skip_on_cpu @common_utils.parametrize( "params", get_params(device_configs["cuda"].dtypes), name_fn=lambda x: f"{x}" ) def test_relative_1d_bias_only_grad(self, device, params): query, key, value = self._init_tensors(params, device=device) query = query.detach().requires_grad_(False) key = key.detach().requires_grad_(False) value = value.detach().requires_grad_(False) # Only bias requires gradients bias = torch.randn( 2 * params.seq_length, device=device, dtype=params.dtype, requires_grad=True, # Only bias needs gradients ) def bias_func(score, b, h, q_idx, kv_idx): return score + bias[torch.abs(q_idx - kv_idx)] flex_compiled = torch.compile(flex_attention) out_eager = flex_attention(query, key, value, score_mod=bias_func) out_compiled = flex_compiled(query, key, value, score_mod=bias_func) out_gold = flex_attention( query.to(torch.float64), key.to(torch.float64), value.to(torch.float64), score_mod=bias_func, ) # For gradient checking, we only pass the bias tensor since it's the only one requiring gradients self._check_outputs_and_grads( out_eager, out_compiled, out_gold, (bias,), names=["out", "bias"] ) def _test_flex_attention_with_dynamic_max_autotune(self, device): query = torch.randn(2, 16, 512, 64, device=device) key = torch.randn(2, 16, 512, 64, device=device) value = torch.randn(2, 16, 512, 64, device=device) query.requires_grad = True key.requires_grad = True value.requires_grad = True shape = (2, 16, 512, 16, 512, 64) B, Hq, M, Hkv, N, D = shape score_mod = _generate_alibi_bias(8) def causal(b, h, m, n): return m >= n mask_shape = (1, 1, M, N) block_mask = torch.compile(create_block_mask)( causal, *mask_shape, device=device ) compiled_sdpa = torch.compile( flex_attention, dynamic=True, mode="max-autotune-no-cudagraphs" ) out = compiled_sdpa( query=query, key=key, value=value, score_mod=score_mod, block_mask=block_mask, enable_gqa=True, 
kernel_options=None, ) out.sum().backward() self.assertEqual( out.shape, query.shape, f"Expected shape {query.shape}, got {out.shape}" ) @skip_on_cpu def test_flex_attention_with_dynamic_max_autotune(self, device): self._test_flex_attention_with_dynamic_max_autotune(device) @skip_on_cpu @torch._inductor.config.patch("graph_partition", True) def test_flex_attention_with_dynamic_max_autotune_graph_partition(self, device): self._test_flex_attention_with_dynamic_max_autotune(device) @skip_on_cpu def test_flex_attention_logging(self, device): with tempfile.TemporaryDirectory() as tmpdir: log_file = os.path.join(tmpdir, "flex_attention_configs") with patch.dict( os.environ, {"TORCHINDUCTOR_FLEX_ATTENTION_LOGGING_FILE": log_file} ): query = torch.randn( 1, 2, 128, 64, device=device, dtype=torch.float16, requires_grad=True, ) key = torch.randn( 1, 2, 128, 64, device=device, dtype=torch.float16, requires_grad=True, ) value = torch.randn( 1, 2, 128, 64, device=device, dtype=torch.float16, requires_grad=True, ) def score_mod(score, b, h, q_idx, kv_idx): return score * 2 def causal_mask(b, h, q_idx, kv_idx): return q_idx >= kv_idx block_mask = torch.compile(create_block_mask)( causal_mask, 1, 1, 128, 128, device=device ) compiled_flex = torch.compile( flex_attention, mode="max-autotune-no-cudagraphs" ) out = compiled_flex( query=query, key=key, value=value, score_mod=score_mod, block_mask=block_mask, ) out.sum().backward() json_file = log_file + ".json" self.assertTrue( os.path.exists(json_file), f"Log file {json_file} was not created" ) with open(json_file) as f: log_data = json.load(f) self.assertIsInstance(log_data, list) self.assertEqual(len(log_data), 2) keys_seen = [next(iter(entry.keys())) for entry in log_data] expected_fwd_key = "('forward', 1, 2, 2, 128, 128, 64, 64)" expected_bwd_key = "('backward', 1, 2, 2, 128, 128, 64, 64)" self.assertIn(expected_fwd_key, keys_seen) self.assertIn(expected_bwd_key, keys_seen) for entry in log_data: self.assertIsInstance(entry, 
dict) self.assertEqual(len(entry), 1) dims_key = next(iter(entry.keys())) choices = entry[dims_key] kernel_type = eval(dims_key)[0] self.assertIsInstance(choices, list) self.assertGreater(len(choices), 0) for i, choice in enumerate(choices): self.assertIn("type", choice) self.assertIn("time", choice) if choice["type"] == "triton": self.assertIn("num_warps", choice) self.assertIn("num_stages", choice) if kernel_type == "forward": self.assertIn("BLOCK_M", choice) self.assertIn("BLOCK_N", choice) self.assertNotIn("BLOCK_M1", choice) elif kernel_type == "backward": self.assertIn("BLOCK_M1", choice) self.assertIn("BLOCK_N1", choice) self.assertIn("BLOCK_M2", choice) self.assertIn("BLOCK_N2", choice) self.assertNotIn("BLOCK_M", choice) self.assertNotIn("BLOCK_N", choice) if i > 0: self.assertLessEqual(choices[0]["time"], choice["time"]) @skip_on_cpu def test_inspect_bug(self, device): # https://github.com/pytorch/pytorch/issues/139374 def sliding_window(b, h, q_idx, kv_idx, val): return (q_idx - kv_idx).abs() < val sliding_window2 = functools.partial( sliding_window, val=torch.randn((), device=device) ) opt_fn = torch.compile(create_block_mask, fullgraph=True) create_block_mask(sliding_window2, None, None, 1024, 1024, device=device) # checks that the compile is working opt_fn(sliding_window2, None, None, 1024, 1024, device=device) @supported_platform @skip_on_cpu def test_head_bias_req_grad(self, device): B, H, S, D = 1, 4, 256, 64 bias = torch.randn(H, device=device, dtype=torch.float16, requires_grad=True) bias_flex = bias.detach().clone().requires_grad_(True) def head_bias(score, b, h, q_idx, kv_idx): return score + bias_flex[h] bias_sdpa_ref = bias.detach().clone().requires_grad_(True) implicit_bias_sdpa_ref = bias_sdpa_ref implicit_bias_sdpa_ref = implicit_bias_sdpa_ref.view(H, 1, 1).expand(H, S, S) bias_sdpa_gold = ( bias.detach().clone().to(dtype=torch.float64).requires_grad_(True) ) implicit_bias_sdpa_gold = bias_sdpa_gold implicit_bias_sdpa_gold = 
implicit_bias_sdpa_gold.view(H, 1, 1).expand(H, S, S) self._test_learnable_bias_inner( B, H, S, D, head_bias, bias_flex, implicit_bias_sdpa_ref, bias_sdpa_ref, implicit_bias_sdpa_gold, bias_sdpa_gold, device, ) @supported_platform @skip_on_cpu def test_comparison_vs_sdpa_with_learnable_bias(self, device): # 1-dimensional bias: B, H, S, D = 1, 1, 256, 64 bias = torch.randn( 2 * S, device=device, dtype=torch.float16, requires_grad=True ) bias_flex = bias.detach().clone().requires_grad_(True) def rel_pos_1d(score, b, h, q_idx, kv_idx): return score + bias_flex[q_idx + kv_idx] bias_indices = torch.arange(S)[:, None] + torch.arange(S) bias_sdpa_ref = bias.detach().clone().requires_grad_(True) implicit_bias_sdpa_ref = bias_sdpa_ref[bias_indices] bias_sdpa_gold = ( bias.detach().clone().to(dtype=torch.float64).requires_grad_(True) ) implicit_bias_sdpa_gold = bias_sdpa_gold[bias_indices] self._test_learnable_bias_inner( B, H, S, D, rel_pos_1d, bias_flex, implicit_bias_sdpa_ref, bias_sdpa_ref, implicit_bias_sdpa_gold, bias_sdpa_gold, device, ) # 2-dimensional bias: B, H, S, D = 1, 1, 256, 64 bias = torch.randn(S, S, device=device, dtype=torch.float16, requires_grad=True) bias_flex = bias.detach().clone().requires_grad_(True) def rel_pos_2d(score, b, h, q_idx, kv_idx): return score + bias_flex[q_idx, kv_idx] bias_sdpa_ref = bias.detach().clone().requires_grad_(True) implicit_bias_sdpa_ref = bias_sdpa_ref bias_sdpa_gold = ( bias.detach().clone().to(dtype=torch.float64).requires_grad_(True) ) implicit_bias_sdpa_gold = bias_sdpa_gold self._test_learnable_bias_inner( B, H, S, D, rel_pos_2d, bias_flex, implicit_bias_sdpa_ref, bias_sdpa_ref, implicit_bias_sdpa_gold, bias_sdpa_gold, device, ) # 2-dimensional bias + index multiple B, H, S, D = 1, 1, 256, 64 bias = torch.randn(S, S, device=device, dtype=torch.float16, requires_grad=True) bias_flex = bias.detach().clone().requires_grad_(True) def rel_pos_2d(score, b, h, q_idx, kv_idx): return score + bias_flex[q_idx][kv_idx] 
bias_sdpa_ref = bias.detach().clone().requires_grad_(True) implicit_bias_sdpa_ref = bias_sdpa_ref bias_sdpa_gold = ( bias.detach().clone().to(dtype=torch.float64).requires_grad_(True) ) implicit_bias_sdpa_gold = bias_sdpa_gold self._test_learnable_bias_inner( B, H, S, D, rel_pos_2d, bias_flex, implicit_bias_sdpa_ref, bias_sdpa_ref, implicit_bias_sdpa_gold, bias_sdpa_gold, device, ) # 2-dimensional bias + transposed: B, H, S, D = 1, 1, 256, 64 bias = torch.randn(S, S, device=device, dtype=torch.float16, requires_grad=True) bias_flex = bias.detach().clone().requires_grad_(True) def rel_pos_2d_transposed(score, b, h, q_idx, kv_idx): return score + bias_flex[kv_idx, q_idx] bias_sdpa_ref = bias.detach().clone().requires_grad_(True) implicit_bias_sdpa_ref = bias_sdpa_ref.transpose(-1, -2) bias_sdpa_gold = ( bias.detach().clone().to(dtype=torch.float64).requires_grad_(True) ) implicit_bias_sdpa_gold = bias_sdpa_gold.transpose(-1, -2) self._test_learnable_bias_inner( B, H, S, D, rel_pos_2d_transposed, bias_flex, implicit_bias_sdpa_ref, bias_sdpa_ref, implicit_bias_sdpa_gold, bias_sdpa_gold, device, ) # 3-dimensional bias + transposed B, H, S, D = 4, 8, 256, 64 bias = torch.randn( H, S, S, device=device, dtype=torch.float16, requires_grad=True ) bias_flex = bias.detach().clone().requires_grad_(True) def rel_pos_3d_transposed(score, b, h, q_idx, kv_idx): return score + bias_flex[h, kv_idx, q_idx] bias_sdpa_ref = bias.detach().clone().requires_grad_(True) implicit_bias_sdpa_ref = bias_sdpa_ref.transpose(-1, -2) bias_sdpa_gold = ( bias.detach().clone().to(dtype=torch.float64).requires_grad_(True) ) implicit_bias_sdpa_gold = bias_sdpa_gold.transpose(-1, -2) self._test_learnable_bias_inner( B, H, S, D, rel_pos_3d_transposed, bias_flex, implicit_bias_sdpa_ref, bias_sdpa_ref, implicit_bias_sdpa_gold, bias_sdpa_gold, device, ) def _test_learnable_bias_inner( self, B, H, S, D, score_mod, bias_flex, implicit_bias_sdpa_ref, bias_sdpa_ref, implicit_bias_sdpa_gold, bias_sdpa_gold, 
device, ): make_tensor = functools.partial( torch.ones, (B, H, S, D), device=device, dtype=torch.float16, requires_grad=True, ) q_ref, k_ref, v_ref = make_tensor(), make_tensor(), make_tensor() q_gold, k_gold, v_gold = query_key_value_clones( q_ref, k_ref, v_ref, torch.float64 ) q_flex, k_flex, v_flex = query_key_value_clones(q_ref, k_ref, v_ref) out_ref = torch.nn.functional.scaled_dot_product_attention( q_ref, k_ref, v_ref, attn_mask=implicit_bias_sdpa_ref ) out_ref.sum().backward() out_gold = torch.nn.functional.scaled_dot_product_attention( q_gold, k_gold, v_gold, attn_mask=implicit_bias_sdpa_gold ) out_gold.sum().backward() out_flex = flex_attention(q_flex, k_flex, v_flex, score_mod=score_mod) out_flex.sum().backward() name = score_mod.__name__ for ref, flex, gold in [ (out_ref, out_flex, out_gold), (q_ref.grad, q_flex.grad, q_gold.grad), (k_ref.grad, k_flex.grad, k_gold.grad), (v_ref.grad, v_flex.grad, v_gold.grad), (bias_sdpa_ref.grad, bias_flex.grad, bias_sdpa_gold.grad), ]: ref_error = rmse(ref, gold) flex_error = rmse(flex, gold) self.assertTrue( ref_error * 1.2 >= flex_error, f"{name} -> Ref error: {ref_error}, Flex eager Error: {flex_error}", ) instantiate_device_type_tests( TestFlexAttention, globals(), only_for=test_device, allow_xpu=True ) instantiate_device_type_tests( TestPagedAttention, globals(), only_for=test_device, allow_xpu=True ) instantiate_device_type_tests( TestBlockMask, globals(), only_for=(test_device[0] if HAS_GPU else "cuda",), allow_xpu=True, ) instantiate_device_type_tests( TestLearnableBiases, globals(), only_for=test_device, allow_xpu=True ) if __name__ == "__main__": from torch._inductor.test_case import run_tests run_tests()
TestLearnableBiases
python
scikit-learn__scikit-learn
sklearn/utils/_param_validation.py
{ "start": 10724, "end": 11022 }
class ____(_Constraint): """Constraint representing the indicator `np.nan`.""" def is_satisfied_by(self, val): return ( not isinstance(val, Integral) and isinstance(val, Real) and math.isnan(val) ) def __str__(self): return "numpy.nan"
_NanConstraint
python
PyCQA__pylint
doc/data/messages/m/match-class-positional-attributes/bad.py
{ "start": 0, "end": 268 }
class ____: __match_args__ = ("title", "year") def __init__(self, title, year): self.title = title self.year = year def func(item: Book): match item: case Book("abc", 2000): # [match-class-positional-attributes] ...
Book
python
joke2k__faker
faker/providers/internet/cs_CZ/__init__.py
{ "start": 46, "end": 802 }
class ____(InternetProvider): user_name_formats = ( "{{last_name_female}}.{{first_name_female}}", "{{last_name_female}}.{{first_name_female}}", "{{last_name_male}}.{{first_name_male}}", "{{last_name_male}}.{{first_name_male}}", "{{first_name_female}}.{{last_name_female}}", "{{first_name_male}}.{{last_name_male}}", "{{first_name}}##", "?{{last_name}}", "?{{last_name}}", "?{{last_name}}", ) email_formats = ("{{user_name}}@{{free_email_domain}}",) free_email_domains = ( "seznam.cz", "gmail.com", "email.cz", "post.cz", "chello.cz", "centrum.cz", "volny.cz", ) tlds = ("cz", "com", "cz")
Provider
python
tornadoweb__tornado
tornado/test/websocket_test.py
{ "start": 28067, "end": 30742 }
class ____(WebSocketBaseTestCase): def get_app(self): self.handlers: list[WebSocketHandler] = [] test = self class PingHandler(TestWebSocketHandler): def initialize(self, close_future=None, compression_options=None): self.handlers = test.handlers # capture the handler instance so we can interrogate it later self.handlers.append(self) return super().initialize( close_future=close_future, compression_options=compression_options ) app = Application([("/", PingHandler)]) return app @staticmethod def install_hook(ws): """Optionally suppress the client's "pong" response.""" ws.drop_pongs = False ws.pongs_received = 0 def wrapper(fcn): def _inner(opcode: int, data: bytes): if opcode == 0xA: # NOTE: 0x9=ping, 0xA=pong ws.pongs_received += 1 if ws.drop_pongs: # prevent pong responses return # leave all other responses unchanged return fcn(opcode, data) return _inner ws.protocol._handle_message = wrapper(ws.protocol._handle_message) @gen_test def test_client_ping_timeout(self): # websocket client interval = 0.2 ws = yield self.ws_connect( "/", ping_interval=interval, ping_timeout=interval / 4 ) self.install_hook(ws) # websocket handler (server side) handler = self.handlers[0] for _ in range(5): # wait for the ping period yield gen.sleep(interval) # connection should still be open from the server end self.assertIsNone(handler.close_code) self.assertIsNone(handler.close_reason) # connection should still be open from the client end assert ws.protocol.close_code is None # Check that our hook is intercepting messages; allow for # some variance in timing (due to e.g. 
cpu load) self.assertGreaterEqual(ws.pongs_received, 4) # suppress the pong response message ws.drop_pongs = True # give the server time to register this yield gen.sleep(interval * 1.5) # connection should be closed from the server side self.assertEqual(handler.close_code, 1000) self.assertEqual(handler.close_reason, "ping timed out") # client should have received a close operation self.assertEqual(ws.protocol.close_code, 1000)
ServerPingTimeoutTest
python
ray-project__ray
python/ray/air/execution/_internal/event_manager.py
{ "start": 192, "end": 4933 }
class ____: """Event manager for Ray futures. The event manager can be used to track futures and invoke callbacks when they resolve. Futures are tracked with :meth:`track_future`. Future can then be awaited with :meth:`wait`. When futures successfully resolve, they trigger an optional ``on_result`` callback that can be passed to :meth:`track_future`. If they fail, they trigger an optional ``on_error`` callback. Args: shuffle_futures: If True, futures will be shuffled before awaited. This will avoid implicit prioritization of futures within Ray. """ def __init__(self, shuffle_futures: bool = True): self._shuffle_futures = shuffle_futures # Map of futures to callbacks (result, error) self._tracked_futures: Dict[ ray.ObjectRef, Tuple[Optional[_ResultCallback], Optional[_ErrorCallback]] ] = {} def track_future( self, future: ray.ObjectRef, on_result: Optional[_ResultCallback] = None, on_error: Optional[_ErrorCallback] = None, ): """Track a single future and invoke callbacks on resolution. Control has to be yielded to the event manager for the callbacks to be invoked, either via :meth:`wait` or via :meth:`resolve_future`. Args: future: Ray future to await. on_result: Callback to invoke when the future resolves successfully. on_error: Callback to invoke when the future fails. """ self._tracked_futures[future] = (on_result, on_error) def track_futures( self, futures: Iterable[ray.ObjectRef], on_result: Optional[_ResultCallback] = None, on_error: Optional[_ErrorCallback] = None, ): """Track multiple futures and invoke callbacks on resolution. Control has to be yielded to the event manager for the callbacks to be invoked, either via :meth:`wait` or via :meth:`resolve_future`. Args: futures: Ray futures to await. on_result: Callback to invoke when the future resolves successfully. on_error: Callback to invoke when the future fails. 
""" for future in futures: self.track_future(future, on_result=on_result, on_error=on_error) def discard_future(self, future: ray.ObjectRef): """Remove future from tracking. The future will not be awaited anymore, and it will not trigger any callbacks. Args: future: Ray futures to discard. """ self._tracked_futures.pop(future, None) def get_futures(self) -> Set[ray.ObjectRef]: """Get futures tracked by the event manager.""" return set(self._tracked_futures) @property def num_futures(self) -> int: return len(self._tracked_futures) def resolve_future(self, future: ray.ObjectRef): """Resolve a single future. This method will block until the future is available. It will then trigger the callback associated to the future and the event (success or error), if specified. Args: future: Ray future to resolve. """ try: on_result, on_error = self._tracked_futures.pop(future) except KeyError as e: raise ValueError( f"Future {future} is not tracked by this RayEventManager" ) from e try: result = ray.get(future) except Exception as e: if on_error: on_error(e) else: raise e else: if on_result: on_result(result) def wait( self, timeout: Optional[Union[float, int]] = None, num_results: Optional[int] = 1, ): """Wait up to ``timeout`` seconds for ``num_results`` futures to resolve. If ``timeout=None``, this method will block until all `num_results`` futures resolve. If ``num_results=None``, this method will await all tracked futures. For every future that resolves, the respective associated callbacks will be invoked. Args: timeout: Timeout in second to wait for futures to resolve. num_results: Number of futures to await. If ``None``, will wait for all tracked futures to resolve. """ futures = list(self.get_futures()) if self._shuffle_futures: random.shuffle(futures) num_results = num_results or len(futures) ready, _ = ray.wait(list(futures), timeout=timeout, num_returns=num_results) for future in ready: self.resolve_future(future)
RayEventManager
python
pyqtgraph__pyqtgraph
pyqtgraph/flowchart/library/Filters.py
{ "start": 9705, "end": 10067 }
class ____(CtrlNode): """Removes baseline from data, ignoring anomalous events""" nodeName = 'AdaptiveDetrend' uiTemplate = [ ('threshold', 'doubleSpin', {'value': 3.0, 'min': 0, 'max': 1000000}) ] def processData(self, data): return functions.adaptiveDetrend(data, threshold=self.ctrls['threshold'].value())
AdaptiveDetrend
python
getsentry__sentry
tests/sentry/sentry_apps/tasks/test_sentry_apps.py
{ "start": 58837, "end": 63278 }
class ____(TestCase): def setUp(self) -> None: self.project = self.create_project() self.user = self.create_user() self.sentry_app = self.create_sentry_app( organization=self.project.organization, events=["comment.updated", "comment.created", "comment.deleted"], ) self.install = self.create_sentry_app_installation( organization=self.project.organization, slug=self.sentry_app.slug ) self.issue = self.create_group(project=self.project) self.note = Activity.objects.create( group=self.issue, project=self.project, type=ActivityType.NOTE.value, user_id=self.user.id, data={"text": "hello world"}, ) self.data = { "comment_id": self.note.id, "timestamp": self.note.datetime.isoformat(), "comment": self.note.data["text"], "project_slug": self.note.project.slug, } @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_sends_comment_created_webhook( self, mock_record: MagicMock, safe_urlopen: MagicMock ) -> None: build_comment_webhook( self.install.id, self.issue.id, "comment.created", self.user.id, data=self.data ) ((_, kwargs),) = safe_urlopen.call_args_list assert kwargs["url"] == self.sentry_app.webhook_url assert kwargs["headers"]["Sentry-Hook-Resource"] == "comment" data = json.loads(kwargs["data"]) assert data["action"] == "created" assert data["data"]["issue_id"] == self.issue.id # SLO assertions assert_success_metric(mock_record) # PREPARE_WEBHOOK (success) -> SEND_WEBHOOK (success) -> SEND_WEBHOOK (success) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=3 ) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=3 ) @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_sends_comment_updated_webhook( self, mock_record: MagicMock, safe_urlopen: MagicMock ) -> None: self.data.update(data={"text": "goodbye world"}) build_comment_webhook( self.install.id, self.issue.id, "comment.updated", self.user.id, data=self.data ) 
((_, kwargs),) = safe_urlopen.call_args_list assert kwargs["url"] == self.sentry_app.webhook_url assert kwargs["headers"]["Sentry-Hook-Resource"] == "comment" data = json.loads(kwargs["data"]) assert data["action"] == "updated" assert data["data"]["issue_id"] == self.issue.id # SLO assertions assert_success_metric(mock_record) # PREPARE_WEBHOOK (success) -> SEND_WEBHOOK (success) -> SEND_WEBHOOK (success) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=3 ) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=3 ) @patch("sentry.integrations.utils.metrics.EventLifecycle.record_event") def test_sends_comment_deleted_webhook( self, mock_record: MagicMock, safe_urlopen: MagicMock ) -> None: self.note.delete() build_comment_webhook( self.install.id, self.issue.id, "comment.deleted", self.user.id, data=self.data ) ((_, kwargs),) = safe_urlopen.call_args_list assert kwargs["url"] == self.sentry_app.webhook_url assert kwargs["headers"]["Sentry-Hook-Resource"] == "comment" data = json.loads(kwargs["data"]) assert data["action"] == "deleted" assert data["data"]["issue_id"] == self.issue.id # SLO assertions assert_success_metric(mock_record) # PREPARE_WEBHOOK (success) -> SEND_WEBHOOK (success) -> SEND_WEBHOOK (success) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=3 ) assert_count_of_metric( mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=3 ) @patch("sentry.utils.sentry_apps.webhooks.safe_urlopen", return_value=MockResponseInstance)
TestCommentWebhook
python
ApeWorX__ape
src/ape/api/query.py
{ "start": 4462, "end": 4709 }
class ____(_BaseQuery): """ A ``QueryType`` that collects properties of ``TransactionAPI`` over a range of transactions collected inside the ``BlockAPI` object represented by ``block_id``. """ block_id: Any
BlockTransactionQuery
python
PyCQA__pylint
doc/data/messages/i/invalid-bytes-returned/bad.py
{ "start": 0, "end": 135 }
class ____: """__bytes__ returns <type 'str'>""" def __bytes__(self): # [invalid-bytes-returned] return "123"
CustomBytes
python
PyCQA__pylint
tests/functional/m/member/member_checks.py
{ "start": 4421, "end": 4509 }
class ____(metaclass=MetaWithDynamicGetattr): pass SomeClass.does_not_exist
SomeClass
python
PrefectHQ__prefect
tests/runtime/test_flow_run.py
{ "start": 3515, "end": 4817 }
class ____: """ This class may appear to reproduce some tests from the AttributeAccessPatterns tests but is intended to be copy / pastable for other new attributes to ensure full coverage of feature set for each attribute. """ async def test_id_is_attribute(self): assert "id" in dir(flow_run) async def test_id_is_none_when_not_set(self): assert flow_run.id is None async def test_id_uses_env_var_when_set(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value="foo") assert flow_run.id == "foo" async def test_id_prioritizes_context_info_over_env_var_dynamically( self, monkeypatch: pytest.MonkeyPatch ): monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value="foo") assert flow_run.id == "foo" @flow def flow_with_new_id(): return flow_run.id new_id = flow_with_new_id() assert isinstance(new_id, str) assert flow_with_new_id() != "foo" assert flow_run.id == "foo" async def test_id_can_be_retrieved_from_task_run_context(self): with TaskRunContext.model_construct( task_run=TaskRun.model_construct(flow_run_id="foo") ): assert flow_run.id == "foo"
TestID
python
pandas-dev__pandas
asv_bench/benchmarks/indexing_engines.py
{ "start": 5670, "end": 6372 }
class ____: params = [("monotonic_incr", "monotonic_decr", "non_monotonic")] param_names = ["index_type"] def setup(self, index_type): N = 10**5 values = list("a" * N + "b" * N + "c" * N) arr = { "monotonic_incr": np.array(values, dtype=object), "monotonic_decr": np.array(list(reversed(values)), dtype=object), "non_monotonic": np.array(list("abc") * N, dtype=object), }[index_type] self.data = libindex.ObjectEngine(arr) # code below avoids populating the mapping etc. while timing. self.data.get_loc("b") def time_get_loc(self, index_type): self.data.get_loc("b")
ObjectEngineIndexing
python
pypa__warehouse
warehouse/subscriptions/services.py
{ "start": 13360, "end": 13946 }
class ____(GenericBillingService): @classmethod def create_service(cls, context, request): stripe.api_version = request.registry.settings["billing.api_version"] stripe.api_key = request.registry.settings["billing.secret_key"] publishable_key = request.registry.settings["billing.publishable_key"] webhook_secret = request.registry.settings["billing.webhook_key"] domain = request.registry.settings["billing.domain"] return cls(stripe, publishable_key, webhook_secret, domain) @implementer(ISubscriptionService)
StripeBillingService
python
getsentry__sentry
src/sentry/preprod/migrations/0004_add_django_jsonfield.py
{ "start": 155, "end": 1450 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("preprod", "0003_drop_sentry_jsonfield_actual"), ] operations = [ migrations.AddField( model_name="preprodartifact", name="extras", field=models.JSONField(null=True), ), ]
Migration
python
getsentry__sentry
tests/sentry/hybridcloud/apigateway/test_apigateway_helpers.py
{ "start": 276, "end": 803 }
class ____(ApiGatewayTestCase): @responses.activate def test_verify_request_body(self) -> None: body = {"ab": "cd"} headers = {"header": "nope", "content-type": "application/json"} responses.add_callback( responses.POST, "http://ab.cd.e/test", verify_request_body(body, headers) ) resp = requests.post( "http://ab.cd.e/test", data=BytesIO(json.dumps(body).encode("utf8")), headers=headers ) assert resp.status_code == 200
VerifyRequestBodyTest
python
dagster-io__dagster
python_modules/dagster-test/dagster_test/toys/software_defined_assets.py
{ "start": 216, "end": 1572 }
class ____(IOManager): def handle_output(self, context, obj: DataFrame): assert context assert obj def load_input(self, context): assert context return DataFrame() @asset def daily_temperature_highs(sfo_q2_weather_sample: DataFrame) -> DataFrame: """Computes the temperature high for each day.""" assert sfo_q2_weather_sample time.sleep(3) return DataFrame() @asset def hottest_dates(daily_temperature_highs: DataFrame) -> DataFrame: """Computes the 10 hottest dates. In a more advanced demo, this might perform a complex SQL query to aggregate the data. For now, just imagine that this implements something like: ```sql SELECT temp, date_part('day', date) FROM daily_temperature_highs ORDER BY date DESC; ``` This could make use of [DATE_PART](https://www.postgresql.org/docs/8.1/functions-datetime.html), and we can even link to that because this supports Markdown. This concludes the demo of a long asset description. """ assert daily_temperature_highs time.sleep(3) return DataFrame() software_defined_assets = with_resources( [ daily_temperature_highs, hottest_dates, sfo_q2_weather_sample, ], resource_defs={"io_manager": IOManagerDefinition.hardcoded_io_manager(DummyIOManager())}, )
DummyIOManager
python
sqlalchemy__sqlalchemy
examples/performance/short_selects.py
{ "start": 676, "end": 6024 }
class ____(Base): __tablename__ = "customer" id = Column(Integer, Identity(), primary_key=True) name = Column(String(255)) description = Column(String(255)) q = Column(Integer) p = Column(Integer) x = deferred(Column(Integer)) y = deferred(Column(Integer)) z = deferred(Column(Integer)) Profiler.init("short_selects", num=10000) @Profiler.setup def setup_database(dburl, echo, num): global engine engine = create_engine(dburl, echo=echo) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) sess = Session(engine) sess.add_all( [ Customer( id=i, name="c%d" % i, description="c%d" % i, q=i * 10, p=i * 20, x=i * 30, y=i * 40, ) for i in ids ] ) sess.commit() @Profiler.profile def test_orm_query_classic_style(n): """classic ORM query of the full entity.""" session = Session(bind=engine) for id_ in random.sample(ids, n): session.query(Customer).filter(Customer.id == id_).one() @Profiler.profile def test_orm_query_new_style(n): """new style ORM select() of the full entity.""" session = Session(bind=engine) for id_ in random.sample(ids, n): stmt = future_select(Customer).where(Customer.id == id_) session.execute(stmt).scalar_one() @Profiler.profile def test_orm_query_new_style_using_embedded_lambdas(n): """new style ORM select() of the full entity w/ embedded lambdas.""" session = Session(bind=engine) for id_ in random.sample(ids, n): stmt = future_select(lambda: Customer).where( lambda: Customer.id == id_ ) session.execute(stmt).scalar_one() @Profiler.profile def test_orm_query_new_style_using_external_lambdas(n): """new style ORM select() of the full entity w/ external lambdas.""" session = Session(bind=engine) for id_ in random.sample(ids, n): stmt = lambdas.lambda_stmt(lambda: future_select(Customer)) stmt += lambda s: s.where(Customer.id == id_) session.execute(stmt).scalar_one() @Profiler.profile def test_orm_query_classic_style_cols_only(n): """classic ORM query against columns""" session = Session(bind=engine) for id_ in random.sample(ids, n): 
session.query(Customer.id, Customer.name, Customer.description).filter( Customer.id == id_ ).one() @Profiler.profile def test_orm_query_new_style_ext_lambdas_cols_only(n): """new style ORM query w/ external lambdas against columns.""" s = Session(bind=engine) for id_ in random.sample(ids, n): stmt = lambdas.lambda_stmt( lambda: future_select( Customer.id, Customer.name, Customer.description ) ) + (lambda s: s.filter(Customer.id == id_)) s.execute(stmt).one() @Profiler.profile def test_baked_query(n): """test a baked query of the full entity.""" bakery = baked.bakery() s = Session(bind=engine) for id_ in random.sample(ids, n): q = bakery(lambda s: s.query(Customer)) q += lambda q: q.filter(Customer.id == bindparam("id")) q(s).params(id=id_).one() @Profiler.profile def test_baked_query_cols_only(n): """test a baked query of only the entity columns.""" bakery = baked.bakery() s = Session(bind=engine) for id_ in random.sample(ids, n): q = bakery( lambda s: s.query(Customer.id, Customer.name, Customer.description) ) q += lambda q: q.filter(Customer.id == bindparam("id")) q(s).params(id=id_).one() @Profiler.profile def test_core_new_stmt_each_time(n): """test core, creating a new statement each time.""" with engine.connect() as conn: for id_ in random.sample(ids, n): stmt = select(Customer.__table__).where(Customer.id == id_) row = conn.execute(stmt).first() tuple(row) @Profiler.profile def test_core_new_stmt_each_time_compiled_cache(n): """test core, creating a new statement each time, but using the cache.""" compiled_cache = {} with engine.connect().execution_options( compiled_cache=compiled_cache ) as conn: for id_ in random.sample(ids, n): stmt = select(Customer.__table__).where(Customer.id == id_) row = conn.execute(stmt).first() tuple(row) @Profiler.profile def test_core_reuse_stmt(n): """test core, reusing the same statement (but recompiling each time).""" stmt = select(Customer.__table__).where(Customer.id == bindparam("id")) with engine.connect() as conn: for 
id_ in random.sample(ids, n): row = conn.execute(stmt, {"id": id_}).first() tuple(row) @Profiler.profile def test_core_reuse_stmt_compiled_cache(n): """test core, reusing the same statement + compiled cache.""" stmt = select(Customer.__table__).where(Customer.id == bindparam("id")) compiled_cache = {} with engine.connect().execution_options( compiled_cache=compiled_cache ) as conn: for id_ in random.sample(ids, n): row = conn.execute(stmt, {"id": id_}).first() tuple(row) if __name__ == "__main__": Profiler.main()
Customer
python
joblib__joblib
joblib/externals/loky/backend/popen_loky_posix.py
{ "start": 685, "end": 5541 }
class ____: method = "loky" DupFd = _DupFd def __init__(self, process_obj): sys.stdout.flush() sys.stderr.flush() self.returncode = None self._fds = [] self._launch(process_obj) def duplicate_for_child(self, fd): self._fds.append(fd) return reduction._mk_inheritable(fd) def poll(self, flag=os.WNOHANG): if self.returncode is None: while True: try: pid, sts = os.waitpid(self.pid, flag) except OSError: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None else: break if pid == self.pid: if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) else: assert os.WIFEXITED(sts) self.returncode = os.WEXITSTATUS(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: if not wait([self.sentinel], timeout): return None # This shouldn't block if wait() returned successfully. return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def terminate(self): if self.returncode is None: try: os.kill(self.pid, signal.SIGTERM) except ProcessLookupError: pass except OSError: if self.wait(timeout=0.1) is None: raise def _launch(self, process_obj): tracker_fd = resource_tracker._resource_tracker.getfd() fp = BytesIO() set_spawning_popen(self) try: prep_data = spawn.get_preparation_data( process_obj._name, getattr(process_obj, "init_main_module", True), ) reduction.dump(prep_data, fp) reduction.dump(process_obj, fp) finally: set_spawning_popen(None) try: parent_r, child_w = os.pipe() child_r, parent_w = os.pipe() # for fd in self._fds: # _mk_inheritable(fd) cmd_python = [sys.executable] cmd_python += ["-m", self.__module__] cmd_python += ["--process-name", str(process_obj.name)] cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))] reduction._mk_inheritable(child_w) reduction._mk_inheritable(tracker_fd) self._fds += [child_r, child_w, tracker_fd] if os.name == "posix": mp_tracker_fd = prep_data["mp_tracker_fd"] self.duplicate_for_child(mp_tracker_fd) from .fork_exec 
import fork_exec pid = fork_exec(cmd_python, self._fds, env=process_obj.env) util.debug( f"launched python with pid {pid} and cmd:\n{cmd_python}" ) self.sentinel = parent_r method = "getbuffer" if not hasattr(fp, method): method = "getvalue" with os.fdopen(parent_w, "wb") as f: f.write(getattr(fp, method)()) self.pid = pid finally: if parent_r is not None: util.Finalize(self, os.close, (parent_r,)) for fd in (child_r, child_w): if fd is not None: os.close(fd) @staticmethod def thread_is_spawning(): return True if __name__ == "__main__": import argparse parser = argparse.ArgumentParser("Command line parser") parser.add_argument( "--pipe", type=int, required=True, help="File handle for the pipe" ) parser.add_argument( "--process-name", type=str, default=None, help="Identifier for debugging purpose", ) args = parser.parse_args() info = {} exitcode = 1 try: with os.fdopen(args.pipe, "rb") as from_parent: process.current_process()._inheriting = True try: prep_data = pickle.load(from_parent) spawn.prepare(prep_data) process_obj = pickle.load(from_parent) finally: del process.current_process()._inheriting exitcode = process_obj._bootstrap() except Exception: print("\n\n" + "-" * 80) print(f"{args.process_name} failed with traceback: ") print("-" * 80) import traceback print(traceback.format_exc()) print("\n" + "-" * 80) finally: if from_parent is not None: from_parent.close() sys.exit(exitcode)
Popen
python
apache__airflow
providers/google/tests/unit/google/cloud/utils/test_credentials_provider.py
{ "start": 25385, "end": 25790 }
class ____: def test_get_project_id_from_service_account_email(self): assert _get_project_id_from_service_account_email(ACCOUNT_3_ANOTHER_PROJECT) == ANOTHER_PROJECT_ID def test_get_project_id_from_service_account_email_wrong_input(self): with pytest.raises(AirflowException): _get_project_id_from_service_account_email("ACCOUNT_1")
TestGetProjectIdFromServiceAccountEmail
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/stateful.py
{ "start": 22473, "end": 23344 }
class ____(SearchStrategy): def __init__(self, name: str, *, consume: bool = False): super().__init__() self.name = name self.consume = consume def do_draw(self, data): machine = data.draw(self_strategy) bundle = machine.bundle(self.name) if not bundle: data.mark_invalid(f"Cannot draw from empty bundle {self.name!r}") # Shrink towards the right rather than the left. This makes it easier # to delete data generated earlier, as when the error is towards the # end there can be a lot of hard to remove padding. position = data.draw_integer(0, len(bundle) - 1, shrink_towards=len(bundle)) if self.consume: return bundle.pop(position) # pragma: no cover # coverage is flaky here else: return bundle[position]
BundleReferenceStrategy
python
numba__numba
numba/experimental/jitclass/base.py
{ "start": 9856, "end": 14208 }
class ____(object): """ A jitclass builder for a mutable jitclass. This will register typing and implementation hooks to the given typing and target contexts. """ class_impl_registry = imputils.Registry('jitclass builder') implemented_methods = set() def __init__(self, class_type, typingctx, targetctx): self.class_type = class_type self.typingctx = typingctx self.targetctx = targetctx def register(self): """ Register to the frontend and backend. """ # Register generic implementations for all jitclasses self._register_methods(self.class_impl_registry, self.class_type.instance_type) # NOTE other registrations are done at the top-level # (see ctor_impl and attr_impl below) self.targetctx.install_registry(self.class_impl_registry) def _register_methods(self, registry, instance_type): """ Register method implementations. This simply registers that the method names are valid methods. Inside of imp() below we retrieve the actual method to run from the type of the receiver argument (i.e. self). """ to_register = list(instance_type.jit_methods) + \ list(instance_type.jit_static_methods) for meth in to_register: # There's no way to retrieve the particular method name # inside the implementation function, so we have to register a # specific closure for each different name if meth not in self.implemented_methods: self._implement_method(registry, meth) self.implemented_methods.add(meth) def _implement_method(self, registry, attr): # create a separate instance of imp method to avoid closure clashing def get_imp(): def imp(context, builder, sig, args): instance_type = sig.args[0] if attr in instance_type.jit_methods: method = instance_type.jit_methods[attr] elif attr in instance_type.jit_static_methods: method = instance_type.jit_static_methods[attr] # imp gets called as a method, where the first argument is # self. We drop this for a static method. 
sig = sig.replace(args=sig.args[1:]) args = args[1:] disp_type = types.Dispatcher(method) call = context.get_function(disp_type, sig) out = call(builder, args) _add_linking_libs(context, call) return imputils.impl_ret_new_ref(context, builder, sig.return_type, out) return imp def _getsetitem_gen(getset): _dunder_meth = "__%s__" % getset op = getattr(operator, getset) @templates.infer_global(op) class GetSetItem(templates.AbstractTemplate): def generic(self, args, kws): instance = args[0] if isinstance(instance, types.ClassInstanceType) and \ _dunder_meth in instance.jit_methods: meth = instance.jit_methods[_dunder_meth] disp_type = types.Dispatcher(meth) sig = disp_type.get_call_type(self.context, args, kws) return sig # lower both {g,s}etitem and __{g,s}etitem__ to catch the calls # from python and numba imputils.lower_builtin((types.ClassInstanceType, _dunder_meth), types.ClassInstanceType, types.VarArg(types.Any))(get_imp()) imputils.lower_builtin(op, types.ClassInstanceType, types.VarArg(types.Any))(get_imp()) dunder_stripped = attr.strip('_') if dunder_stripped in ("getitem", "setitem"): _getsetitem_gen(dunder_stripped) else: registry.lower((types.ClassInstanceType, attr), types.ClassInstanceType, types.VarArg(types.Any))(get_imp()) @templates.infer_getattr
ClassBuilder
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/linalg_ops_test.py
{ "start": 2423, "end": 3818 }
class ____(test.TestCase): def setUp(self): self.rng = np.random.RandomState(42) @test_util.run_deprecated_v1 def test_works_with_five_different_random_pos_def_matrices(self): for n in range(1, 6): for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5), (np.complex64, 0.05), (np.complex128, 1e-5)]: with self.subTest(n=n, np_dtype=np_dtype, atol=atol): matrix = _RandomPDMatrix(n, self.rng, np_dtype) _, logdet_np = np.linalg.slogdet(matrix) with self.session(): # Create 2 x n x n matrix # matrix = np.array( # [_RandomPDMatrix(n, self.rng, np_dtype), # _RandomPDMatrix(n, self.rng, np_dtype)]).astype(np_dtype) logdet_tf = linalg.logdet(matrix) self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol) def test_works_with_underflow_case(self): for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5), (np.complex64, 0.05), (np.complex128, 1e-5)]: with self.subTest(np_dtype=np_dtype, atol=atol): matrix = (np.eye(20) * 1e-6).astype(np_dtype) _, logdet_np = np.linalg.slogdet(matrix) with self.session(): logdet_tf = linalg.logdet(matrix) self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
LogdetTest
python
doocs__leetcode
solution/2800-2899/2809.Minimum Time to Make Array Sum At Most x/Solution.py
{ "start": 0, "end": 589 }
class ____: def minimumTime(self, nums1: List[int], nums2: List[int], x: int) -> int: n = len(nums1) f = [[0] * (n + 1) for _ in range(n + 1)] for i, (a, b) in enumerate(sorted(zip(nums1, nums2), key=lambda z: z[1]), 1): for j in range(n + 1): f[i][j] = f[i - 1][j] if j > 0: f[i][j] = max(f[i][j], f[i - 1][j - 1] + a + b * j) s1 = sum(nums1) s2 = sum(nums2) for j in range(n + 1): if s1 + s2 * j - f[n][j] <= x: return j return -1
Solution
python
pandas-dev__pandas
pandas/tests/indexing/test_loc.py
{ "start": 91554, "end": 92684 }
class ____: @pytest.mark.parametrize("bool_value", [True, False]) def test_loc_bool_incompatible_index_raises( self, index, frame_or_series, bool_value ): # GH20432 message = f"{bool_value}: boolean label can not be used without a boolean index" if index.inferred_type != "boolean": obj = frame_or_series(index=index, dtype="object") with pytest.raises(KeyError, match=message): obj.loc[bool_value] @pytest.mark.parametrize("bool_value", [True, False]) def test_loc_bool_should_not_raise(self, frame_or_series, bool_value): obj = frame_or_series( index=Index([True, False], dtype="boolean"), dtype="object" ) obj.loc[bool_value] def test_loc_bool_slice_raises(self, index, frame_or_series): # GH20432 message = ( r"slice\(True, False, None\): boolean values can not be used in a slice" ) obj = frame_or_series(index=index, dtype="object") with pytest.raises(TypeError, match=message): obj.loc[True:False]
TestLocBooleanLabelsAndSlices
python
ray-project__ray
python/ray/train/examples/pytorch/torch_linear_example.py
{ "start": 226, "end": 4264 }
class ____(torch.utils.data.Dataset): """y = a * x + b""" def __init__(self, a, b, size=1000): x = np.arange(0, 10, 10 / size, dtype=np.float32) self.x = torch.from_numpy(x) self.y = torch.from_numpy(a * x + b) def __getitem__(self, index): return self.x[index, None], self.y[index, None] def __len__(self): return len(self.x) def train_epoch(epoch, dataloader, model, loss_fn, optimizer): if train.get_context().get_world_size() > 1: dataloader.sampler.set_epoch(epoch) for X, y in dataloader: # Compute prediction error pred = model(X) loss = loss_fn(pred, y) # Backpropagation optimizer.zero_grad() loss.backward() optimizer.step() def validate_epoch(dataloader, model, loss_fn): num_batches = len(dataloader) model.eval() loss = 0 with torch.no_grad(): for X, y in dataloader: pred = model(X) loss += loss_fn(pred, y).item() loss /= num_batches import copy model_copy = copy.deepcopy(model) return model_copy.cpu().state_dict(), loss def train_func(config): data_size = config.get("data_size", 1000) val_size = config.get("val_size", 400) batch_size = config.get("batch_size", 32) hidden_size = config.get("hidden_size", 1) lr = config.get("lr", 1e-2) epochs = config.get("epochs", 3) train_dataset = LinearDataset(2, 5, size=data_size) val_dataset = LinearDataset(2, 5, size=val_size) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size) validation_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size) train_loader = train.torch.prepare_data_loader(train_loader) validation_loader = train.torch.prepare_data_loader(validation_loader) model = nn.Linear(1, hidden_size) model = train.torch.prepare_model(model) loss_fn = nn.MSELoss() optimizer = torch.optim.SGD(model.parameters(), lr=lr) results = [] for epoch in range(epochs): train_epoch(epoch, train_loader, model, loss_fn, optimizer) state_dict, loss = validate_epoch(validation_loader, model, loss_fn) result = dict(loss=loss) results.append(result) with tempfile.TemporaryDirectory() as 
tmpdir: torch.save(state_dict, os.path.join(tmpdir, "model.pt")) train.report(result, checkpoint=Checkpoint.from_directory(tmpdir)) return results def train_linear(num_workers=2, use_gpu=False, epochs=3, storage_path=None): config = {"lr": 1e-2, "hidden_size": 1, "batch_size": 4, "epochs": epochs} trainer = TorchTrainer( train_loop_per_worker=train_func, train_loop_config=config, scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu), run_config=RunConfig(storage_path=storage_path), ) result = trainer.fit() print(result.metrics) return result.metrics if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--address", required=False, type=str, help="the address to use for Ray" ) parser.add_argument( "--num-workers", "-n", type=int, default=2, help="Sets number of workers for training.", ) parser.add_argument( "--use-gpu", action="store_true", help="Whether to use GPU for training." ) parser.add_argument( "--epochs", type=int, default=3, help="Number of epochs to train for." ) parser.add_argument( "--smoke-test", action="store_true", default=False, help="Finish quickly for testing.", ) args, _ = parser.parse_known_args() import ray if args.smoke_test: # 2 workers + 1 for trainer. ray.init(num_cpus=3) train_linear() else: ray.init(address=args.address) train_linear( num_workers=args.num_workers, use_gpu=args.use_gpu, epochs=args.epochs )
LinearDataset
python
kamyu104__LeetCode-Solutions
Python/count-substrings-with-only-one-distinct-letter.py
{ "start": 29, "end": 372 }
class ____(object): def countLetters(self, S): """ :type S: str :rtype: int """ result = len(S) left = 0 for right in xrange(1, len(S)): if S[right] == S[left]: result += right-left else: left = right return result
Solution
python
plotly__plotly.py
plotly/graph_objs/cone/colorbar/_title.py
{ "start": 233, "end": 3950 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "cone.colorbar" _path_str = "cone.colorbar.title" _valid_props = {"font", "side", "text"} @property def font(self): """ Sets this color bar's title font. The 'font' property is an instance of Font that may be specified as: - An instance of :class:`plotly.graph_objs.cone.colorbar.title.Font` - A dict of string/value properties that will be passed to the Font constructor Returns ------- plotly.graph_objs.cone.colorbar.title.Font """ return self["font"] @font.setter def font(self, val): self["font"] = val @property def side(self): """ Determines the location of color bar's title with respect to the color bar. Defaults to "top" when `orientation` if "v" and defaults to "right" when `orientation` if "h". The 'side' property is an enumeration that may be specified as: - One of the following enumeration values: ['right', 'top', 'bottom'] Returns ------- Any """ return self["side"] @side.setter def side(self, val): self["side"] = val @property def text(self): """ Sets the title of the color bar. The 'text' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["text"] @text.setter def text(self, val): self["text"] = val @property def _prop_descriptions(self): return """\ font Sets this color bar's title font. side Determines the location of color bar's title with respect to the color bar. Defaults to "top" when `orientation` if "v" and defaults to "right" when `orientation` if "h". text Sets the title of the color bar. """ def __init__(self, arg=None, font=None, side=None, text=None, **kwargs): """ Construct a new Title object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.cone.colorbar.Title` font Sets this color bar's title font. side Determines the location of color bar's title with respect to the color bar. 
Defaults to "top" when `orientation` if "v" and defaults to "right" when `orientation` if "h". text Sets the title of the color bar. Returns ------- Title """ super().__init__("title") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.cone.colorbar.Title constructor must be a dict or an instance of :class:`plotly.graph_objs.cone.colorbar.Title`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("font", arg, font) self._set_property("side", arg, side) self._set_property("text", arg, text) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Title
python
django-extensions__django-extensions
django_extensions/management/commands/set_default_site.py
{ "start": 207, "end": 2891 }
class ____(BaseCommand): help = "Set parameters of the default django.contrib.sites Site" def add_arguments(self, parser): super().add_arguments(parser) parser.add_argument( "--name", dest="site_name", default=None, help="Use this as site name." ) parser.add_argument( "--domain", dest="site_domain", default=None, help="Use this as site domain.", ) parser.add_argument( "--system-fqdn", dest="set_as_system_fqdn", default=False, action="store_true", help="Use the systems FQDN (Fully Qualified Domain Name) as name " "and domain. Can be used in combination with --name", ) @signalcommand def handle(self, *args, **options): if not apps.is_installed("django.contrib.sites"): raise CommandError("The sites framework is not installed.") from django.contrib.sites.models import Site try: site = Site.objects.get(pk=settings.SITE_ID) except Site.DoesNotExist: raise CommandError( "Default site with pk=%s does not exist" % settings.SITE_ID ) else: name = options["site_name"] domain = options["site_domain"] set_as_system_fqdn = options["set_as_system_fqdn"] if all([domain, set_as_system_fqdn]): raise CommandError( "The set_as_system_fqdn cannot be used with domain option." ) # noqa if set_as_system_fqdn: domain = socket.getfqdn() if not domain: raise CommandError("Cannot find systems FQDN") if name is None: name = domain update_kwargs = {} if name and name != site.name: update_kwargs["name"] = name if domain and domain != site.domain: update_kwargs["domain"] = domain if update_kwargs: Site.objects.filter(pk=settings.SITE_ID).update(**update_kwargs) site = Site.objects.get(pk=settings.SITE_ID) print( "Updated default site. You might need to restart django as sites" " are cached aggressively." ) else: print("Nothing to update (need --name, --domain and/or --system-fqdn)") print("Default Site:") print("\tid = %s" % site.id) print("\tname = %s" % site.name) print("\tdomain = %s" % site.domain)
Command
python
coleifer__peewee
playhouse/reflection.py
{ "start": 1117, "end": 5006 }
class ____(object): """ Store metadata about a database column. """ primary_key_types = (IntegerField, AutoField) def __init__(self, name, field_class, raw_column_type, nullable, primary_key=False, column_name=None, index=False, unique=False, default=None, extra_parameters=None): self.name = name self.field_class = field_class self.raw_column_type = raw_column_type self.nullable = nullable self.primary_key = primary_key self.column_name = column_name self.index = index self.unique = unique self.default = default self.extra_parameters = extra_parameters # Foreign key metadata. self.rel_model = None self.related_name = None self.to_field = None def __repr__(self): attrs = [ 'field_class', 'raw_column_type', 'nullable', 'primary_key', 'column_name'] keyword_args = ', '.join( '%s=%s' % (attr, getattr(self, attr)) for attr in attrs) return 'Column(%s, %s)' % (self.name, keyword_args) def get_field_parameters(self): params = {} if self.extra_parameters is not None: params.update(self.extra_parameters) # Set up default attributes. if self.nullable: params['null'] = True if self.field_class is ForeignKeyField or self.name != self.column_name: params['column_name'] = "'%s'" % self.column_name if self.primary_key and not issubclass(self.field_class, AutoField): params['primary_key'] = True if self.default is not None: params['constraints'] = '[SQL("DEFAULT %s")]' % \ self.default.replace('"', '\\"') # Handle ForeignKeyField-specific attributes. if self.is_foreign_key(): params['model'] = self.rel_model if self.to_field: params['field'] = "'%s'" % self.to_field if self.related_name: params['backref'] = "'%s'" % self.related_name # Handle indexes on column. 
if not self.is_primary_key(): if self.unique: params['unique'] = 'True' elif self.index and not self.is_foreign_key(): params['index'] = 'True' return params def is_primary_key(self): return self.field_class is AutoField or self.primary_key def is_foreign_key(self): return self.field_class is ForeignKeyField def is_self_referential_fk(self): return (self.field_class is ForeignKeyField and self.rel_model == "'self'") def set_foreign_key(self, foreign_key, model_names, dest=None, related_name=None): self.foreign_key = foreign_key self.field_class = ForeignKeyField if foreign_key.dest_table == foreign_key.table: self.rel_model = "'self'" else: self.rel_model = model_names[foreign_key.dest_table] self.to_field = dest and dest.name or None self.related_name = related_name or None def get_field(self): # Generate the field definition for this column. field_params = {} for key, value in self.get_field_parameters().items(): if isclass(value) and issubclass(value, Field): value = value.__name__ field_params[key] = value param_str = ', '.join('%s=%s' % (k, v) for k, v in sorted(field_params.items())) field = '%s = %s(%s)' % ( self.name, self.field_class.__name__, param_str) if self.field_class is UnknownField: field = '%s # %s' % (field, self.raw_column_type) return field
Column
python
astropy__astropy
astropy/utils/iers/iers.py
{ "start": 3083, "end": 3982 }
class ____(IERSWarning): """ Downloaded IERS table may be stale. """ def download_file(*args, **kwargs): """ Overload astropy.utils.data.download_file within iers module to use a custom (longer) wait time. This just passes through ``*args`` and ``**kwargs`` after temporarily setting the download_file remote timeout to the local ``iers.conf.remote_timeout`` value. """ kwargs.setdefault( "http_headers", { "User-Agent": "astropy/iers", "Accept": "*/*", }, ) with utils.data.conf.set_temp("remote_timeout", conf.remote_timeout): return utils.data.download_file(*args, **kwargs) def _none_to_float(value): """ Convert None to a valid floating point value. Especially for auto_max_age = None. """ return value if value is not None else np.finfo(float).max
IERSStaleWarning
python
openai__openai-python
src/openai/types/chat/chat_completion_content_part_param.py
{ "start": 910, "end": 1259 }
class ____(TypedDict, total=False): file: Required[FileFile] type: Required[Literal["file"]] """The type of the content part. Always `file`.""" ChatCompletionContentPartParam: TypeAlias = Union[ ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam, File, ]
File
python
great-expectations__great_expectations
tests/scripts/test_public_api_report.py
{ "start": 2401, "end": 4784 }
class ____: pass """ @pytest.fixture def sample_markdown_doc_with_yaml() -> str: return """# Title Content. More content. Some yaml: yaml_contents = \"\"\" name: {datasource_name} class_name: Something \"\"\" End of content. """ @pytest.fixture def repo_root(tmp_path) -> pathlib.Path: return tmp_path @pytest.fixture def sample_docs_example_python_file_string_filepath( repo_root: pathlib.Path, ) -> pathlib.Path: path = ( repo_root / pathlib.Path("tests/integration/docusaurus/sample_docs_example_python_file_string.py") ).relative_to(repo_root) path.touch() return path @pytest.fixture def sample_with_definitions_python_file_string_filepath( repo_root: pathlib.Path, ) -> pathlib.Path: path = (repo_root / pathlib.Path("sample_with_definitions_python_file_string.py")).relative_to( repo_root ) path.touch() return path @pytest.fixture def sample_docs_example_file_contents( sample_docs_example_python_file_string: str, sample_docs_example_python_file_string_filepath: pathlib.Path, ) -> FileContents: return FileContents( filepath=sample_docs_example_python_file_string_filepath, contents=sample_docs_example_python_file_string, ) @pytest.fixture def sample_with_definitions_file_contents( sample_with_definitions_python_file_string: str, sample_with_definitions_python_file_string_filepath: pathlib.Path, ) -> FileContents: return FileContents( filepath=sample_with_definitions_python_file_string_filepath, contents=sample_with_definitions_python_file_string, ) @pytest.fixture def sample_markdown_doc_with_yaml_file_contents( sample_markdown_doc_with_yaml: str, ) -> FileContents: return FileContents( filepath=pathlib.Path("some/random/filepath/markdown.md"), contents=sample_markdown_doc_with_yaml, ) @pytest.fixture def docs_example_parser( sample_docs_example_file_contents: FileContents, ) -> DocsExampleParser: docs_example_parser = DocsExampleParser(file_contents={sample_docs_example_file_contents}) return docs_example_parser @pytest.fixture def empty_docs_example_parser( 
sample_docs_example_file_contents: FileContents, ) -> DocsExampleParser: docs_example_parser = DocsExampleParser(file_contents=set()) return docs_example_parser
ExamplePublicAPIClass
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dlp.py
{ "start": 21568, "end": 22372 }
class ____: @mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook") def test_list_job_triggers(self, mock_hook): mock_hook.return_value.list_job_triggers.return_value = mock.MagicMock() operator = CloudDLPListJobTriggersOperator(project_id=PROJECT_ID, task_id="id") operator.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=None, ) mock_hook.return_value.list_job_triggers.assert_called_once_with( project_id=PROJECT_ID, page_size=None, order_by=None, results_filter=None, retry=DEFAULT, timeout=None, metadata=(), )
TestCloudDLPListJobTriggersOperator
python
astropy__astropy
astropy/utils/masked/tests/test_function_helpers.py
{ "start": 13396, "end": 17951 }
class ____(MaskedArraySetup): def test_put(self): ma = self.ma.copy() v = Masked([50, 150], [False, True]) np.put(ma, [0, 2], v) expected = self.a.copy() np.put(expected, [0, 2], [50, 150]) expected_mask = self.mask_a.copy() np.put(expected_mask, [0, 2], [False, True]) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) np.put(ma, [1, 2], np.ma.masked) np.put(expected_mask, [1, 2], True) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) np.put(ma, [0, 1], np.ma.nomask) np.put(expected_mask, [0, 1], False) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) with pytest.raises(TypeError): # Indices cannot be masked. np.put(ma, Masked([0, 2]), v) with pytest.raises(TypeError): # Array to put masked values in must be masked. np.put(self.a.copy(), [0, 2], v) def test_putmask(self): ma = self.ma.flatten() mask = np.array([True, False, False, False, True, False]) values = Masked( np.arange(100, 650, 100), mask=[False, True, True, True, False, False] ) np.putmask(ma, mask, values) expected = self.a.flatten() np.putmask(expected, mask, values.unmasked) expected_mask = self.mask_a.flatten() np.putmask(expected_mask, mask, values.mask) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) np.putmask(ma, ~mask, np.ma.masked) np.putmask(expected_mask, ~mask, True) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) np.putmask(ma, mask, np.ma.nomask) np.putmask(expected_mask, mask, False) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) with pytest.raises(TypeError): np.putmask(self.a.flatten(), mask, values) def test_place(self): ma = self.ma.flatten() mask = np.array([True, False, False, False, True, False]) values = Masked([100, 200], mask=[False, True]) np.place(ma, mask, values) expected = self.a.flatten() np.place(expected, mask, values.unmasked) expected_mask = 
self.mask_a.flatten() np.place(expected_mask, mask, values.mask) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) np.place(ma, ~mask, np.ma.masked) np.place(expected_mask, ~mask, True) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) np.place(ma, mask, np.ma.nomask) np.place(expected_mask, mask, False) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) with pytest.raises(TypeError): np.place(self.a.flatten(), mask, values) def test_copyto(self): ma = self.ma.flatten() mask = np.array([True, False, False, False, True, False]) values = Masked( np.arange(100, 650, 100), mask=[False, True, True, True, False, False] ) np.copyto(ma, values, where=mask) expected = self.a.flatten() np.copyto(expected, values.unmasked, where=mask) expected_mask = self.mask_a.flatten() np.copyto(expected_mask, values.mask, where=mask) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) np.copyto(ma, np.ma.masked, where=~mask) np.copyto(expected_mask, True, where=~mask) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) np.copyto(ma, np.ma.nomask, where=mask) np.copyto(expected_mask, False, where=mask) assert_array_equal(ma.unmasked, expected) assert_array_equal(ma.mask, expected_mask) with pytest.raises(TypeError): np.copyto(self.a.flatten(), values, where=mask) @pytest.mark.parametrize("value", [0.25, np.ma.masked]) def test_fill_diagonal(self, value): ma = self.ma[:2, :2].copy() np.fill_diagonal(ma, value) expected = ma.copy() expected[np.diag_indices_from(expected)] = value assert_array_equal(ma.unmasked, expected.unmasked) assert_array_equal(ma.mask, expected.mask)
TestSettingParts
python
instagram__MonkeyType
tests/test_stubs.py
{ "start": 1284, "end": 1741 }
class ____: def test_merge(self): a = ImportMap() a['module.a'] = {'ClassA', 'ClassB'} a['module.b'] = {'ClassE', 'ClassF'} b = ImportMap() b['module.a'] = {'ClassB', 'ClassC'} b['module.c'] = {'ClassX', 'ClassY'} expected = ImportMap() for mod in ('module.a', 'module.b', 'module.c'): expected[mod] = a[mod] | b[mod] a.merge(b) assert a == expected
TestImportMap
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py
{ "start": 9729, "end": 9864 }
class ____: pass # ====== Cool constants ======== BANANA = 100 APPLE = 200 # end # https://github.com/astral-sh/ruff/issues/19752
A
python
run-llama__llama_index
llama-index-integrations/tools/llama-index-tools-mcp/llama_index/tools/mcp/tool_spec_mixins.py
{ "start": 377, "end": 3028 }
class ____: def _resolve_field_type( self: "McpToolSpec", field_schema: dict, defs: dict, ) -> Any: """Resolve the Python type from a field schema.""" if "$ref" in field_schema: return self._resolve_reference(field_schema, defs) if "enum" in field_schema: return Literal[tuple(field_schema["enum"])] if "anyOf" in field_schema: return self._resolve_union_type(field_schema, defs) return self._resolve_basic_type(field_schema, defs) def _resolve_reference( self: "McpToolSpec", field_schema: dict, defs: dict, ) -> Any: """Resolve a $ref reference.""" ref_name = self._extract_ref_name(field_schema["$ref"]) if ref_name not in defs: return self.properties_cache.get(ref_name) ref_schema = defs[ref_name] if "anyOf" in ref_schema: return self._resolve_union_type(ref_schema, defs) if self._is_simple_array(ref_schema): return self._create_list_type(ref_schema, defs) if self._is_simple_object(ref_schema): return self._create_dict_type(ref_schema, defs) return self.properties_cache.get(ref_name) or self._create_model( ref_schema, ref_name, defs, ) def _resolve_union_type( self: "McpToolSpec", schema: dict, defs: dict, ) -> Any: """Resolve a Union type (anyOf).""" union_types = [ self._resolve_union_option(option, defs) for option in schema["anyOf"] ] return Union[tuple(union_types)] if len(union_types) > 1 else union_types[0] def _resolve_union_option( self: "McpToolSpec", option: dict, defs: dict, ) -> Any: """Resolve a single option in a union type.""" if "$ref" in option: return self._resolve_reference(option, defs) if option.get("type") == "null": return type(None) return self._resolve_basic_type(option, defs) def _resolve_basic_type( self: "McpToolSpec", schema: dict, defs: dict, ) -> Any: """Resolve a basic JSON Schema type.""" json_type = schema.get("type", "string") json_type = json_type[0] if isinstance(json_type, list) else json_type if self._is_simple_array(schema): return self._create_list_type(schema, defs) if self._is_simple_object(schema): return 
self._create_dict_type(schema, defs) return json_type_mapping.get(json_type, str)
TypeResolutionMixin
python
pennersr__django-allauth
allauth/account/forms.py
{ "start": 23359, "end": 23799 }
class ____(PasswordVerificationMixin, UserForm): password1 = SetPasswordField(label=_("Password")) password2 = PasswordField(label=_("Password (again)")) def __init__(self, *args, **kwargs): super(SetPasswordForm, self).__init__(*args, **kwargs) self.fields["password1"].user = self.user def save(self): flows.password_change.change_password(self.user, self.cleaned_data["password1"])
SetPasswordForm
python
tensorflow__tensorflow
tensorflow/python/ops/variable_scope.py
{ "start": 58988, "end": 77625 }
class ____: """Wrapper allowing functional layers to be used with eager execution. When eager execution is enabled Variables get deleted when they go out of scope, and are not stored in global collections by default. A lot of code (mostly the functional layers in tf.layers) assumes that variables are kept in a global list. EagerVariableStore can be used in conjunction with this code to make it eager-friendly. For example, to create a dense layer, use: ``` container = tfe.EagerVariableStore() for input in dataset_iterator: with container.as_default(): x = tf.compat.v1.layers.dense(input, name="l1") print(container.variables) # Should print the variables used in the layer. ``` """ def __init__(self, store=None): if store is not None: if not store._store_eager_variables: # pylint: disable=protected-access raise ValueError("Cannot construct EagerVariableStore from a " "VariableStore object that does not hold eager " "variables.") self._store = store else: self._store = _VariableStore() self._store._store_eager_variables = True # pylint: disable=protected-access def as_default(self): return with_variable_store(self._store) def variables(self): return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access def trainable_variables(self): # pylint: disable=protected-access return sorted([x for x in self._store._vars.values() if x.trainable], key=lambda x: x.name) # pylint: enable=protected-access def non_trainable_variables(self): # pylint: disable=protected-access return sorted([x for x in self._store._vars.values() if not x.trainable], key=lambda x: x.name) # pylint: enable=protected-access def copy(self): """Copy this variable store and all of its contents. Variables contained in this store will be copied over to the new variable store, meaning that they can be modified without affecting the variables in this store. Returns: A new EagerVariableStore instance containing copied variables. 
""" # pylint: disable=protected-access new_store = EagerVariableStore() for key, var in self._store._vars.items(): # Strip device out of variable name. try: index = var.name.index(":") except ValueError: stripped_var_name = var.name else: stripped_var_name = var.name[:index] # Create new variable with same value, name, and "trainable" flag. new_var = resource_variable_ops.ResourceVariable( var.read_value(), name=stripped_var_name, trainable=var.trainable) new_store._store._vars[key] = new_var return new_store # pylint: enable=protected-access # The argument list for get_variable must match arguments to get_local_variable. # So, if you are updating the arguments, also update arguments to # get_local_variable below. @tf_export(v1=["get_variable"]) def get_variable(name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=None, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): return get_variable_scope().get_variable( _get_default_variable_store(), name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, custom_getter=custom_getter, constraint=constraint, synchronization=synchronization, aggregation=aggregation) get_variable_or_local_docstring = ("""%s @compatibility(TF2) Although it is a legacy `compat.v1` api, `tf.compat.v1.get_variable` is mostly compatible with eager execution and `tf.function` but only if you combine it with the `tf.compat.v1.keras.utils.track_tf1_style_variables` decorator. (Though it will behave as if reuse is always set to `AUTO_REUSE`.) See the [model migration guide](https://www.tensorflow.org/guide/migrate/model_mapping) for more info. 
If you do not combine it with `tf.compat.v1.keras.utils.track_tf1_style_variables`, `get_variable` will create a brand new variable every single time it is called and will never reuse variables, regardless of variable names or `reuse` arguments. The TF2 equivalent of this symbol would be `tf.Variable`, but note that when using `tf.Variable` you must make sure you track your variables (and regularizer arguments) either manually or via `tf.Module` or `tf.keras.layers.Layer` mechanisms. A section of the [migration guide](https://www.tensorflow.org/guide/migrate/model_mapping#incremental_migration_to_native_tf2) provides more details on incrementally migrating these usages to `tf.Variable` as well. Note: The `partitioner` arg is not compatible with TF2 behaviors even when using `tf.compat.v1.keras.utils.track_tf1_style_variables`. It can be replaced by using `ParameterServerStrategy` and its partitioners. See the [multi-gpu migration guide](https://www.tensorflow.org/guide/migrate/multi_worker_cpu_gpu_training) and the ParameterServerStrategy guides it references for more info. @end_compatibility %sThis function prefixes the name with the current variable scope and performs reuse checks. See the [Variable Scope How To](https://tensorflow.org/guide/variables) for an extensive description of how reusing works. Here is a basic example: ```python def foo(): with tf.variable_scope("foo", reuse=tf.AUTO_REUSE): v = tf.get_variable("v", [1]) return v v1 = foo() # Creates v. v2 = foo() # Gets the same, existing v. assert v1 == v2 ``` If initializer is `None` (the default), the default initializer passed in the variable scope will be used. If that one is `None` too, a `glorot_uniform_initializer` will be used. The initializer can also be a Tensor, in which case the variable is initialized to this value and shape. 
Similarly, if the regularizer is `None` (the default), the default regularizer passed in the variable scope will be used (if that is `None` too, then by default no regularization is performed). If a partitioner is provided, a `PartitionedVariable` is returned. Accessing this object as a `Tensor` returns the shards concatenated along the partition axis. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable if one is created. Can either be an initializer object or a Tensor. If it's a Tensor, its shape must be known unless validate_shape is False. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection `tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization. %scollections: List of graph collections keys to add the Variable to. Defaults to `[%s]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and `dtype` of the Variable to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. For this to be used the initializer must be a Tensor and not an initializer object. use_resource: If False, creates a regular Variable. 
If true, creates an experimental ResourceVariable instead with well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be True. custom_getter: Callable that takes as a first argument the true getter, and allows overwriting the internal get_variable method. The signature of `custom_getter` should match that of this method, but the most future-proof version will allow for changes: `def custom_getter(getter, *args, **kwargs)`. Direct access to all `get_variable` parameters is also allowed: `def custom_getter(getter, name, *args, **kwargs)`. A simple identity custom getter that simply creates variables with modified names is: ```python def custom_getter(getter, name, *args, **kwargs): return getter(name + '_suffix', *args, **kwargs) ``` constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: The created or existing `Variable` (or `PartitionedVariable`, if a partitioner was used). Raises: ValueError: when creating a new variable and shape is not declared, when violating reuse during variable creation, or when `initializer` dtype and `dtype` don't match. 
Reuse is set inside `variable_scope`. """) get_variable.__doc__ = get_variable_or_local_docstring % ( "Gets an existing variable with these parameters or create a new one.", "", "trainable: If `True` also add the variable to the graph collection\n" " `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ", "GraphKeys.GLOBAL_VARIABLES") # The argument list for get_local_variable must match arguments to get_variable. # So, if you are updating the arguments, also update arguments to get_variable. @tf_export(v1=["get_local_variable"]) def get_local_variable( # pylint: disable=missing-docstring name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=False, # pylint: disable=unused-argument collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, custom_getter=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): if collections: collections += [ops.GraphKeys.LOCAL_VARIABLES] else: collections = [ops.GraphKeys.LOCAL_VARIABLES] return get_variable( name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=False, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, custom_getter=custom_getter, constraint=constraint) get_local_variable.__doc__ = get_variable_or_local_docstring % ( "Gets an existing *local* variable or creates a new one.", "Behavior is the same as in `get_variable`, except that variables are\n" "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n" "`False`.\n", "", "GraphKeys.LOCAL_VARIABLES") def _get_partitioned_variable(name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=True, collections=None, caching_device=None, partitioner=None, validate_shape=True, use_resource=None, constraint=None, 
synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE): """Gets or creates a sharded variable list with these parameters. The `partitioner` must be a callable that accepts a fully defined `TensorShape` and returns a sequence of integers (the `partitions`). These integers describe how to partition the given sharded `Variable` along the given dimension. That is, `partitions[1] = 3` means split the `Variable` into 3 shards along dimension 1. Currently, sharding along only one axis is supported. If the list of variables with the given name (prefix) is already stored, we return the stored variables. Otherwise, we create a new one. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If the initializer is a callable, then it will be called for each shard. Otherwise the initializer should match the shape of the entire sharded Variable, and it will be sliced accordingly for each shard. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable if one is created. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). collections: List of graph collections keys to add the Variable to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). 
caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and `dtype` of the Variable to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates an experimental ResourceVariable instead which has well-defined semantics. Defaults to False (will later change to True). constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: A tuple `(shards, partitions)` where `shards` is the list of `Variable` shards and `partitions` is the output of the partitioner on the input shape. 
Raises: ValueError: when creating a new variable and shape is not declared, or when violating reuse during variable creation. Reuse is set inside `variable_scope`. """ # pylint: disable=protected-access scope = get_variable_scope() if scope.custom_getter is not None: raise ValueError( "Private access to _get_partitioned_variable is not allowed when " "a custom getter is set. Current custom getter: %s. " "It is likely that you're using create_partitioned_variables. " "If so, consider instead using get_variable with a non-empty " "partitioner parameter instead." % scope.custom_getter) return scope._get_partitioned_variable( _get_default_variable_store(), name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, trainable=trainable, collections=collections, caching_device=caching_device, partitioner=partitioner, validate_shape=validate_shape, use_resource=use_resource, constraint=constraint, synchronization=synchronization, aggregation=aggregation) # pylint: enable=protected-access # Named like a function for compatibility with the previous # @tf_contextlib.contextmanager definition.
EagerVariableStore
python
PrefectHQ__prefect
src/integrations/prefect-shell/prefect_shell/commands.py
{ "start": 6450, "end": 15226 }
class ____(JobBlock[list[str]]): """ A block representing a shell operation, containing multiple commands. For long-lasting operations, use the trigger method and utilize the block as a context manager for automatic closure of processes when context is exited. If not, manually call the close method to close processes. For short-lasting operations, use the run method. Context is automatically managed with this method. Attributes: commands: A list of commands to execute sequentially. stream_output: Whether to stream output. env: A dictionary of environment variables to set for the shell operation. working_dir: The working directory context the commands will be executed within. shell: The shell to use to execute the commands. extension: The extension to use for the temporary file. if unset defaults to `.ps1` on Windows and `.sh` on other platforms. Examples: Load a configured block: ```python from prefect_shell import ShellOperation shell_operation = ShellOperation.load("BLOCK_NAME") ``` """ _block_type_name = "Shell Operation" _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/0b47a017e1b40381de770c17647c49cdf6388d1c-250x250.png" # noqa: E501 _documentation_url = "https://docs.prefect.io/integrations/prefect-shell" # noqa commands: list[str] = Field( default=..., description="A list of commands to execute sequentially." ) stream_output: bool = Field(default=True, description="Whether to stream output.") env: dict[str, str] = Field( default_factory=dict, title="Environment Variables", description="Environment variables to use for the subprocess.", ) working_dir: Optional[DirectoryPath] = Field( default=None, title="Working Directory", description=( "The absolute path to the working directory " "the command will be executed within." ), ) shell: Optional[str] = Field( default=None, description=( "The shell to run the command with; if unset, " "defaults to `powershell` on Windows and `bash` on other platforms." 
), ) extension: Optional[str] = Field( default=None, description=( "The extension to use for the temporary file; if unset, " "defaults to `.ps1` on Windows and `.sh` on other platforms." ), ) _exit_stack: AsyncExitStack = PrivateAttr( default_factory=AsyncExitStack, ) @contextmanager def _prep_trigger_command(self) -> Generator[list[str], None, None]: """ Write the commands to a temporary file, handling all the details of creating the file and cleaning it up afterwards. Then, return the command to run the temporary file. """ temp_file = None try: extension = self.extension or (".ps1" if sys.platform == "win32" else ".sh") temp_file = tempfile.NamedTemporaryFile( prefix="prefect-", suffix=extension, delete=False, ) joined_commands = os.linesep.join(self.commands) self.logger.debug( f"Writing the following commands to " f"{temp_file.name!r}:{os.linesep}{joined_commands}" ) temp_file.write(joined_commands.encode()) if self.shell is None and sys.platform == "win32" or extension == ".ps1": shell = "powershell" elif self.shell is None: shell = "bash" else: shell = self.shell.lower() if shell == "powershell": # if powershell, set exit code to that of command temp_file.write("\r\nExit $LastExitCode".encode()) temp_file.close() trigger_command = [shell, temp_file.name] yield trigger_command finally: if temp_file is not None and os.path.exists(temp_file.name): os.remove(temp_file.name) def _compile_kwargs(self, **open_kwargs: dict[str, Any]) -> dict[str, Any]: """ Helper method to compile the kwargs for `open_process` so it's not repeated across the run and trigger methods. 
""" trigger_command = self._exit_stack.enter_context(self._prep_trigger_command()) input_env = os.environ.copy() input_env.update(self.env) input_open_kwargs = dict( command=trigger_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=input_env, cwd=self.working_dir, **open_kwargs, ) return input_open_kwargs @sync_compatible async def trigger(self, **open_kwargs: dict[str, Any]) -> ShellProcess: """ Triggers a shell command and returns the shell command run object to track the execution of the run. This method is ideal for long-lasting shell commands; for short-lasting shell commands, it is recommended to use the `run` method instead. Args: **open_kwargs: Additional keyword arguments to pass to `open_process`. Returns: A `ShellProcess` object. Examples: Sleep for 5 seconds and then print "Hello, world!": ```python from prefect_shell import ShellOperation with ShellOperation( commands=["sleep 5", "echo 'Hello, world!'"], ) as shell_operation: shell_process = shell_operation.trigger() shell_process.wait_for_completion() shell_output = shell_process.fetch_result() ``` """ input_open_kwargs = self._compile_kwargs(**open_kwargs) process = await self._exit_stack.enter_async_context( open_process(**input_open_kwargs) ) num_commands = len(self.commands) self.logger.info( f"PID {process.pid} triggered with {num_commands} commands running " f"inside the {(self.working_dir or '.')!r} directory." ) return ShellProcess(shell_operation=self, process=process) @sync_compatible async def run(self, **open_kwargs: dict[str, Any]) -> list[str]: """ Runs a shell command, but unlike the trigger method, additionally waits and fetches the result directly, automatically managing the context. This method is ideal for short-lasting shell commands; for long-lasting shell commands, it is recommended to use the `trigger` method instead. Args: **open_kwargs: Additional keyword arguments to pass to `open_process`. Returns: The lines output from the shell command as a list. 
Examples: Sleep for 5 seconds and then print "Hello, world!": ```python from prefect_shell import ShellOperation shell_output = ShellOperation( commands=["sleep 5", "echo 'Hello, world!'"] ).run() ``` """ input_open_kwargs = self._compile_kwargs(**open_kwargs) async with open_process(**input_open_kwargs) as process: shell_process = ShellProcess(shell_operation=self, process=process) num_commands = len(self.commands) self.logger.info( f"PID {process.pid} triggered with {num_commands} commands running " f"inside the {(self.working_dir or '.')!r} directory." ) await shell_process.wait_for_completion() result = await shell_process.fetch_result() return result @sync_compatible async def close(self): """ Close the job block. """ await self._exit_stack.aclose() self.logger.info("Successfully closed all open processes.") async def aclose(self): """ Asynchronous version of the close method. """ await self.close() async def __aenter__(self) -> "ShellOperation": """ Asynchronous version of the enter method. """ return self async def __aexit__(self, *exc_info: Any): """ Asynchronous version of the exit method. """ await self.close() def __enter__(self) -> "ShellOperation": """ Enter the context of the job block. """ return self def __exit__(self, *exc_info: Any): """ Exit the context of the job block. """ self.close()
ShellOperation
python
fsspec__filesystem_spec
fsspec/callbacks.py
{ "start": 6501, "end": 9210 }
class ____(Callback): """ A callback to display a progress bar using tqdm Parameters ---------- tqdm_kwargs : dict, (optional) Any argument accepted by the tqdm constructor. See the `tqdm doc <https://tqdm.github.io/docs/tqdm/#__init__>`_. Will be forwarded to `tqdm_cls`. tqdm_cls: (optional) subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`. Examples -------- >>> import fsspec >>> from fsspec.callbacks import TqdmCallback >>> fs = fsspec.filesystem("memory") >>> path2distant_data = "/your-path" >>> fs.upload( ".", path2distant_data, recursive=True, callback=TqdmCallback(), ) You can forward args to tqdm using the ``tqdm_kwargs`` parameter. >>> fs.upload( ".", path2distant_data, recursive=True, callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}), ) You can also customize the progress bar by passing a subclass of `tqdm`. .. code-block:: python class TqdmFormat(tqdm): '''Provides a `total_time` format parameter''' @property def format_dict(self): d = super().format_dict total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1) d.update(total_time=self.format_interval(total_time) + " in total") return d >>> with TqdmCallback( tqdm_kwargs={ "desc": "desc", "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}", }, tqdm_cls=TqdmFormat, ) as callback: fs.upload(".", path2distant_data, recursive=True, callback=callback) """ def __init__(self, tqdm_kwargs=None, *args, **kwargs): try: from tqdm import tqdm except ImportError as exce: raise ImportError( "Using TqdmCallback requires tqdm to be installed" ) from exce self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm) self._tqdm_kwargs = tqdm_kwargs or {} self.tqdm = None super().__init__(*args, **kwargs) def call(self, *args, **kwargs): if self.tqdm is None: self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs) self.tqdm.total = self.size self.tqdm.update(self.value - self.tqdm.n) def close(self): if self.tqdm is not None: self.tqdm.close() self.tqdm = None def 
__del__(self): return self.close() DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback()
TqdmCallback
python
crytic__slither
slither/tools/mutator/mutators/abstract_mutator.py
{ "start": 508, "end": 5653 }
class ____( metaclass=abc.ABCMeta ): # pylint: disable=too-few-public-methods,too-many-instance-attributes NAME = "" HELP = "" def __init__( # pylint: disable=too-many-arguments self, compilation_unit: SlitherCompilationUnit, timeout: int, testing_command: str, testing_directory: str, contract_instance: Contract, solc_remappings: Union[str, None], verbose: bool, output_folder: Path, dont_mutate_line: List[int], rate: int = 10, seed: Optional[int] = None, ) -> None: self.compilation_unit = compilation_unit self.slither = compilation_unit.core self.seed = seed self.rate = rate self.test_command = testing_command self.test_directory = testing_directory self.timeout = timeout self.solc_remappings = solc_remappings self.verbose = verbose self.output_folder = output_folder self.contract = contract_instance self.in_file = self.contract.source_mapping.filename.absolute self.dont_mutate_line = dont_mutate_line # total revert/comment/tweak mutants that were generated and compiled self.total_mutant_counts = [0, 0, 0] # total uncaught revert/comment/tweak mutants self.uncaught_mutant_counts = [0, 0, 0] if not self.NAME: raise IncorrectMutatorInitialization( f"NAME is not initialized {self.__class__.__name__}" ) if not self.HELP: raise IncorrectMutatorInitialization( f"HELP is not initialized {self.__class__.__name__}" ) if rate < 0 or rate > 100: raise IncorrectMutatorInitialization( f"rate must be between 0 and 100 {self.__class__.__name__}" ) def should_mutate_node(self, node) -> bool: return ( not node.source_mapping.lines[0] in self.dont_mutate_line and node.source_mapping.filename.absolute == self.in_file ) @abc.abstractmethod def _mutate(self) -> Dict: """Abstract placeholder, will be overwritten by each mutator""" return {} # pylint: disable=too-many-branches def mutate(self) -> Tuple[List[int], List[int], List[int]]: all_patches: Dict = {} # pylint: disable=broad-exception-caught try: # call _mutate function from different mutators (all_patches) = self._mutate() except 
Exception as e: logger.error(red("%s mutator failed in %s: %s"), self.NAME, self.contract.name, str(e)) if "patches" not in all_patches: logger.debug("No patches found by %s", self.NAME) return [0, 0, 0], [0, 0, 0], self.dont_mutate_line for file in all_patches["patches"]: # Note: This should only loop over a single file original_txt = self.slither.source_code[file].encode("utf8") patches = all_patches["patches"][file] patches.sort(key=lambda x: x["start"]) for patch in patches: # test the patch patchWasCaught = test_patch( self.output_folder, file, patch, self.test_command, self.NAME, self.timeout, self.solc_remappings, self.verbose, ) # count the uncaught mutants, flag RR/CR mutants to skip further mutations if patchWasCaught == 0: if self.NAME == "RR": self.uncaught_mutant_counts[0] += 1 self.dont_mutate_line.append(patch["line_number"]) elif self.NAME == "CR": self.uncaught_mutant_counts[1] += 1 self.dont_mutate_line.append(patch["line_number"]) else: self.uncaught_mutant_counts[2] += 1 patched_txt, _ = apply_patch(original_txt, patch, 0) diff = create_diff(self.compilation_unit, original_txt, patched_txt, file) if not diff: logger.info(f"Impossible to generate patch; empty {patches}") # add uncaught mutant patches to a output file with (self.output_folder / "patches_files.txt").open( "a", encoding="utf8" ) as patches_file: patches_file.write(diff + "\n") # count the total number of mutants that we were able to compile if patchWasCaught != 2: if self.NAME == "RR": self.total_mutant_counts[0] += 1 elif self.NAME == "CR": self.total_mutant_counts[1] += 1 else: self.total_mutant_counts[2] += 1 return self.total_mutant_counts, self.uncaught_mutant_counts, self.dont_mutate_line
AbstractMutator
python
huggingface__transformers
src/transformers/models/esm/openfold_utils/protein.py
{ "start": 1001, "end": 11497 }
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, CB.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format record into a `Protein`.

    Only the backbone atoms N, CA, C are populated; `b_factors` is left as None.

    Args:
        proteinnet_str: the raw ProteinNet record, containing `[PRIMARY]`,
            `[TERTIARY]` and `[MASK]` sections.

    Returns:
        A `Protein` with positions, mask and aatype filled from the record.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags: list[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[tuple[str, list[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: list[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # Strings are immutable: rebuild the sequence instead of assigning
            # to seq[i] (which raised TypeError for any non-standard residue).
            # Unknown symbols are mapped to "X".
            seq = "".join(res if res in residue_constants.restypes else "X" for res in seq)
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: list[list[float]] = []
            # One whitespace-separated coordinate row per axis (x, y, z).
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            # Columns are interleaved N/CA/C triplets; de-interleave per atom.
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            # Mark the backbone atom columns present, then zero out masked residues.
            for atom in atoms:
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        # NOTE(review): b_factors=None violates the field's np.ndarray
        # annotation and would crash to_pdb — presumably callers never convert
        # ProteinNet-derived proteins to PDB; confirm before tightening.
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> list[str]:
    """Return REMARK/PARENT header lines for *prot*, restricted to *chain_id*.

    If no parents apply to the chain, a single `PARENT N/A` line is emitted.
    """
    pdb_headers: list[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling
    """
    out_pdb_lines: list[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: list[list[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by chain index, then lay them out densely from
            # chain 0 to the maximum index, filling gaps with "N/A".
            parent_dict: dict[str, list[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max(int(chain_idx) for chain_idx in parent_dict)
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        # Strip any pre-existing PARENT/REMARK lines; re-emit a PARENT line at
        # each chain boundary (TER not followed by END).
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string.

    Args:
        prot: The protein to convert to PDB.

    Returns:
        PDB string.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        # Map a restype index to its 3-letter code; unknowns become "UNK".
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: list[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask.

    `Protein.atom_mask` typically is defined according to the atoms that are reported in the PDB. This function
    computes a mask according to heavy atoms that should be present in the given sequence of amino acids.

    Args:
        prot: `Protein` whose fields are `numpy.ndarray` objects.

    Returns:
        An ideal atom mask.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: "FeatureDict",
    result: "ModelOutput",
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a protein from a prediction.

    Args:
        features: Dictionary holding model inputs.
        result: Dictionary holding model outputs.
        b_factors: (Optional) B-factors to use for the protein. Defaults to all
            zeros when omitted.
        chain_index: (Optional) Chain indices for multi-chain predictions
        remark: (Optional) Remark about the prediction
        parents: (Optional) List of template names
        parents_chain_index: (Optional) Chain index for each template in `parents`

    Returns:
        A protein instance.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        # PDB residue numbering is 1-based.
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
Protein